Merge "Merge V8 5.6.326.50"
diff --git a/AUTHORS b/AUTHORS
index 0229c92..476d0c3 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -65,6 +65,7 @@
 Geoffrey Garside <ggarside@gmail.com>
 Gwang Yoon Hwang <ryumiel@company100.net>
 Han Choongwoo <cwhan.tunz@gmail.com>
+Henrique Ferreiro <henrique.ferreiro@gmail.com>
 Hirofumi Mako <mkhrfm@gmail.com>
 Honggyu Kim <honggyu.kp@gmail.com>
 Ioseb Dzmanashvili <ioseb.dzmanashvili@gmail.com>
@@ -111,6 +112,7 @@
 Robert Nagy <robert.nagy@gmail.com>
 Ryan Dahl <ry@tinyclouds.org>
 Sakthipriyan Vairamani (thefourtheye) <thechargingvolcano@gmail.com>
+Sander Mathijs van Veen <sander@leaningtech.com>
 Sandro Santilli <strk@keybit.net>
 Sanjoy Das <sanjoy@playingwithpointers.com>
 Seo Sanghyeon <sanxiyn@gmail.com>
diff --git a/Android.base.mk b/Android.base.mk
index 5e72c6d..9f73691 100644
--- a/Android.base.mk
+++ b/Android.base.mk
@@ -1,10 +1,14 @@
+### GENERATED, do not edit
+### for changes, see genmakefiles.py
 LOCAL_PATH := $(call my-dir)
-
-v8_base_common_src := \
+include $(CLEAR_VARS)
+include $(LOCAL_PATH)/Android.v8common.mk
+LOCAL_MODULE := libv8base
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_SRC_FILES := \
 	src/base/bits.cc \
 	src/base/cpu.cc \
 	src/base/debug/stack_trace.cc \
-	src/base/debug/stack_trace_android.cc \
 	src/base/division-by-constant.cc \
 	src/base/file-utils.cc \
 	src/base/functional.cc \
@@ -18,58 +22,41 @@
 	src/base/platform/time.cc \
 	src/base/sys-info.cc \
 	src/base/utils/random-number-generator.cc
-
-include $(CLEAR_VARS)
-
-include $(LOCAL_PATH)/Android.v8common.mk
-
-# Set up the target identity
-LOCAL_MODULE := libv8base
-LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-
-# Target is always linux.
-LOCAL_SRC_FILES := \
-	$(v8_base_common_src) \
-	src/base/platform/platform-linux.cc \
-
-LOCAL_SRC_FILES_x86 += src/base/atomicops_internals_x86_gcc.cc
-LOCAL_SRC_FILES_x86_64 += src/base/atomicops_internals_x86_gcc.cc
-
+LOCAL_SRC_FILES += \
+	src/base/debug/stack_trace_android.cc \
+	src/base/platform/platform-linux.cc
 LOCAL_C_INCLUDES := $(LOCAL_PATH)/src
-
 include $(BUILD_STATIC_LIBRARY)
 
-# ====================================
 include $(CLEAR_VARS)
-
 include $(LOCAL_PATH)/Android.v8common.mk
-
-# Set up the target identity
 LOCAL_MODULE := libv8base
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-
-LOCAL_SRC_FILES := $(v8_base_common_src)
-
-# Host may be linux or darwin.
+LOCAL_SRC_FILES := \
+	src/base/bits.cc \
+	src/base/cpu.cc \
+	src/base/debug/stack_trace.cc \
+	src/base/division-by-constant.cc \
+	src/base/file-utils.cc \
+	src/base/functional.cc \
+	src/base/ieee754.cc \
+	src/base/logging.cc \
+	src/base/once.cc \
+	src/base/platform/condition-variable.cc \
+	src/base/platform/mutex.cc \
+	src/base/platform/platform-posix.cc \
+	src/base/platform/semaphore.cc \
+	src/base/platform/time.cc \
+	src/base/sys-info.cc \
+	src/base/utils/random-number-generator.cc
 ifeq ($(HOST_OS),linux)
 LOCAL_SRC_FILES += \
-	src/base/platform/platform-linux.cc
+	src/base/platform/platform-linux.cc \
+	src/base/debug/stack_trace_posix.cc
 endif
 ifeq ($(HOST_OS),darwin)
 LOCAL_SRC_FILES += \
 	src/base/platform/platform-macos.cc
 endif
-
-ifeq ($(HOST_ARCH),x86)
-LOCAL_SRC_FILES += src/base/atomicops_internals_x86_gcc.cc
-endif
-ifeq ($(HOST_ARCH),x86_64)
-LOCAL_SRC_FILES += src/base/atomicops_internals_x86_gcc.cc
-endif
-
 LOCAL_C_INCLUDES := $(LOCAL_PATH)/src
-
 include $(BUILD_HOST_STATIC_LIBRARY)
-
-v8_base_common_src :=
-
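
The regenerated Android.base.mk defines libv8base twice, once against BUILD_STATIC_LIBRARY for the device and once against BUILD_HOST_STATIC_LIBRARY for the host, inlining the shared source list in both stanzas instead of routing it through the old v8_base_common_src variable. A minimal sketch of that target/host split, assuming the standard Android.mk environment; the module and file names (libexample, src/common.cc, src/platform-*.cc) are placeholders, not part of V8:

    LOCAL_PATH := $(call my-dir)

    # Device flavor of the module.
    include $(CLEAR_VARS)
    LOCAL_MODULE := libexample
    LOCAL_MODULE_CLASS := STATIC_LIBRARIES
    LOCAL_SRC_FILES := src/common.cc src/platform-linux.cc
    include $(BUILD_STATIC_LIBRARY)

    # Host flavor; reusing the module name is fine because target and
    # host modules live in separate namespaces.
    include $(CLEAR_VARS)
    LOCAL_MODULE := libexample
    LOCAL_MODULE_CLASS := STATIC_LIBRARIES
    ifeq ($(HOST_OS),darwin)
    LOCAL_SRC_FILES := src/common.cc src/platform-macos.cc
    else
    LOCAL_SRC_FILES := src/common.cc src/platform-linux.cc
    endif
    include $(BUILD_HOST_STATIC_LIBRARY)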
diff --git a/Android.mkpeephole.mk b/Android.mkpeephole.mk
index b47f602..8d28b3b 100644
--- a/Android.mkpeephole.mk
+++ b/Android.mkpeephole.mk
@@ -1,39 +1,25 @@
+### GENERATED, do not edit
+### for changes, see genmakefiles.py
 LOCAL_PATH := $(call my-dir)
 include $(CLEAR_VARS)
-
 include $(LOCAL_PATH)/Android.v8common.mk
-
-# Set up the target identity
 LOCAL_MODULE := v8mkpeephole
-
 LOCAL_SRC_FILES := \
+	src/interpreter/bytecode-operands.cc \
 	src/interpreter/bytecodes.cc \
-        src/interpreter/mkpeephole.cc
-
+	src/interpreter/mkpeephole.cc
 LOCAL_STATIC_LIBRARIES += libv8base liblog
 LOCAL_LDLIBS_linux += -lrt
-
 include $(BUILD_HOST_EXECUTABLE)
-
-#================================
 include $(CLEAR_VARS)
-
 include $(LOCAL_PATH)/Android.v8common.mk
-
-# Set up the target identity
 LOCAL_MODULE := v8peephole
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-
 generated_sources := $(call local-generated-sources-dir)
-
 PEEPHOLE_TOOL := $(HOST_OUT_EXECUTABLES)/v8mkpeephole
 PEEPHOLE_FILE := $(generated_sources)/bytecode-peephole-table.cc
 $(PEEPHOLE_FILE): PRIVATE_CUSTOM_TOOL = $(PEEPHOLE_TOOL) $(PEEPHOLE_FILE)
 $(PEEPHOLE_FILE): $(PEEPHOLE_TOOL)
 	$(transform-generated-source)
-
 LOCAL_GENERATED_SOURCES += $(PEEPHOLE_FILE)
-
 include $(BUILD_STATIC_LIBRARY)
-
-
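
Android.mkpeephole.mk builds a host executable (v8mkpeephole) and then runs it to generate bytecode-peephole-table.cc, which is compiled into the v8peephole static library. A minimal sketch of that generate-then-compile pattern, assuming the surrounding Android.mk build environment for local-generated-sources-dir and transform-generated-source; the tool and output names (mygen, generated-table.cc) are placeholders:

    generated_sources := $(call local-generated-sources-dir)
    MYGEN_TOOL := $(HOST_OUT_EXECUTABLES)/mygen
    MYGEN_FILE := $(generated_sources)/generated-table.cc

    # transform-generated-source invokes PRIVATE_CUSTOM_TOOL to produce $@.
    $(MYGEN_FILE): PRIVATE_CUSTOM_TOOL = $(MYGEN_TOOL) $(MYGEN_FILE)
    $(MYGEN_FILE): $(MYGEN_TOOL)
    	$(transform-generated-source)

    # The generated .cc is then compiled into the current module.
    LOCAL_GENERATED_SOURCES += $(MYGEN_FILE)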
diff --git a/Android.platform.mk b/Android.platform.mk
index 94a5c6e..0726dad 100644
--- a/Android.platform.mk
+++ b/Android.platform.mk
@@ -1,12 +1,10 @@
+### GENERATED, do not edit
+### for changes, see genmakefiles.py
 LOCAL_PATH := $(call my-dir)
 include $(CLEAR_VARS)
-
 include $(LOCAL_PATH)/Android.v8common.mk
-
-# Set up the target identity
 LOCAL_MODULE := libv8platform
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-
 LOCAL_SRC_FILES := \
 	src/libplatform/default-platform.cc \
 	src/libplatform/task-queue.cc \
@@ -16,11 +14,7 @@
 	src/libplatform/tracing/trace-writer.cc \
 	src/libplatform/tracing/tracing-controller.cc \
 	src/libplatform/worker-thread.cc
-
 LOCAL_C_INCLUDES := \
 	$(LOCAL_PATH)/src \
 	$(LOCAL_PATH)/include
-
 include $(BUILD_STATIC_LIBRARY)
-
-
diff --git a/Android.sampler.mk b/Android.sampler.mk
index 06cb684..d7af374 100644
--- a/Android.sampler.mk
+++ b/Android.sampler.mk
@@ -1,15 +1,13 @@
+### GENERATED, do not edit
+### for changes, see genmakefiles.py
 LOCAL_PATH := $(call my-dir)
 include $(CLEAR_VARS)
-
 include $(LOCAL_PATH)/Android.v8common.mk
-
-# Set up the target identity
 LOCAL_MODULE := libv8sampler
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-
 LOCAL_SRC_FILES := \
 	src/libsampler/sampler.cc
-
-LOCAL_C_INCLUDES := $(LOCAL_PATH)/src
-
+LOCAL_C_INCLUDES := \
+	$(LOCAL_PATH)/src \
+	$(LOCAL_PATH)/include
 include $(BUILD_STATIC_LIBRARY)
diff --git a/Android.v8.mk b/Android.v8.mk
index b048eb5..fce8dfe 100644
--- a/Android.v8.mk
+++ b/Android.v8.mk
@@ -1,34 +1,33 @@
+### GENERATED, do not edit
+### for changes, see genmakefiles.py
 LOCAL_PATH := $(call my-dir)
 include $(CLEAR_VARS)
-
 include $(LOCAL_PATH)/Android.v8common.mk
-
-# Set up the target identity
 LOCAL_MODULE := libv8src
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-
 LOCAL_SRC_FILES := \
 	src/accessors.cc \
 	src/address-map.cc \
-	src/allocation.cc \
 	src/allocation-site-scopes.cc \
+	src/allocation.cc \
 	src/api-arguments.cc \
-	src/api.cc \
 	src/api-experimental.cc \
 	src/api-natives.cc \
+	src/api.cc \
 	src/arguments.cc \
 	src/asmjs/asm-js.cc \
 	src/asmjs/asm-typer.cc \
 	src/asmjs/asm-types.cc \
 	src/asmjs/asm-wasm-builder.cc \
+	src/asmjs/switch-logic.cc \
 	src/assembler.cc \
 	src/assert-scope.cc \
-	src/ast/ast.cc \
 	src/ast/ast-expression-rewriter.cc \
 	src/ast/ast-literal-reindexer.cc \
 	src/ast/ast-numbering.cc \
 	src/ast/ast-types.cc \
 	src/ast/ast-value-factory.cc \
+	src/ast/ast.cc \
 	src/ast/compile-time-value.cc \
 	src/ast/context-slot-cache.cc \
 	src/ast/modules.cc \
@@ -39,17 +38,16 @@
 	src/background-parsing-task.cc \
 	src/bailout-reason.cc \
 	src/basic-block-profiler.cc \
-	src/bignum.cc \
 	src/bignum-dtoa.cc \
+	src/bignum.cc \
 	src/bit-vector.cc \
 	src/bootstrapper.cc \
 	src/builtins/builtins-api.cc \
-	src/builtins/builtins-arraybuffer.cc \
 	src/builtins/builtins-array.cc \
+	src/builtins/builtins-arraybuffer.cc \
 	src/builtins/builtins-boolean.cc \
 	src/builtins/builtins-call.cc \
 	src/builtins/builtins-callsite.cc \
-	src/builtins/builtins.cc \
 	src/builtins/builtins-conversion.cc \
 	src/builtins/builtins-dataview.cc \
 	src/builtins/builtins-date.cc \
@@ -66,6 +64,7 @@
 	src/builtins/builtins-math.cc \
 	src/builtins/builtins-number.cc \
 	src/builtins/builtins-object.cc \
+	src/builtins/builtins-promise.cc \
 	src/builtins/builtins-proxy.cc \
 	src/builtins/builtins-reflect.cc \
 	src/builtins/builtins-regexp.cc \
@@ -73,18 +72,23 @@
 	src/builtins/builtins-string.cc \
 	src/builtins/builtins-symbol.cc \
 	src/builtins/builtins-typedarray.cc \
+	src/builtins/builtins.cc \
 	src/cached-powers.cc \
 	src/cancelable-task.cc \
 	src/char-predicates.cc \
 	src/code-factory.cc \
-	src/codegen.cc \
 	src/code-stub-assembler.cc \
-	src/code-stubs.cc \
 	src/code-stubs-hydrogen.cc \
+	src/code-stubs.cc \
+	src/codegen.cc \
 	src/compilation-cache.cc \
 	src/compilation-dependencies.cc \
 	src/compilation-info.cc \
 	src/compilation-statistics.cc \
+	src/compiler-dispatcher/compiler-dispatcher-job.cc \
+	src/compiler-dispatcher/compiler-dispatcher-tracer.cc \
+	src/compiler-dispatcher/optimizing-compile-dispatcher.cc \
+	src/compiler.cc \
 	src/compiler/access-builder.cc \
 	src/compiler/access-info.cc \
 	src/compiler/all-nodes.cc \
@@ -95,35 +99,33 @@
 	src/compiler/bytecode-branch-analysis.cc \
 	src/compiler/bytecode-graph-builder.cc \
 	src/compiler/bytecode-loop-analysis.cc \
-	src/compiler.cc \
-	src/compiler/checkpoint-elimination.cc \
 	src/compiler/c-linkage.cc \
+	src/compiler/checkpoint-elimination.cc \
 	src/compiler/code-assembler.cc \
 	src/compiler/code-generator.cc \
 	src/compiler/common-node-cache.cc \
-	src/compiler/common-operator.cc \
 	src/compiler/common-operator-reducer.cc \
+	src/compiler/common-operator.cc \
+	src/compiler/compiler-source-position-table.cc \
 	src/compiler/control-builders.cc \
 	src/compiler/control-equivalence.cc \
 	src/compiler/control-flow-optimizer.cc \
 	src/compiler/dead-code-elimination.cc \
-	src/compiler-dispatcher/compiler-dispatcher-job.cc \
-	src/compiler-dispatcher/optimizing-compile-dispatcher.cc \
 	src/compiler/effect-control-linearizer.cc \
-	src/compiler/escape-analysis.cc \
 	src/compiler/escape-analysis-reducer.cc \
-	src/compiler/frame.cc \
+	src/compiler/escape-analysis.cc \
 	src/compiler/frame-elider.cc \
 	src/compiler/frame-states.cc \
+	src/compiler/frame.cc \
 	src/compiler/gap-resolver.cc \
-	src/compiler/graph.cc \
 	src/compiler/graph-reducer.cc \
 	src/compiler/graph-replay.cc \
 	src/compiler/graph-trimmer.cc \
 	src/compiler/graph-visualizer.cc \
-	src/compiler/instruction.cc \
+	src/compiler/graph.cc \
 	src/compiler/instruction-scheduler.cc \
 	src/compiler/instruction-selector.cc \
+	src/compiler/instruction.cc \
 	src/compiler/int64-lowering.cc \
 	src/compiler/js-builtin-reducer.cc \
 	src/compiler/js-call-reducer.cc \
@@ -133,62 +135,62 @@
 	src/compiler/js-generic-lowering.cc \
 	src/compiler/js-global-object-specialization.cc \
 	src/compiler/js-graph.cc \
-	src/compiler/js-inlining.cc \
 	src/compiler/js-inlining-heuristic.cc \
+	src/compiler/js-inlining.cc \
 	src/compiler/js-intrinsic-lowering.cc \
 	src/compiler/js-native-context-specialization.cc \
 	src/compiler/js-operator.cc \
 	src/compiler/js-typed-lowering.cc \
 	src/compiler/jump-threading.cc \
 	src/compiler/linkage.cc \
-	src/compiler/liveness-analyzer.cc \
 	src/compiler/live-range-separator.cc \
+	src/compiler/liveness-analyzer.cc \
 	src/compiler/load-elimination.cc \
 	src/compiler/loop-analysis.cc \
 	src/compiler/loop-peeling.cc \
 	src/compiler/loop-variable-optimizer.cc \
 	src/compiler/machine-graph-verifier.cc \
-	src/compiler/machine-operator.cc \
 	src/compiler/machine-operator-reducer.cc \
+	src/compiler/machine-operator.cc \
 	src/compiler/memory-optimizer.cc \
 	src/compiler/move-optimizer.cc \
 	src/compiler/node-cache.cc \
-	src/compiler/node.cc \
 	src/compiler/node-marker.cc \
 	src/compiler/node-matchers.cc \
 	src/compiler/node-properties.cc \
+	src/compiler/node.cc \
 	src/compiler/opcodes.cc \
 	src/compiler/operation-typer.cc \
-	src/compiler/operator.cc \
 	src/compiler/operator-properties.cc \
+	src/compiler/operator.cc \
 	src/compiler/osr.cc \
-	src/compiler/pipeline.cc \
 	src/compiler/pipeline-statistics.cc \
+	src/compiler/pipeline.cc \
 	src/compiler/raw-machine-assembler.cc \
 	src/compiler/redundancy-elimination.cc \
-	src/compiler/register-allocator.cc \
 	src/compiler/register-allocator-verifier.cc \
+	src/compiler/register-allocator.cc \
 	src/compiler/representation-change.cc \
 	src/compiler/schedule.cc \
 	src/compiler/scheduler.cc \
 	src/compiler/select-lowering.cc \
+	src/compiler/simd-scalar-lowering.cc \
 	src/compiler/simplified-lowering.cc \
-	src/compiler/simplified-operator.cc \
 	src/compiler/simplified-operator-reducer.cc \
-	src/compiler/source-position.cc \
+	src/compiler/simplified-operator.cc \
 	src/compiler/state-values-utils.cc \
 	src/compiler/store-store-elimination.cc \
 	src/compiler/tail-call-optimization.cc \
 	src/compiler/type-cache.cc \
-	src/compiler/typed-optimization.cc \
 	src/compiler/type-hint-analyzer.cc \
+	src/compiler/typed-optimization.cc \
 	src/compiler/typer.cc \
 	src/compiler/types.cc \
 	src/compiler/value-numbering-reducer.cc \
 	src/compiler/verifier.cc \
 	src/compiler/wasm-compiler.cc \
 	src/compiler/wasm-linkage.cc \
-	src/compiler/zone-pool.cc \
+	src/compiler/zone-stats.cc \
 	src/context-measure.cc \
 	src/contexts.cc \
 	src/conversions.cc \
@@ -196,7 +198,6 @@
 	src/crankshaft/compilation-phase.cc \
 	src/crankshaft/hydrogen-bce.cc \
 	src/crankshaft/hydrogen-canonicalize.cc \
-	src/crankshaft/hydrogen.cc \
 	src/crankshaft/hydrogen-check-elimination.cc \
 	src/crankshaft/hydrogen-dce.cc \
 	src/crankshaft/hydrogen-dehoist.cc \
@@ -207,7 +208,6 @@
 	src/crankshaft/hydrogen-infer-types.cc \
 	src/crankshaft/hydrogen-instructions.cc \
 	src/crankshaft/hydrogen-load-elimination.cc \
-	src/crankshaft/hydrogen-mark-deoptimize.cc \
 	src/crankshaft/hydrogen-mark-unreachable.cc \
 	src/crankshaft/hydrogen-osr.cc \
 	src/crankshaft/hydrogen-range-analysis.cc \
@@ -218,25 +218,26 @@
 	src/crankshaft/hydrogen-store-elimination.cc \
 	src/crankshaft/hydrogen-types.cc \
 	src/crankshaft/hydrogen-uint32-analysis.cc \
+	src/crankshaft/hydrogen.cc \
 	src/crankshaft/lithium-allocator.cc \
-	src/crankshaft/lithium.cc \
 	src/crankshaft/lithium-codegen.cc \
+	src/crankshaft/lithium.cc \
 	src/crankshaft/typing.cc \
 	src/date.cc \
 	src/dateparser.cc \
-	src/debug/debug.cc \
 	src/debug/debug-evaluate.cc \
 	src/debug/debug-frames.cc \
 	src/debug/debug-scopes.cc \
+	src/debug/debug.cc \
 	src/debug/liveedit.cc \
-	src/deoptimizer.cc \
 	src/deoptimize-reason.cc \
+	src/deoptimizer.cc \
 	src/disassembler.cc \
 	src/diy-fp.cc \
 	src/dtoa.cc \
 	src/eh-frame.cc \
-	src/elements.cc \
 	src/elements-kind.cc \
+	src/elements.cc \
 	src/execution.cc \
 	src/extensions/externalize-string-extension.cc \
 	src/extensions/free-buffer-extension.cc \
@@ -262,13 +263,12 @@
 	src/heap/gc-idle-time-handler.cc \
 	src/heap/gc-tracer.cc \
 	src/heap/heap.cc \
-	src/heap/incremental-marking.cc \
 	src/heap/incremental-marking-job.cc \
+	src/heap/incremental-marking.cc \
 	src/heap/mark-compact.cc \
 	src/heap/memory-reducer.cc \
 	src/heap/object-stats.cc \
 	src/heap/objects-visiting.cc \
-	src/heap/remembered-set.cc \
 	src/heap/scavenge-job.cc \
 	src/heap/scavenger.cc \
 	src/heap/spaces.cc \
@@ -277,9 +277,10 @@
 	src/ic/access-compiler.cc \
 	src/ic/call-optimization.cc \
 	src/ic/handler-compiler.cc \
-	src/ic/ic.cc \
 	src/ic/ic-compiler.cc \
 	src/ic/ic-state.cc \
+	src/ic/ic.cc \
+	src/ic/keyed-store-generic.cc \
 	src/ic/stub-cache.cc \
 	src/icu_util.cc \
 	src/identity-map.cc \
@@ -295,29 +296,29 @@
 	src/interpreter/bytecode-operands.cc \
 	src/interpreter/bytecode-peephole-optimizer.cc \
 	src/interpreter/bytecode-pipeline.cc \
-	src/interpreter/bytecode-register.cc \
 	src/interpreter/bytecode-register-optimizer.cc \
+	src/interpreter/bytecode-register.cc \
 	src/interpreter/bytecodes.cc \
 	src/interpreter/constant-array-builder.cc \
 	src/interpreter/control-flow-builders.cc \
 	src/interpreter/handler-table-builder.cc \
 	src/interpreter/interpreter-assembler.cc \
-	src/interpreter/interpreter.cc \
 	src/interpreter/interpreter-intrinsics.cc \
+	src/interpreter/interpreter.cc \
 	src/isolate.cc \
 	src/json-parser.cc \
 	src/json-stringifier.cc \
 	src/keys.cc \
 	src/layout-descriptor.cc \
-	src/log.cc \
 	src/log-utils.cc \
+	src/log.cc \
 	src/lookup-cache.cc \
 	src/lookup.cc \
 	src/machine-type.cc \
 	src/messages.cc \
-	src/objects.cc \
 	src/objects-debug.cc \
 	src/objects-printer.cc \
+	src/objects.cc \
 	src/ostreams.cc \
 	src/parsing/duplicate-finder.cc \
 	src/parsing/func-name-inferrer.cc \
@@ -328,8 +329,8 @@
 	src/parsing/preparse-data.cc \
 	src/parsing/preparser.cc \
 	src/parsing/rewriter.cc \
-	src/parsing/scanner.cc \
 	src/parsing/scanner-character-streams.cc \
+	src/parsing/scanner.cc \
 	src/parsing/token.cc \
 	src/pending-compilation-error-handler.cc \
 	src/perf-jit.cc \
@@ -343,21 +344,22 @@
 	src/profiler/strings-storage.cc \
 	src/profiler/tick-sample.cc \
 	src/profiler/tracing-cpu-profiler.cc \
-	src/property.cc \
+	src/promise-utils.cc \
 	src/property-descriptor.cc \
+	src/property.cc \
 	src/regexp/interpreter-irregexp.cc \
 	src/regexp/jsregexp.cc \
 	src/regexp/regexp-ast.cc \
-	src/regexp/regexp-macro-assembler.cc \
 	src/regexp/regexp-macro-assembler-irregexp.cc \
 	src/regexp/regexp-macro-assembler-tracer.cc \
+	src/regexp/regexp-macro-assembler.cc \
 	src/regexp/regexp-parser.cc \
 	src/regexp/regexp-stack.cc \
+	src/regexp/regexp-utils.cc \
 	src/register-configuration.cc \
 	src/runtime-profiler.cc \
 	src/runtime/runtime-array.cc \
 	src/runtime/runtime-atomics.cc \
-	src/runtime/runtime.cc \
 	src/runtime/runtime-classes.cc \
 	src/runtime/runtime-collections.cc \
 	src/runtime/runtime-compiler.cc \
@@ -374,9 +376,11 @@
 	src/runtime/runtime-literals.cc \
 	src/runtime/runtime-liveedit.cc \
 	src/runtime/runtime-maths.cc \
+	src/runtime/runtime-module.cc \
 	src/runtime/runtime-numbers.cc \
 	src/runtime/runtime-object.cc \
 	src/runtime/runtime-operators.cc \
+	src/runtime/runtime-promise.cc \
 	src/runtime/runtime-proxy.cc \
 	src/runtime/runtime-regexp.cc \
 	src/runtime/runtime-scopes.cc \
@@ -386,28 +390,32 @@
 	src/runtime/runtime-test.cc \
 	src/runtime/runtime-typedarray.cc \
 	src/runtime/runtime-wasm.cc \
+	src/runtime/runtime.cc \
 	src/safepoint-table.cc \
 	src/snapshot/code-serializer.cc \
 	src/snapshot/deserializer.cc \
 	src/snapshot/natives-common.cc \
 	src/snapshot/partial-serializer.cc \
-	src/snapshot/serializer.cc \
 	src/snapshot/serializer-common.cc \
+	src/snapshot/serializer.cc \
 	src/snapshot/snapshot-common.cc \
 	src/snapshot/snapshot-source-sink.cc \
 	src/snapshot/startup-serializer.cc \
 	src/source-position-table.cc \
+	src/source-position.cc \
 	src/startup-data-util.cc \
 	src/string-builder.cc \
 	src/string-stream.cc \
 	src/strtod.cc \
 	src/tracing/trace-event.cc \
+	src/tracing/traced-value.cc \
+	src/tracing/tracing-category-observer.cc \
 	src/transitions.cc \
 	src/type-feedback-vector.cc \
 	src/type-hints.cc \
 	src/type-info.cc \
-	src/unicode.cc \
 	src/unicode-decoder.cc \
+	src/unicode.cc \
 	src/uri.cc \
 	src/utils.cc \
 	src/v8.cc \
@@ -416,24 +424,23 @@
 	src/version.cc \
 	src/wasm/ast-decoder.cc \
 	src/wasm/module-decoder.cc \
-	src/wasm/switch-logic.cc \
+	src/wasm/signature-map.cc \
 	src/wasm/wasm-debug.cc \
 	src/wasm/wasm-external-refs.cc \
-	src/wasm/wasm-function-name-table.cc \
 	src/wasm/wasm-interpreter.cc \
 	src/wasm/wasm-js.cc \
 	src/wasm/wasm-module-builder.cc \
 	src/wasm/wasm-module.cc \
+	src/wasm/wasm-objects.cc \
 	src/wasm/wasm-opcodes.cc \
 	src/wasm/wasm-result.cc \
 	src/zone/accounting-allocator.cc \
-	src/zone/zone.cc \
-	src/zone/zone-segment.cc
-
+	src/zone/zone-segment.cc \
+	src/zone/zone.cc
 LOCAL_SRC_FILES_arm += \
 	src/arm/assembler-arm.cc \
-	src/arm/codegen-arm.cc \
 	src/arm/code-stubs-arm.cc \
+	src/arm/codegen-arm.cc \
 	src/arm/constants-arm.cc \
 	src/arm/cpu-arm.cc \
 	src/arm/deoptimizer-arm.cc \
@@ -459,11 +466,10 @@
 	src/ic/arm/ic-compiler-arm.cc \
 	src/ic/arm/stub-cache-arm.cc \
 	src/regexp/arm/regexp-macro-assembler-arm.cc
-
 LOCAL_SRC_FILES_arm64 += \
 	src/arm64/assembler-arm64.cc \
-	src/arm64/codegen-arm64.cc \
 	src/arm64/code-stubs-arm64.cc \
+	src/arm64/codegen-arm64.cc \
 	src/arm64/cpu-arm64.cc \
 	src/arm64/decoder-arm64.cc \
 	src/arm64/deoptimizer-arm64.cc \
@@ -493,7 +499,6 @@
 	src/ic/arm64/ic-compiler-arm64.cc \
 	src/ic/arm64/stub-cache-arm64.cc \
 	src/regexp/arm64/regexp-macro-assembler-arm64.cc
-
 LOCAL_SRC_FILES_mips += \
 	src/builtins/mips/builtins-mips.cc \
 	src/compiler/mips/code-generator-mips.cc \
@@ -510,8 +515,8 @@
 	src/ic/mips/ic-mips.cc \
 	src/ic/mips/stub-cache-mips.cc \
 	src/mips/assembler-mips.cc \
-	src/mips/codegen-mips.cc \
 	src/mips/code-stubs-mips.cc \
+	src/mips/codegen-mips.cc \
 	src/mips/constants-mips.cc \
 	src/mips/cpu-mips.cc \
 	src/mips/deoptimizer-mips.cc \
@@ -521,7 +526,6 @@
 	src/mips/macro-assembler-mips.cc \
 	src/mips/simulator-mips.cc \
 	src/regexp/mips/regexp-macro-assembler-mips.cc
-
 LOCAL_SRC_FILES_mips64 += \
 	src/builtins/mips64/builtins-mips64.cc \
 	src/compiler/mips64/code-generator-mips64.cc \
@@ -538,8 +542,8 @@
 	src/ic/mips64/ic-mips64.cc \
 	src/ic/mips64/stub-cache-mips64.cc \
 	src/mips64/assembler-mips64.cc \
-	src/mips64/codegen-mips64.cc \
 	src/mips64/code-stubs-mips64.cc \
+	src/mips64/codegen-mips64.cc \
 	src/mips64/constants-mips64.cc \
 	src/mips64/cpu-mips64.cc \
 	src/mips64/deoptimizer-mips64.cc \
@@ -549,7 +553,6 @@
 	src/mips64/macro-assembler-mips64.cc \
 	src/mips64/simulator-mips64.cc \
 	src/regexp/mips64/regexp-macro-assembler-mips64.cc
-
 LOCAL_SRC_FILES_x86 += \
 	src/builtins/ia32/builtins-ia32.cc \
 	src/compiler/ia32/code-generator-ia32.cc \
@@ -561,8 +564,8 @@
 	src/debug/ia32/debug-ia32.cc \
 	src/full-codegen/ia32/full-codegen-ia32.cc \
 	src/ia32/assembler-ia32.cc \
-	src/ia32/codegen-ia32.cc \
 	src/ia32/code-stubs-ia32.cc \
+	src/ia32/codegen-ia32.cc \
 	src/ia32/cpu-ia32.cc \
 	src/ia32/deoptimizer-ia32.cc \
 	src/ia32/disasm-ia32.cc \
@@ -576,7 +579,6 @@
 	src/ic/ia32/ic-ia32.cc \
 	src/ic/ia32/stub-cache-ia32.cc \
 	src/regexp/ia32/regexp-macro-assembler-ia32.cc
-
 LOCAL_SRC_FILES_x86_64 += \
 	src/builtins/x64/builtins-x64.cc \
 	src/compiler/x64/code-generator-x64.cc \
@@ -595,8 +597,8 @@
 	src/ic/x64/stub-cache-x64.cc \
 	src/regexp/x64/regexp-macro-assembler-x64.cc \
 	src/x64/assembler-x64.cc \
-	src/x64/codegen-x64.cc \
 	src/x64/code-stubs-x64.cc \
+	src/x64/codegen-x64.cc \
 	src/x64/cpu-x64.cc \
 	src/x64/deoptimizer-x64.cc \
 	src/x64/disasm-x64.cc \
@@ -605,23 +607,15 @@
 	src/x64/interface-descriptors-x64.cc \
 	src/x64/macro-assembler-x64.cc \
 	src/x64/simulator-x64.cc
-
-LOCAL_SRC_FILES += \
-	src/snapshot/snapshot-empty.cc \
-
 # Enable DEBUG option.
 ifeq ($(DEBUG_V8),true)
-  LOCAL_SRC_FILES += \
-		src/objects-debug.cc \
-		src/ast/prettyprinter.cc \
-		src/regexp/regexp-macro-assembler-tracer.cc
+LOCAL_SRC_FILES += \
+	src/objects-debug.cc \
+	src/ast/prettyprinter.cc \
+	src/regexp/regexp-macro-assembler-tracer.cc
 endif
-
 LOCAL_C_INCLUDES := \
 	$(LOCAL_PATH)/src \
 	external/icu/icu4c/source/common \
 	external/icu/icu4c/source/i18n
-
 include $(BUILD_STATIC_LIBRARY)
-
-
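
Android.v8.mk leans on the build system's arch-suffixed variables: of the LOCAL_SRC_FILES_arm / _arm64 / _mips / _mips64 / _x86 / _x86_64 lists above, only the one matching the ABI being built is merged into the compile. A minimal sketch of the pattern with placeholder file names:

    # Common sources are always compiled; the per-arch lists are merged
    # in by the Android build system for the matching TARGET_ARCH only.
    LOCAL_SRC_FILES := src/common.cc
    LOCAL_SRC_FILES_arm += src/impl-arm.cc
    LOCAL_SRC_FILES_arm64 += src/impl-arm64.cc
    LOCAL_SRC_FILES_x86 += src/impl-ia32.cc
    LOCAL_SRC_FILES_x86_64 += src/impl-x64.cc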
diff --git a/Android.v8common.mk b/Android.v8common.mk
index c6975a6..223db55 100644
--- a/Android.v8common.mk
+++ b/Android.v8common.mk
@@ -18,7 +18,8 @@
 	-DENABLE_VMSTATE_TRACKING \
 	-DV8_NATIVE_REGEXP \
 	-DV8_I18N_SUPPORT \
-	-std=gnu++0x
+	-std=gnu++0x \
+	-Os
 
 LOCAL_CFLAGS_arm += -DV8_TARGET_ARCH_ARM
 LOCAL_CFLAGS_arm64 += -DV8_TARGET_ARCH_ARM64
diff --git a/Android.v8gen.mk b/Android.v8gen.mk
index 87b3b03..d3f670e 100644
--- a/Android.v8gen.mk
+++ b/Android.v8gen.mk
@@ -1,15 +1,10 @@
+### GENERATED, do not edit
+### for changes, see genmakefiles.py
 LOCAL_PATH := $(call my-dir)
 include $(CLEAR_VARS)
-
 include $(LOCAL_PATH)/Android.v8common.mk
-
-# Set up the target identity
 LOCAL_MODULE := libv8gen
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-
-# The order of these JS library sources is important. The order here determines
-# the ordering of the JS code in libraries.cc, which must be in a specific order
-# to meet compiler dependency requirements.
 V8_LOCAL_JS_LIBRARY_FILES := \
 	src/js/macros.py \
 	src/messages.h \
@@ -19,8 +14,6 @@
 	src/js/symbol.js \
 	src/js/array.js \
 	src/js/string.js \
-	src/js/math.js \
-	src/js/regexp.js \
 	src/js/arraybuffer.js \
 	src/js/typedarray.js \
 	src/js/collection.js \
@@ -28,34 +21,27 @@
 	src/js/collection-iterator.js \
 	src/js/promise.js \
 	src/js/messages.js \
-	src/js/array-iterator.js \
 	src/js/templates.js \
 	src/js/spread.js \
 	src/js/proxy.js \
+	src/js/async-await.js \
 	src/debug/mirrors.js \
 	src/debug/debug.js \
 	src/debug/liveedit.js
-
 V8_LOCAL_JS_EXPERIMENTAL_LIBRARY_FILES := \
 	src/js/macros.py \
 	src/messages.h \
 	src/js/harmony-atomics.js \
 	src/js/harmony-simd.js \
 	src/js/harmony-string-padding.js
-
+LOCAL_SRC_FILES += src/snapshot/snapshot-empty.cc
 LOCAL_JS_LIBRARY_FILES := $(addprefix $(LOCAL_PATH)/, $(V8_LOCAL_JS_LIBRARY_FILES))
 LOCAL_JS_EXPERIMENTAL_LIBRARY_FILES := $(addprefix $(LOCAL_PATH)/, $(V8_LOCAL_JS_EXPERIMENTAL_LIBRARY_FILES))
-
 generated_sources := $(call local-generated-sources-dir)
-
-# Copy js2c.py to generated sources directory and invoke there to avoid
-# generating jsmin.pyc in the source directory
 JS2C_PY := $(generated_sources)/js2c.py $(generated_sources)/jsmin.py
 $(JS2C_PY): $(generated_sources)/%.py : $(LOCAL_PATH)/tools/%.py | $(ACP)
 	@echo "Copying $@"
 	$(copy-file-to-target)
-
-# Generate libraries.cc
 GEN1 := $(generated_sources)/libraries.cc
 $(GEN1): SCRIPT := $(generated_sources)/js2c.py
 $(GEN1): $(LOCAL_JS_LIBRARY_FILES) $(JS2C_PY)
@@ -63,8 +49,6 @@
 	@mkdir -p $(dir $@)
 	python $(SCRIPT) $@ CORE $(LOCAL_JS_LIBRARY_FILES)
 V8_GENERATED_LIBRARIES := $(generated_sources)/libraries.cc
-
-# Generate experimental-libraries.cc
 GEN2 := $(generated_sources)/experimental-libraries.cc
 $(GEN2): SCRIPT := $(generated_sources)/js2c.py
 $(GEN2): $(LOCAL_JS_EXPERIMENTAL_LIBRARY_FILES) $(JS2C_PY)
@@ -72,8 +56,6 @@
 	@mkdir -p $(dir $@)
 	python $(SCRIPT) $@ EXPERIMENTAL $(LOCAL_JS_EXPERIMENTAL_LIBRARY_FILES)
 V8_GENERATED_LIBRARIES += $(generated_sources)/experimental-libraries.cc
-
-# Generate extra-libraries.cc
 GEN3 := $(generated_sources)/extra-libraries.cc
 $(GEN3): SCRIPT := $(generated_sources)/js2c.py
 $(GEN3): $(JS2C_PY)
@@ -81,17 +63,12 @@
 	@mkdir -p $(dir $@)
 	python $(SCRIPT) $@ EXTRAS
 V8_GENERATED_LIBRARIES += $(generated_sources)/extra-libraries.cc
-
-# Generate iexperimental-extra-libraries.cc
-GEN3 := $(generated_sources)/experimental-extra-libraries.cc
-$(GEN3): SCRIPT := $(generated_sources)/js2c.py
-$(GEN3): $(JS2C_PY)
+GEN4 := $(generated_sources)/experimental-extra-libraries.cc
+$(GEN4): SCRIPT := $(generated_sources)/js2c.py
+$(GEN4): $(JS2C_PY)
 	@echo "Generating experimental-extra-libraries.cc"
 	@mkdir -p $(dir $@)
 	python $(SCRIPT) $@ EXPERIMENTAL_EXTRAS
 V8_GENERATED_LIBRARIES += $(generated_sources)/experimental-extra-libraries.cc
-
 LOCAL_GENERATED_SOURCES += $(V8_GENERATED_LIBRARIES)
-
 include $(BUILD_STATIC_LIBRARY)
-
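
Android.v8gen.mk copies js2c.py (and jsmin.py) into the generated-sources directory and invokes it once per bundle, with the mode argument (CORE, EXPERIMENTAL, EXTRAS, EXPERIMENTAL_EXTRAS) selecting which library blob a given .cc carries; the GEN4 rename also stops the fourth rule from reusing the GEN3 variable for a second output. A minimal sketch of one such rule, assuming the JS2C_PY copy rule above; the output name (example-libraries.cc) is a placeholder:

    generated_sources := $(call local-generated-sources-dir)
    GEN_EXAMPLE := $(generated_sources)/example-libraries.cc
    $(GEN_EXAMPLE): SCRIPT := $(generated_sources)/js2c.py
    $(GEN_EXAMPLE): $(JS2C_PY)
    	@echo "Generating example-libraries.cc"
    	@mkdir -p $(dir $@)
    	python $(SCRIPT) $@ EXTRAS
    LOCAL_GENERATED_SOURCES += $(GEN_EXAMPLE)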
diff --git a/BUILD.gn b/BUILD.gn
index 06870b6..8587356 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -43,10 +43,6 @@
   # Sets -dENABLE_HANDLE_ZAPPING.
   v8_enable_handle_zapping = is_debug
 
-  # Enable ECMAScript Internationalization API. Enabling this feature will
-  # add a dependency on the ICU library.
-  v8_enable_i18n_support = true
-
   # Enable slow dchecks.
   v8_enable_slow_dchecks = false
 
@@ -65,6 +61,9 @@
   # Switches off inlining in V8.
   v8_no_inline = false
 
+  # Override OS page size when generating snapshot
+  v8_os_page_size = "0"
+
   # Similar to vfp but on MIPS.
   v8_can_use_fpu_instructions = true
 
@@ -128,6 +127,20 @@
 # This config should be applied to code using the libplatform.
 config("libplatform_config") {
   include_dirs = [ "include" ]
+  if (is_component_build) {
+    defines = [ "USING_V8_PLATFORM_SHARED" ]
+  }
+}
+
+# This config should be applied to code using the libbase.
+config("libbase_config") {
+  if (is_component_build) {
+    defines = [ "USING_V8_BASE_SHARED" ]
+  }
+  libs = []
+  if (is_android && current_toolchain != host_toolchain) {
+    libs += [ "log" ]
+  }
 }
 
 # This config should be applied to code using the libsampler.
@@ -145,10 +158,6 @@
   if (v8_enable_inspector_override) {
     include_dirs += [ "$target_gen_dir/include" ]
   }
-  libs = []
-  if (is_android && current_toolchain != host_toolchain) {
-    libs += [ "log" ]
-  }
 }
 
 # This config should only be applied to code that needs to be explicitly
@@ -361,6 +370,20 @@
       "-fno-inline",
     ]
   }
+
+  if (is_clang) {
+    cflags += [
+      "-Wsign-compare",
+
+      # TODO(hans): Remove once http://crbug.com/428099 is resolved.
+      "-Winconsistent-missing-override",
+    ]
+
+    if (v8_current_cpu == "x64" || v8_current_cpu == "arm64" ||
+        v8_current_cpu == "mips64el") {
+      cflags += [ "-Wshorten-64-to-32" ]
+    }
+  }
 }
 
 ###############################################################################
@@ -388,8 +411,6 @@
     "src/js/symbol.js",
     "src/js/array.js",
     "src/js/string.js",
-    "src/js/math.js",
-    "src/js/regexp.js",
     "src/js/arraybuffer.js",
     "src/js/typedarray.js",
     "src/js/collection.js",
@@ -397,7 +418,6 @@
     "src/js/collection-iterator.js",
     "src/js/promise.js",
     "src/js/messages.js",
-    "src/js/array-iterator.js",
     "src/js/templates.js",
     "src/js/spread.js",
     "src/js/proxy.js",
@@ -662,6 +682,13 @@
     ]
   }
 
+  if (v8_os_page_size != "0") {
+    args += [
+      "--v8_os_page_size",
+      v8_os_page_size,
+    ]
+  }
+
   if (v8_use_external_startup_data) {
     outputs += [ "$root_out_dir/snapshot_blob.bin" ]
     args += [
@@ -715,6 +742,7 @@
     "is_tsan=$is_tsan",
     "target_cpu=\"$target_cpu\"",
     "v8_enable_i18n_support=$v8_enable_i18n_support",
+    "v8_enable_inspector=$v8_enable_inspector_override",
     "v8_target_cpu=\"$v8_target_cpu\"",
     "v8_use_snapshot=$v8_use_snapshot",
   ]
@@ -863,6 +891,8 @@
     "src/asmjs/asm-types.h",
     "src/asmjs/asm-wasm-builder.cc",
     "src/asmjs/asm-wasm-builder.h",
+    "src/asmjs/switch-logic.cc",
+    "src/asmjs/switch-logic.h",
     "src/assembler.cc",
     "src/assembler.h",
     "src/assert-scope.cc",
@@ -930,6 +960,7 @@
     "src/builtins/builtins-math.cc",
     "src/builtins/builtins-number.cc",
     "src/builtins/builtins-object.cc",
+    "src/builtins/builtins-promise.cc",
     "src/builtins/builtins-proxy.cc",
     "src/builtins/builtins-reflect.cc",
     "src/builtins/builtins-regexp.cc",
@@ -969,6 +1000,8 @@
     "src/compilation-statistics.h",
     "src/compiler-dispatcher/compiler-dispatcher-job.cc",
     "src/compiler-dispatcher/compiler-dispatcher-job.h",
+    "src/compiler-dispatcher/compiler-dispatcher-tracer.cc",
+    "src/compiler-dispatcher/compiler-dispatcher-tracer.h",
     "src/compiler-dispatcher/optimizing-compile-dispatcher.cc",
     "src/compiler-dispatcher/optimizing-compile-dispatcher.h",
     "src/compiler.cc",
@@ -1007,6 +1040,8 @@
     "src/compiler/common-operator-reducer.h",
     "src/compiler/common-operator.cc",
     "src/compiler/common-operator.h",
+    "src/compiler/compiler-source-position-table.cc",
+    "src/compiler/compiler-source-position-table.h",
     "src/compiler/control-builders.cc",
     "src/compiler/control-builders.h",
     "src/compiler/control-equivalence.cc",
@@ -1145,14 +1180,14 @@
     "src/compiler/scheduler.h",
     "src/compiler/select-lowering.cc",
     "src/compiler/select-lowering.h",
+    "src/compiler/simd-scalar-lowering.cc",
+    "src/compiler/simd-scalar-lowering.h",
     "src/compiler/simplified-lowering.cc",
     "src/compiler/simplified-lowering.h",
     "src/compiler/simplified-operator-reducer.cc",
     "src/compiler/simplified-operator-reducer.h",
     "src/compiler/simplified-operator.cc",
     "src/compiler/simplified-operator.h",
-    "src/compiler/source-position.cc",
-    "src/compiler/source-position.h",
     "src/compiler/state-values-utils.cc",
     "src/compiler/state-values-utils.h",
     "src/compiler/store-store-elimination.cc",
@@ -1177,8 +1212,8 @@
     "src/compiler/wasm-compiler.cc",
     "src/compiler/wasm-compiler.h",
     "src/compiler/wasm-linkage.cc",
-    "src/compiler/zone-pool.cc",
-    "src/compiler/zone-pool.h",
+    "src/compiler/zone-stats.cc",
+    "src/compiler/zone-stats.h",
     "src/context-measure.cc",
     "src/context-measure.h",
     "src/contexts-inl.h",
@@ -1218,8 +1253,6 @@
     "src/crankshaft/hydrogen-instructions.h",
     "src/crankshaft/hydrogen-load-elimination.cc",
     "src/crankshaft/hydrogen-load-elimination.h",
-    "src/crankshaft/hydrogen-mark-deoptimize.cc",
-    "src/crankshaft/hydrogen-mark-deoptimize.h",
     "src/crankshaft/hydrogen-mark-unreachable.cc",
     "src/crankshaft/hydrogen-mark-unreachable.h",
     "src/crankshaft/hydrogen-osr.cc",
@@ -1262,6 +1295,7 @@
     "src/debug/debug-evaluate.h",
     "src/debug/debug-frames.cc",
     "src/debug/debug-frames.h",
+    "src/debug/debug-interface.h",
     "src/debug/debug-scopes.cc",
     "src/debug/debug-scopes.h",
     "src/debug/debug.cc",
@@ -1363,7 +1397,6 @@
     "src/heap/objects-visiting.cc",
     "src/heap/objects-visiting.h",
     "src/heap/page-parallel-job.h",
-    "src/heap/remembered-set.cc",
     "src/heap/remembered-set.h",
     "src/heap/scavenge-job.cc",
     "src/heap/scavenge-job.h",
@@ -1378,12 +1411,14 @@
     "src/heap/store-buffer.h",
     "src/i18n.cc",
     "src/i18n.h",
+    "src/ic/access-compiler-data.h",
     "src/ic/access-compiler.cc",
     "src/ic/access-compiler.h",
     "src/ic/call-optimization.cc",
     "src/ic/call-optimization.h",
     "src/ic/handler-compiler.cc",
     "src/ic/handler-compiler.h",
+    "src/ic/handler-configuration-inl.h",
     "src/ic/handler-configuration.h",
     "src/ic/ic-compiler.cc",
     "src/ic/ic-compiler.h",
@@ -1392,6 +1427,8 @@
     "src/ic/ic-state.h",
     "src/ic/ic.cc",
     "src/ic/ic.h",
+    "src/ic/keyed-store-generic.cc",
+    "src/ic/keyed-store-generic.h",
     "src/ic/stub-cache.cc",
     "src/ic/stub-cache.h",
     "src/icu_util.cc",
@@ -1541,6 +1578,8 @@
     "src/profiler/tracing-cpu-profiler.h",
     "src/profiler/unbound-queue-inl.h",
     "src/profiler/unbound-queue.h",
+    "src/promise-utils.cc",
+    "src/promise-utils.h",
     "src/property-descriptor.cc",
     "src/property-descriptor.h",
     "src/property-details.h",
@@ -1566,6 +1605,8 @@
     "src/regexp/regexp-parser.h",
     "src/regexp/regexp-stack.cc",
     "src/regexp/regexp-stack.h",
+    "src/regexp/regexp-utils.cc",
+    "src/regexp/regexp-utils.h",
     "src/register-configuration.cc",
     "src/register-configuration.h",
     "src/runtime-profiler.cc",
@@ -1588,9 +1629,11 @@
     "src/runtime/runtime-literals.cc",
     "src/runtime/runtime-liveedit.cc",
     "src/runtime/runtime-maths.cc",
+    "src/runtime/runtime-module.cc",
     "src/runtime/runtime-numbers.cc",
     "src/runtime/runtime-object.cc",
     "src/runtime/runtime-operators.cc",
+    "src/runtime/runtime-promise.cc",
     "src/runtime/runtime-proxy.cc",
     "src/runtime/runtime-regexp.cc",
     "src/runtime/runtime-scopes.cc",
@@ -1628,6 +1671,7 @@
     "src/snapshot/startup-serializer.h",
     "src/source-position-table.cc",
     "src/source-position-table.h",
+    "src/source-position.cc",
     "src/source-position.h",
     "src/splay-tree-inl.h",
     "src/splay-tree.h",
@@ -1642,6 +1686,10 @@
     "src/strtod.h",
     "src/tracing/trace-event.cc",
     "src/tracing/trace-event.h",
+    "src/tracing/traced-value.cc",
+    "src/tracing/traced-value.h",
+    "src/tracing/tracing-category-observer.cc",
+    "src/tracing/tracing-category-observer.h",
     "src/transitions-inl.h",
     "src/transitions.cc",
     "src/transitions.h",
@@ -1680,16 +1728,14 @@
     "src/wasm/ast-decoder.h",
     "src/wasm/decoder.h",
     "src/wasm/leb-helper.h",
+    "src/wasm/managed.h",
     "src/wasm/module-decoder.cc",
     "src/wasm/module-decoder.h",
-    "src/wasm/switch-logic.cc",
-    "src/wasm/switch-logic.h",
+    "src/wasm/signature-map.cc",
+    "src/wasm/signature-map.h",
     "src/wasm/wasm-debug.cc",
-    "src/wasm/wasm-debug.h",
     "src/wasm/wasm-external-refs.cc",
     "src/wasm/wasm-external-refs.h",
-    "src/wasm/wasm-function-name-table.cc",
-    "src/wasm/wasm-function-name-table.h",
     "src/wasm/wasm-interpreter.cc",
     "src/wasm/wasm-interpreter.h",
     "src/wasm/wasm-js.cc",
@@ -1699,6 +1745,8 @@
     "src/wasm/wasm-module-builder.h",
     "src/wasm/wasm-module.cc",
     "src/wasm/wasm-module.h",
+    "src/wasm/wasm-objects.cc",
+    "src/wasm/wasm-objects.h",
     "src/wasm/wasm-opcodes.cc",
     "src/wasm/wasm-opcodes.h",
     "src/wasm/wasm-result.cc",
@@ -1707,6 +1755,7 @@
     "src/zone/accounting-allocator.h",
     "src/zone/zone-allocator.h",
     "src/zone/zone-allocator.h",
+    "src/zone/zone-chunk-list.h",
     "src/zone/zone-containers.h",
     "src/zone/zone-segment.cc",
     "src/zone/zone-segment.h",
@@ -2152,24 +2201,15 @@
   }
 }
 
-v8_source_set("v8_libbase") {
-  visibility = [ ":*" ]  # Only targets in this file can depend on this.
-
+v8_component("v8_libbase") {
   sources = [
     "src/base/adapters.h",
     "src/base/atomic-utils.h",
     "src/base/atomicops.h",
-    "src/base/atomicops_internals_arm64_gcc.h",
-    "src/base/atomicops_internals_arm_gcc.h",
     "src/base/atomicops_internals_atomicword_compat.h",
-    "src/base/atomicops_internals_mac.h",
-    "src/base/atomicops_internals_mips64_gcc.h",
-    "src/base/atomicops_internals_mips_gcc.h",
-    "src/base/atomicops_internals_s390_gcc.h",
-    "src/base/atomicops_internals_tsan.h",
-    "src/base/atomicops_internals_x86_gcc.cc",
-    "src/base/atomicops_internals_x86_gcc.h",
+    "src/base/atomicops_internals_portable.h",
     "src/base/atomicops_internals_x86_msvc.h",
+    "src/base/base-export.h",
     "src/base/bits.cc",
     "src/base/bits.h",
     "src/base/build_config.h",
@@ -2208,6 +2248,7 @@
     "src/base/platform/semaphore.h",
     "src/base/platform/time.cc",
     "src/base/platform/time.h",
+    "src/base/ring-buffer.h",
     "src/base/safe_conversions.h",
     "src/base/safe_conversions_impl.h",
     "src/base/safe_math.h",
@@ -2220,8 +2261,14 @@
 
   configs = [ ":internal_config_base" ]
 
+  public_configs = [ ":libbase_config" ]
+
   defines = []
 
+  if (is_component_build) {
+    defines = [ "BUILDING_V8_BASE_SHARED" ]
+  }
+
   if (is_posix) {
     sources += [ "src/base/platform/platform-posix.cc" ]
   }
@@ -2285,9 +2332,10 @@
   # TODO(jochen): Add support for qnx, freebsd, openbsd, netbsd, and solaris.
 }
 
-v8_source_set("v8_libplatform") {
+v8_component("v8_libplatform") {
   sources = [
     "//base/trace_event/common/trace_event_common.h",
+    "include/libplatform/libplatform-export.h",
     "include/libplatform/libplatform.h",
     "include/libplatform/v8-tracing.h",
     "src/libplatform/default-platform.cc",
@@ -2307,6 +2355,10 @@
 
   configs = [ ":internal_config_base" ]
 
+  if (is_component_build) {
+    defines = [ "BUILDING_V8_PLATFORM_SHARED" ]
+  }
+
   public_configs = [ ":libplatform_config" ]
 
   deps = [
@@ -2344,27 +2396,7 @@
   ]
 
   public_deps = [
-    ":v8_libplatform",
-  ]
-}
-
-# Used by fuzzers that would require exposing too many symbols for a proper
-# component build.
-v8_source_set("fuzzer_support_nocomponent") {
-  visibility = [ ":*" ]  # Only targets in this file can depend on this.
-
-  sources = [
-    "test/fuzzer/fuzzer-support.cc",
-    "test/fuzzer/fuzzer-support.h",
-  ]
-
-  configs = [ ":internal_config_base" ]
-
-  deps = [
-    ":v8_maybe_snapshot",
-  ]
-
-  public_deps = [
+    ":v8_libbase",
     ":v8_libplatform",
   ]
 }
@@ -2393,6 +2425,7 @@
 
     deps = [
       ":v8_base",
+      ":v8_libbase",
       ":v8_libplatform",
       ":v8_nosnapshot",
       "//build/config/sanitizers:deps",
@@ -2515,6 +2548,7 @@
   deps = [
     ":d8_js2c",
     ":v8",
+    ":v8_libbase",
     ":v8_libplatform",
     "//build/config/sanitizers:deps",
     "//build/win:default_exe_manifest",
@@ -2531,6 +2565,11 @@
   if (v8_enable_i18n_support) {
     deps += [ "//third_party/icu" ]
   }
+
+  defines = []
+  if (v8_enable_inspector_override) {
+    defines += [ "V8_INSPECTOR_ENABLED" ]
+  }
 }
 
 v8_isolate_run("d8") {
@@ -2555,6 +2594,7 @@
 
   deps = [
     ":v8",
+    ":v8_libbase",
     ":v8_libplatform",
     "//build/config/sanitizers:deps",
     "//build/win:default_exe_manifest",
@@ -2579,6 +2619,7 @@
 
   deps = [
     ":v8",
+    ":v8_libbase",
     ":v8_libplatform",
     "//build/config/sanitizers:deps",
     "//build/win:default_exe_manifest",
@@ -2601,32 +2642,16 @@
   ]
 
   deps = [
+    ":v8",
+    ":v8_libbase",
     ":v8_libplatform",
     "//build/config/sanitizers:deps",
     "//build/win:default_exe_manifest",
   ]
 
-  if (is_component_build) {
-    # v8_parser_shell can't be built against a shared library, so we
-    # need to depend on the underlying static target in that case.
-    deps += [ ":v8_maybe_snapshot" ]
-  } else {
-    deps += [ ":v8" ]
-  }
-
   if (v8_enable_i18n_support) {
     deps += [ "//third_party/icu" ]
   }
-
-  if (is_win) {
-    # Suppress warnings about importing locally defined symbols.
-    if (is_component_build) {
-      ldflags = [
-        "/ignore:4049",
-        "/ignore:4217",
-      ]
-    }
-  }
 }
 
 if (want_v8_shell) {
@@ -2644,6 +2669,7 @@
 
     deps = [
       ":v8",
+      ":v8_libbase",
       ":v8_libplatform",
       "//build/config/sanitizers:deps",
       "//build/win:default_exe_manifest",
@@ -2693,7 +2719,7 @@
   ]
 
   deps = [
-    ":fuzzer_support_nocomponent",
+    ":fuzzer_support",
   ]
 
   configs = [
@@ -2804,6 +2830,26 @@
 v8_fuzzer("wasm_code_fuzzer") {
 }
 
+v8_source_set("wasm_call_fuzzer") {
+  sources = [
+    "test/fuzzer/wasm-call.cc",
+  ]
+
+  deps = [
+    ":fuzzer_support",
+    ":wasm_module_runner",
+    ":wasm_test_signatures",
+  ]
+
+  configs = [
+    ":external_config",
+    ":internal_config_base",
+  ]
+}
+
+v8_fuzzer("wasm_call_fuzzer") {
+}
+
 v8_source_set("lib_wasm_section_fuzzer") {
   sources = [
     "test/fuzzer/wasm-section-fuzzers.cc",
diff --git a/ChangeLog b/ChangeLog
index 40c8537..2dc7756 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,1690 @@
+2016-11-15: Version 5.6.326
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-15: Version 5.6.325
+
+        [wasm] Be more lenient on the names section (issue 5632).
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-15: Version 5.6.324
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-15: Version 5.6.323
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-15: Version 5.6.322
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-15: Version 5.6.321
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-15: Version 5.6.320
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-15: Version 5.6.319
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-15: Version 5.6.318
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-15: Version 5.6.317
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-14: Version 5.6.316
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-14: Version 5.6.315
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-14: Version 5.6.314
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-14: Version 5.6.313
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-14: Version 5.6.312
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-14: Version 5.6.311
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-14: Version 5.6.310
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-14: Version 5.6.309
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-14: Version 5.6.308
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-14: Version 5.6.307
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-14: Version 5.6.306
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-14: Version 5.6.305
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-12: Version 5.6.304
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-12: Version 5.6.303
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-12: Version 5.6.302
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-11: Version 5.6.301
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-11: Version 5.6.300
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-11: Version 5.6.299
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-11: Version 5.6.298
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-11: Version 5.6.297
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-11: Version 5.6.296
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-11: Version 5.6.295
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-11: Version 5.6.294
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-11: Version 5.6.293
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-11: Version 5.6.292
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-11: Version 5.6.291
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-11: Version 5.6.290
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-11: Version 5.6.289
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.288
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.287
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.286
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.285
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.284
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.283
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.282
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.281
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.280
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.279
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.278
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.277
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.276
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.275
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.274
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.273
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.272
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.271
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.270
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.269
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.268
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-09: Version 5.6.267
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-09: Version 5.6.266
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-09: Version 5.6.265
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-09: Version 5.6.264
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-09: Version 5.6.263
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-09: Version 5.6.262
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-09: Version 5.6.261
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-09: Version 5.6.260
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-09: Version 5.6.259
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-09: Version 5.6.258
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-09: Version 5.6.257
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-08: Version 5.6.256
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-08: Version 5.6.255
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-08: Version 5.6.254
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-08: Version 5.6.253
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-08: Version 5.6.252
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-08: Version 5.6.251
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-08: Version 5.6.250
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-08: Version 5.6.249
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-08: Version 5.6.248
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-08: Version 5.6.247
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-08: Version 5.6.246
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-08: Version 5.6.245
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-08: Version 5.6.244
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.243
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.242
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.241
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.240
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.239
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.238
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.237
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.236
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.235
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.234
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.233
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.232
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.231
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.230
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.229
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.228
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.227
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.226
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.225
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.224
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-06: Version 5.6.223
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-05: Version 5.6.222
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.221
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.220
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.219
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.218
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.217
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.216
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.215
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.214
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.213
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.212
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.211
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.210
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.209
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.208
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.207
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-03: Version 5.6.206
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-03: Version 5.6.205
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-03: Version 5.6.204
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-03: Version 5.6.203
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-03: Version 5.6.202
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-03: Version 5.6.201
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-03: Version 5.6.200
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-03: Version 5.6.199
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.198
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.197
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.196
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.195
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.194
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.193
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.192
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.191
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.190
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.189
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.188
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.187
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.186
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.185
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-31: Version 5.6.184
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-31: Version 5.6.183
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-31: Version 5.6.182
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-31: Version 5.6.181
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-31: Version 5.6.180
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-31: Version 5.6.179
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-29: Version 5.6.178
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.177
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.176
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.175
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.174
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.173
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.172
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.171
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.170
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.169
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.168
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.167
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.166
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.165
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.164
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.163
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.162
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.161
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.160
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.159
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.158
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.157
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.156
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.155
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.154
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.153
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.152
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.151
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.150
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-26: Version 5.6.149
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-25: Version 5.6.148
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-25: Version 5.6.147
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-25: Version 5.6.146
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-25: Version 5.6.145
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-25: Version 5.6.144
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-25: Version 5.6.143
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-24: Version 5.6.142
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-24: Version 5.6.141
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-24: Version 5.6.140
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-24: Version 5.6.139
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-24: Version 5.6.138
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-24: Version 5.6.137
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-24: Version 5.6.136
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-24: Version 5.6.135
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-21: Version 5.6.134
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-21: Version 5.6.133
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-21: Version 5.6.132
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-21: Version 5.6.131
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-21: Version 5.6.130
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-21: Version 5.6.129
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-21: Version 5.6.128
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-21: Version 5.6.127
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-20: Version 5.6.126
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-20: Version 5.6.125
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-20: Version 5.6.124
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-20: Version 5.6.123
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.122
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.121
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.120
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.119
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.118
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.117
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.116
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.115
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.114
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.113
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.112
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.111
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.110
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.109
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.108
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.107
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.106
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.105
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.104
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.103
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.102
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.101
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.100
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.99
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.98
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.97
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.96
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.95
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.94
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.93
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.92
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.91
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.90
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.89
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.88
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.87
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.86
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.85
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.84
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.83
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.82
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.81
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.80
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.79
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.78
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.77
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.76
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.75
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.74
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.73
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.72
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.71
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.70
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-16: Version 5.6.69
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-16: Version 5.6.68
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-15: Version 5.6.67
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-15: Version 5.6.66
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-14: Version 5.6.65
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-14: Version 5.6.64
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-14: Version 5.6.63
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-14: Version 5.6.62
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-14: Version 5.6.61
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-14: Version 5.6.60
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-14: Version 5.6.59
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-14: Version 5.6.58
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-14: Version 5.6.57
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-14: Version 5.6.56
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-14: Version 5.6.55
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-14: Version 5.6.54
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-14: Version 5.6.53
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-13: Version 5.6.52
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-13: Version 5.6.51
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-13: Version 5.6.50
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-13: Version 5.6.49
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-13: Version 5.6.48
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-13: Version 5.6.47
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-13: Version 5.6.46
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-13: Version 5.6.45
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-13: Version 5.6.44
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-13: Version 5.6.43
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-13: Version 5.6.42
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-13: Version 5.6.41
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-12: Version 5.6.40
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-12: Version 5.6.39
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-12: Version 5.6.38
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.37
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.36
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.35
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.34
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.33
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.32
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.31
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.30
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.29
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.28
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.27
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.26
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.25
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.24
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.23
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.22
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.21
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.20
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.19
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.18
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-10: Version 5.6.17
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-10: Version 5.6.16
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-10: Version 5.6.15
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-10: Version 5.6.14
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-10: Version 5.6.13
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-09: Version 5.6.12
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-08: Version 5.6.11
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-08: Version 5.6.10
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-07: Version 5.6.9
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-07: Version 5.6.8
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-07: Version 5.6.7
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-07: Version 5.6.6
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-07: Version 5.6.5
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-07: Version 5.6.4
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-07: Version 5.6.3
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-07: Version 5.6.2
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-07: Version 5.6.1
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-06: Version 5.5.383
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-06: Version 5.5.382
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-06: Version 5.5.381
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-06: Version 5.5.380
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-06: Version 5.5.379
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-06: Version 5.5.378
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-06: Version 5.5.377
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-06: Version 5.5.376
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-05: Version 5.5.375
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-05: Version 5.5.374
+
+        Performance and stability improvements on all platforms.
+
+
+2016-10-05: Version 5.5.373
+
+        Performance and stability improvements on all platforms.
+
+
 2016-10-05: Version 5.5.372
 
         Performance and stability improvements on all platforms.
diff --git a/DEPS b/DEPS
index 058cd8b..161015d 100644
--- a/DEPS
+++ b/DEPS
@@ -8,19 +8,17 @@
 
 deps = {
   "v8/build":
-    Var("chromium_url") + "/chromium/src/build.git" + "@" + "475d5b37ded6589c9f8a0d19ced54ddf2e6d14a0",
+    Var("chromium_url") + "/chromium/src/build.git" + "@" + "a3b623a6eff6dc9d58a03251ae22bccf92f67cb2",
   "v8/tools/gyp":
     Var("chromium_url") + "/external/gyp.git" + "@" + "e7079f0e0e14108ab0dba58728ff219637458563",
   "v8/third_party/icu":
-    Var("chromium_url") + "/chromium/deps/icu.git" + "@" + "b0bd3ee50bc2e768d7a17cbc60d87f517f024dbe",
+    Var("chromium_url") + "/chromium/deps/icu.git" + "@" + "c1a237113f525a1561d4b322d7653e1083f79aaa",
   "v8/third_party/instrumented_libraries":
     Var("chromium_url") + "/chromium/src/third_party/instrumented_libraries.git" + "@" + "45f5814b1543e41ea0be54c771e3840ea52cca4a",
   "v8/buildtools":
-    Var("chromium_url") + "/chromium/buildtools.git" + "@" + "5fd66957f08bb752dca714a591c84587c9d70762",
+    Var("chromium_url") + "/chromium/buildtools.git" + "@" + "39b1db2ab4aa4b2ccaa263c29bdf63e7c1ee28aa",
   "v8/base/trace_event/common":
-    Var("chromium_url") + "/chromium/src/base/trace_event/common.git" + "@" + "e0fa02a02f61430dae2bddfd89a334ea4389f495",
-  "v8/third_party/WebKit/Source/platform/inspector_protocol":
-    Var("chromium_url") + "/chromium/src/third_party/WebKit/Source/platform/inspector_protocol.git" + "@" + "3280c57c4c575ce82ccd13e4a403492fb4ca624b",
+    Var("chromium_url") + "/chromium/src/base/trace_event/common.git" + "@" + "06294c8a4a6f744ef284cd63cfe54dbf61eea290",
   "v8/third_party/jinja2":
     Var("chromium_url") + "/chromium/src/third_party/jinja2.git" + "@" + "b61a2c009a579593a259c1b300e0ad02bf48fd78",
   "v8/third_party/markupsafe":
@@ -37,17 +35,19 @@
     Var("chromium_url") + "/v8/deps/third_party/mozilla-tests.git" + "@" + "f6c578a10ea707b1a8ab0b88943fe5115ce2b9be",
   "v8/test/simdjs/data": Var("chromium_url") + "/external/github.com/tc39/ecmascript_simd.git" + "@" + "baf493985cb9ea7cdbd0d68704860a8156de9556",
   "v8/test/test262/data":
-    Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "29c23844494a7cc2fbebc6948d2cb0bcaddb24e7",
+    Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "fb61ab44eb1bbc2699d714fc00e33af2a19411ce",
   "v8/test/test262/harness":
     Var("chromium_url") + "/external/github.com/test262-utils/test262-harness-py.git" + "@" + "cbd968f54f7a95c6556d53ba852292a4c49d11d8",
   "v8/tools/clang":
-    Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "1f92f999fc374a479e98a189ebdfe25c09484486",
+    Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "75350a858c51ad69e2aae051a8727534542da29f",
 }
 
 deps_os = {
   "android": {
     "v8/third_party/android_tools":
       Var("chromium_url") + "/android_tools.git" + "@" + "25d57ead05d3dfef26e9c19b13ed10b0a69829cf",
+    "v8/third_party/catapult":
+      Var('chromium_url') + "/external/github.com/catapult-project/catapult.git" + "@" + "6962f5c0344a79b152bf84460a93e1b2e11ea0f4",
   },
   "win": {
     "v8/third_party/cygwin":
diff --git a/Makefile b/Makefile
index a6d4d13..6eeac09 100644
--- a/Makefile
+++ b/Makefile
@@ -163,6 +163,14 @@
 ifeq ($(goma), on)
   GYPFLAGS += -Duse_goma=1
 endif
+# v8_os_page_size: when 0 or not specified, the page size of the build OS is used.
+ifdef v8_os_page_size
+  ifneq ($(v8_os_page_size), 0)
+    ifneq ($(snapshot), off)
+      GYPFLAGS += -Dv8_os_page_size=$(v8_os_page_size)
+    endif
+  endif
+endif
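+# Example (hypothetical invocation): make arm64.release v8_os_page_size=65536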
 # arm specific flags.
 # arm_version=<number | "default">
 ifneq ($(strip $(arm_version)),)
diff --git a/PRESUBMIT.py b/PRESUBMIT.py
index 78e7482..ad21833 100644
--- a/PRESUBMIT.py
+++ b/PRESUBMIT.py
@@ -67,7 +67,6 @@
         input_api.PresubmitLocalPath(), 'tools'))
   from presubmit import CppLintProcessor
   from presubmit import SourceProcessor
-  from presubmit import CheckExternalReferenceRegistration
   from presubmit import CheckAuthorizedAuthor
   from presubmit import CheckStatusFiles
 
@@ -78,9 +77,6 @@
     results.append(output_api.PresubmitError(
         "Copyright header, trailing whitespaces and two empty lines " \
         "between declarations check failed"))
-  if not CheckExternalReferenceRegistration(input_api.PresubmitLocalPath()):
-    results.append(output_api.PresubmitError(
-        "External references registration check failed"))
   if not CheckStatusFiles(input_api.PresubmitLocalPath()):
     results.append(output_api.PresubmitError("Status file check failed"))
   results.extend(CheckAuthorizedAuthor(input_api, output_api))
diff --git a/V8_MERGE_REVISION b/V8_MERGE_REVISION
index c253eac..3a30d8d 100644
--- a/V8_MERGE_REVISION
+++ b/V8_MERGE_REVISION
@@ -1,2 +1,2 @@
-v8 5.5.372.32
-https://chromium.googlesource.com/v8/v8/+/5.5.372.32
+v8 5.6.326.50
+https://chromium.googlesource.com/v8/v8/+/5.6.326.50
diff --git a/build_overrides/v8.gni b/build_overrides/v8.gni
index 09ea457..df8320d 100644
--- a/build_overrides/v8.gni
+++ b/build_overrides/v8.gni
@@ -26,7 +26,7 @@
 
 declare_args() {
   # Enable inspector. See include/v8-inspector.h.
-  v8_enable_inspector = false
+  v8_enable_inspector = true
 }
 
 v8_enable_inspector_override = v8_enable_inspector
diff --git a/genmakefiles.py b/genmakefiles.py
new file mode 100644
index 0000000..44b08ee
--- /dev/null
+++ b/genmakefiles.py
@@ -0,0 +1,304 @@
+#!/usr/bin/env python
+
+import os
+
+# Given a list of source files from the V8 gyp file, create a string that
+# can be put into the LOCAL_SRC_FILES makefile variable. One per line, followed
+# by a '\' and with the 'src' directory prepended.
+def _makefileSources(src_list):
+  result = ""
+  for i in xrange(0, len(src_list)):
+    result += '\tsrc/' + src_list[i]
+    if i != len(src_list) - 1:
+      result += ' \\'
+    result += '\n'
+  return result
+
+# Return a string that contains the common header variable setup used in
+# (most of) the V8 makefiles.
+def _makefileCommonHeader(module_name):
+  result = ""
+  result += '### GENERATED, do not edit\n'
+  result += '### for changes, see genmakefiles.py\n'
+  result += 'LOCAL_PATH := $(call my-dir)\n'
+  result += 'include $(CLEAR_VARS)\n'
+  result += 'include $(LOCAL_PATH)/Android.v8common.mk\n'
+  result += 'LOCAL_MODULE := ' + module_name + '\n'
+  return result
+
+# Write a makefile in the simpler format used by v8_libplatform and
+# v8_libsampler
+def _writeMakefile(filename, module_name, sources):
+  if not sources:
+    raise ValueError('No sources for ' + filename)
+
+  with open(filename, 'w') as out:
+    out.write(_makefileCommonHeader(module_name))
+    out.write('LOCAL_MODULE_CLASS := STATIC_LIBRARIES\n')
+
+    out.write('LOCAL_SRC_FILES := \\\n')
+
+    out.write(_makefileSources(sources))
+
+    out.write('LOCAL_C_INCLUDES := \\\n')
+    out.write('\t$(LOCAL_PATH)/src \\\n')
+    out.write('\t$(LOCAL_PATH)/include\n')
+
+    out.write('include $(BUILD_STATIC_LIBRARY)\n')
+
+def _writeMkpeepholeMakefile(target):
+  if not target:
+    raise ValueError('Must specify mkpeephole target properties')
+
+  with open('Android.mkpeephole.mk', 'w') as out:
+    out.write(_makefileCommonHeader('v8mkpeephole'))
+    out.write('LOCAL_SRC_FILES := \\\n')
+    sources = [x for x in target['sources'] if x.endswith('.cc')]
+    sources.sort()
+
+    out.write(_makefileSources(sources))
+
+    out.write('LOCAL_STATIC_LIBRARIES += libv8base liblog\n')
+    out.write('LOCAL_LDLIBS_linux += -lrt\n')
+    out.write('include $(BUILD_HOST_EXECUTABLE)\n')
+
+    out.write('include $(CLEAR_VARS)\n')
+    out.write('include $(LOCAL_PATH)/Android.v8common.mk\n')
+    out.write('LOCAL_MODULE := v8peephole\n')
+    out.write('LOCAL_MODULE_CLASS := STATIC_LIBRARIES\n')
+    out.write('generated_sources := $(call local-generated-sources-dir)\n')
+    out.write('PEEPHOLE_TOOL := $(HOST_OUT_EXECUTABLES)/v8mkpeephole\n')
+    out.write('PEEPHOLE_FILE := ' \
+        '$(generated_sources)/bytecode-peephole-table.cc\n')
+    out.write('$(PEEPHOLE_FILE): PRIVATE_CUSTOM_TOOL = ' \
+        '$(PEEPHOLE_TOOL) $(PEEPHOLE_FILE)\n')
+    out.write('$(PEEPHOLE_FILE): $(PEEPHOLE_TOOL)\n')
+    out.write('\t$(transform-generated-source)\n')
+    out.write('LOCAL_GENERATED_SOURCES += $(PEEPHOLE_FILE)\n')
+    out.write('include $(BUILD_STATIC_LIBRARY)\n')
+
+def _writeV8SrcMakefile(target):
+  if not target:
+    raise ValueError('Must specify v8_base target properties')
+
+  with open('Android.v8.mk', 'w') as out:
+    out.write(_makefileCommonHeader('libv8src'))
+    out.write('LOCAL_MODULE_CLASS := STATIC_LIBRARIES\n')
+
+    out.write('LOCAL_SRC_FILES := \\\n')
+
+    sources = [x for x in target['sources'] if x.endswith('.cc')]
+    sources.sort()
+
+    out.write(_makefileSources(sources))
+
+    arm_src = None
+    arm64_src = None
+    x86_src = None
+    x86_64_src = None
+    mips_src = None
+    mips64_src = None
+    for condition in target['conditions']:
+      if condition[0] == 'v8_target_arch=="arm"':
+        arm_src = [x for x in condition[1]['sources'] if x.endswith('.cc')]
+      elif condition[0] == 'v8_target_arch=="arm64"':
+        arm64_src = [x for x in condition[1]['sources'] if x.endswith('.cc')]
+      elif condition[0] == 'v8_target_arch=="ia32"':
+        x86_src = [x for x in condition[1]['sources'] if x.endswith('.cc')]
+      elif condition[0] \
+          == 'v8_target_arch=="mips" or v8_target_arch=="mipsel"':
+        mips_src = [x for x in condition[1]['sources'] if x.endswith('.cc')]
+      elif condition[0] \
+          == 'v8_target_arch=="mips64" or v8_target_arch=="mips64el"':
+        mips64_src = [x for x in condition[1]['sources'] if x.endswith('.cc')]
+      elif condition[0] == 'v8_target_arch=="x64"':
+        x86_64_src = [x for x in condition[1]['sources'] if x.endswith('.cc')]
+
+    arm_src.sort()
+    arm64_src.sort()
+    x86_src.sort()
+    x86_64_src.sort()
+    mips_src.sort()
+    mips64_src.sort()
+
+    out.write('LOCAL_SRC_FILES_arm += \\\n')
+    out.write(_makefileSources(arm_src))
+    out.write('LOCAL_SRC_FILES_arm64 += \\\n')
+    out.write(_makefileSources(arm64_src))
+    out.write('LOCAL_SRC_FILES_mips += \\\n')
+    out.write(_makefileSources(mips_src))
+    out.write('LOCAL_SRC_FILES_mips64 += \\\n')
+    out.write(_makefileSources(mips64_src))
+    out.write('LOCAL_SRC_FILES_x86 += \\\n')
+    out.write(_makefileSources(x86_src))
+    out.write('LOCAL_SRC_FILES_x86_64 += \\\n')
+    out.write(_makefileSources(x86_64_src))
+
+    out.write('# Enable DEBUG option.\n')
+    out.write('ifeq ($(DEBUG_V8),true)\n')
+    out.write('LOCAL_SRC_FILES += \\\n')
+    out.write('\tsrc/objects-debug.cc \\\n')
+    out.write('\tsrc/ast/prettyprinter.cc \\\n')
+    out.write('\tsrc/regexp/regexp-macro-assembler-tracer.cc\n')
+    out.write('endif\n')
+    out.write('LOCAL_C_INCLUDES := \\\n')
+    out.write('\t$(LOCAL_PATH)/src \\\n')
+    out.write('\texternal/icu/icu4c/source/common \\\n')
+    out.write('\texternal/icu/icu4c/source/i18n\n')
+    out.write('include $(BUILD_STATIC_LIBRARY)\n')
+
+def _writeGeneratedFilesMakefile(target):
+  if not target:
+    raise ValueError('Must specify js2c target properties')
+
+  with open('Android.v8gen.mk', 'w') as out:
+    out.write(_makefileCommonHeader('libv8gen'))
+    out.write('LOCAL_MODULE_CLASS := STATIC_LIBRARIES\n')
+
+    sources = target['variables']['library_files']
+    out.write('V8_LOCAL_JS_LIBRARY_FILES := \\\n')
+    out.write(_makefileSources(sources))
+
+    sources = target['variables']['experimental_library_files']
+    out.write('V8_LOCAL_JS_EXPERIMENTAL_LIBRARY_FILES := \\\n')
+    out.write(_makefileSources(sources))
+
+    out.write('LOCAL_SRC_FILES += src/snapshot/snapshot-empty.cc\n')
+
+    out.write('LOCAL_JS_LIBRARY_FILES := ' \
+        '$(addprefix $(LOCAL_PATH)/, $(V8_LOCAL_JS_LIBRARY_FILES))\n')
+    out.write('LOCAL_JS_EXPERIMENTAL_LIBRARY_FILES := ' \
+        '$(addprefix $(LOCAL_PATH)/, ' \
+        '$(V8_LOCAL_JS_EXPERIMENTAL_LIBRARY_FILES))\n')
+    out.write('generated_sources := $(call local-generated-sources-dir)\n')
+    out.write('')
+
+    # Copy js2c.py to the generated sources directory and invoke it there to
+    # avoid generating jsmin.pyc in the source directory.
+    out.write('JS2C_PY := ' \
+        '$(generated_sources)/js2c.py $(generated_sources)/jsmin.py\n')
+    out.write('$(JS2C_PY): ' \
+        '$(generated_sources)/%.py : $(LOCAL_PATH)/tools/%.py | $(ACP)\n')
+    out.write('\t@echo "Copying $@"\n')
+    out.write('\t$(copy-file-to-target)\n')
+
+    # Generate libraries.cc
+    out.write('GEN1 := $(generated_sources)/libraries.cc\n')
+    out.write('$(GEN1): SCRIPT := $(generated_sources)/js2c.py\n')
+    out.write('$(GEN1): $(LOCAL_JS_LIBRARY_FILES) $(JS2C_PY)\n')
+    out.write('\t@echo "Generating libraries.cc"\n')
+    out.write('\t@mkdir -p $(dir $@)\n')
+    out.write('\tpython $(SCRIPT) $@ CORE $(LOCAL_JS_LIBRARY_FILES)\n')
+    out.write('V8_GENERATED_LIBRARIES := $(generated_sources)/libraries.cc\n')
+
+    # Generate experimental-libraries.cc
+    out.write('GEN2 := $(generated_sources)/experimental-libraries.cc\n')
+    out.write('$(GEN2): SCRIPT := $(generated_sources)/js2c.py\n')
+    out.write('$(GEN2): $(LOCAL_JS_EXPERIMENTAL_LIBRARY_FILES) $(JS2C_PY)\n')
+    out.write('\t@echo "Generating experimental-libraries.cc"\n')
+    out.write('\t@mkdir -p $(dir $@)\n')
+    out.write('\tpython $(SCRIPT) $@ EXPERIMENTAL ' \
+        '$(LOCAL_JS_EXPERIMENTAL_LIBRARY_FILES)\n')
+    out.write('V8_GENERATED_LIBRARIES ' \
+        '+= $(generated_sources)/experimental-libraries.cc\n')
+
+    # Generate extra-libraries.cc
+    out.write('GEN3 := $(generated_sources)/extra-libraries.cc\n')
+    out.write('$(GEN3): SCRIPT := $(generated_sources)/js2c.py\n')
+    out.write('$(GEN3): $(JS2C_PY)\n')
+    out.write('\t@echo "Generating extra-libraries.cc"\n')
+    out.write('\t@mkdir -p $(dir $@)\n')
+    out.write('\tpython $(SCRIPT) $@ EXTRAS\n')
+    out.write('V8_GENERATED_LIBRARIES ' \
+        '+= $(generated_sources)/extra-libraries.cc\n')
+
+    # Generate experimental-extra-libraries.cc
+    out.write('GEN4 := $(generated_sources)/experimental-extra-libraries.cc\n')
+    out.write('$(GEN4): SCRIPT := $(generated_sources)/js2c.py\n')
+    out.write('$(GEN4): $(JS2C_PY)\n')
+    out.write('\t@echo "Generating experimental-extra-libraries.cc"\n')
+    out.write('\t@mkdir -p $(dir $@)\n')
+    out.write('\tpython $(SCRIPT) $@ EXPERIMENTAL_EXTRAS\n')
+    out.write('V8_GENERATED_LIBRARIES ' \
+        '+= $(generated_sources)/experimental-extra-libraries.cc\n')
+
+    out.write('LOCAL_GENERATED_SOURCES += $(V8_GENERATED_LIBRARIES)\n')
+
+    out.write('include $(BUILD_STATIC_LIBRARY)\n')
+
+def _writeLibBaseMakefile(target):
+  if not target:
+    raise ValueError('Must specify v8_libbase target properties')
+
+  with open('Android.base.mk', 'w') as out:
+    out.write(_makefileCommonHeader('libv8base'))
+    out.write('LOCAL_MODULE_CLASS := STATIC_LIBRARIES\n')
+
+    out.write('LOCAL_SRC_FILES := \\\n')
+
+    sources = [x for x in target['sources'] if x.endswith('.cc')]
+    sources += ['base/platform/platform-posix.cc']
+    sources.sort()
+
+    out.write(_makefileSources(sources))
+    out.write('LOCAL_SRC_FILES += \\\n')
+    out.write('\tsrc/base/debug/stack_trace_android.cc \\\n')
+    out.write('\tsrc/base/platform/platform-linux.cc\n')
+
+    out.write('LOCAL_C_INCLUDES := $(LOCAL_PATH)/src\n')
+    out.write('include $(BUILD_STATIC_LIBRARY)\n\n')
+
+    out.write('include $(CLEAR_VARS)\n')
+
+    out.write('include $(LOCAL_PATH)/Android.v8common.mk\n')
+
+    # Set up the target identity
+    out.write('LOCAL_MODULE := libv8base\n')
+    out.write('LOCAL_MODULE_CLASS := STATIC_LIBRARIES\n')
+
+    out.write('LOCAL_SRC_FILES := \\\n')
+    out.write(_makefileSources(sources))
+
+    # Host may be linux or darwin.
+    out.write('ifeq ($(HOST_OS),linux)\n')
+    out.write('LOCAL_SRC_FILES += \\\n')
+    out.write('\tsrc/base/platform/platform-linux.cc \\\n')
+    out.write('\tsrc/base/debug/stack_trace_posix.cc\n')
+    out.write('endif\n')
+    out.write('ifeq ($(HOST_OS),darwin)\n')
+    out.write('LOCAL_SRC_FILES += \\\n')
+    out.write('\tsrc/base/platform/platform-macos.cc\n')
+    out.write('endif\n')
+
+    out.write('LOCAL_C_INCLUDES := $(LOCAL_PATH)/src\n')
+    out.write('include $(BUILD_HOST_STATIC_LIBRARY)\n')
+
+
+# Slurp in the content of the V8 gyp file.
+with open(os.path.join(os.getcwd(), './src/v8.gyp'), 'r') as f:
+  gyp = eval(f.read())
+
+# Find the targets that we're interested in and write out the makefiles.
+for target in gyp['targets']:
+  name = target['target_name']
+  sources = None
+  if target.get('sources'):
+    sources = [x for x in target['sources'] if x.endswith('.cc')]
+    sources.sort()
+
+  if name == 'v8_libplatform':
+    _writeMakefile('Android.platform.mk', 'libv8platform', sources)
+  elif name == 'v8_libsampler':
+    _writeMakefile('Android.sampler.mk', 'libv8sampler', sources)
+  elif name == 'v8_base':
+    _writeV8SrcMakefile(target)
+  elif name == 'mkpeephole':
+    _writeMkpeepholeMakefile(target)
+  elif name == 'js2c':
+    _writeGeneratedFilesMakefile(target)
+  elif name == 'v8_libbase':
+    _writeLibBaseMakefile(target)
+
+
+
+
diff --git a/gni/isolate.gni b/gni/isolate.gni
index 93c828d..1cc3a38 100644
--- a/gni/isolate.gni
+++ b/gni/isolate.gni
@@ -3,6 +3,7 @@
 # found in the LICENSE file.
 
 import("//build/config/sanitizers/sanitizers.gni")
+import("//build_overrides/v8.gni")
 import("//third_party/icu/config.gni")
 import("v8.gni")
 
@@ -12,11 +13,21 @@
 }
 
 template("v8_isolate_run") {
+  forward_variables_from(invoker,
+                         "*",
+                         [
+                           "deps",
+                           "isolate",
+                         ])
+
   # Remember target name as within the action scope the target name will be
   # different.
   name = target_name
-  if (name != "" && invoker.isolate != "" && invoker.deps != [] &&
-      v8_test_isolation_mode != "noop") {
+
+  assert(defined(invoker.deps))
+  assert(defined(invoker.isolate))
+
+  if (name != "" && v8_test_isolation_mode != "noop") {
     action(name + "_run") {
       testonly = true
 
@@ -86,6 +97,11 @@
       } else {
         icu_use_data_file_flag = "0"
       }
+      if (v8_enable_inspector_override) {
+        enable_inspector = "1"
+      } else {
+        enable_inspector = "0"
+      }
       if (v8_use_external_startup_data) {
         use_external_startup_data = "1"
       } else {
@@ -107,7 +123,6 @@
         gcmole = "0"
       }
 
-
       # Note, all paths will be rebased in isolate_driver.py to be relative to
       # the isolate file.
       args = [
@@ -142,6 +157,8 @@
         "--config-variable",
         "icu_use_data_file_flag=$icu_use_data_file_flag",
         "--config-variable",
+        "is_gn=1",
+        "--config-variable",
         "msan=$msan",
         "--config-variable",
         "tsan=$tsan",
@@ -154,6 +171,8 @@
         "--config-variable",
         "target_arch=$target_arch",
         "--config-variable",
+        "v8_enable_inspector=$enable_inspector",
+        "--config-variable",
         "v8_use_external_startup_data=$use_external_startup_data",
         "--config-variable",
         "v8_use_snapshot=$use_snapshot",
diff --git a/gni/v8.gni b/gni/v8.gni
index 7ff7f6f..3759572 100644
--- a/gni/v8.gni
+++ b/gni/v8.gni
@@ -26,6 +26,10 @@
   # Use external files for startup data blobs:
   # the JS builtins sources and the start snapshot.
   v8_use_external_startup_data = ""
+
+  # Enable ECMAScript Internationalization API. Enabling this feature will
+  # add a dependency on the ICU library.
+  v8_enable_i18n_support = true
 }
 
 if (v8_use_external_startup_data == "") {
@@ -38,15 +42,17 @@
   v8_enable_backtrace = is_debug && !v8_optimized_debug
 }
 
-###############################################################################
-# Templates
-#
-
 # Points to // in v8 stand-alone or to //v8/ in chromium. We need absolute
 # paths for all configs in templates as they are shared in different
 # subdirectories.
 v8_path_prefix = get_path_info("../", "abspath")
 
+v8_inspector_js_protocol = v8_path_prefix + "/src/inspector/js_protocol.json"
+
+###############################################################################
+# Templates
+#
+
 # Common configs to remove or add in all v8 targets.
 v8_remove_configs = [ "//build/config/compiler:chromium_code" ]
 v8_add_configs = [
@@ -87,7 +93,15 @@
 
 template("v8_executable") {
   executable(target_name) {
-    forward_variables_from(invoker, "*", [ "configs" ])
+    forward_variables_from(invoker,
+                           "*",
+                           [
+                             "configs",
+                             "remove_configs",
+                           ])
+    if (defined(invoker.remove_configs)) {
+      configs -= invoker.remove_configs
+    }
     configs += invoker.configs
     configs -= v8_remove_configs
     configs += v8_add_configs
diff --git a/gypfiles/all.gyp b/gypfiles/all.gyp
index 6b4ef82..a3f2eed 100644
--- a/gypfiles/all.gyp
+++ b/gypfiles/all.gyp
@@ -25,6 +25,12 @@
             '../test/unittests/unittests.gyp:*',
           ],
         }],
+        ['v8_enable_inspector==1', {
+          'dependencies': [
+            '../test/debugger/debugger.gyp:*',
+            '../test/inspector/inspector.gyp:*',
+          ],
+        }],
         ['test_isolation_mode != "noop"', {
           'dependencies': [
             '../test/bot_default.gyp:*',
diff --git a/gypfiles/get_landmines.py b/gypfiles/get_landmines.py
index 432dfd7..e6b6da6 100755
--- a/gypfiles/get_landmines.py
+++ b/gypfiles/get_landmines.py
@@ -30,6 +30,7 @@
   print 'Clobber after Android NDK update.'
   print 'Clober to fix windows build problems.'
   print 'Clober again to fix windows build problems.'
+  print 'Clobber to possibly resolve failure on win-32 bot.'
   return 0
 
 
diff --git a/gypfiles/isolate.gypi b/gypfiles/isolate.gypi
index 149818c..8f53a15 100644
--- a/gypfiles/isolate.gypi
+++ b/gypfiles/isolate.gypi
@@ -74,12 +74,14 @@
         '--config-variable', 'gcmole=<(gcmole)',
         '--config-variable', 'has_valgrind=<(has_valgrind)',
         '--config-variable', 'icu_use_data_file_flag=<(icu_use_data_file_flag)',
+        '--config-variable', 'is_gn=0',
         '--config-variable', 'msan=<(msan)',
         '--config-variable', 'tsan=<(tsan)',
         '--config-variable', 'coverage=<(coverage)',
         '--config-variable', 'sanitizer_coverage=<(sanitizer_coverage)',
         '--config-variable', 'component=<(component)',
         '--config-variable', 'target_arch=<(target_arch)',
+        '--config-variable', 'v8_enable_inspector=<(v8_enable_inspector)',
         '--config-variable', 'v8_use_external_startup_data=<(v8_use_external_startup_data)',
         '--config-variable', 'v8_use_snapshot=<(v8_use_snapshot)',
       ],
diff --git a/gypfiles/standalone.gypi b/gypfiles/standalone.gypi
index 7e41ce8..d438a5a 100644
--- a/gypfiles/standalone.gypi
+++ b/gypfiles/standalone.gypi
@@ -455,6 +455,7 @@
     'variables': {
       'v8_code%': '<(v8_code)',
       'clang_warning_flags': [
+        '-Wsign-compare',
         # TODO(thakis): https://crbug.com/604888
         '-Wno-undefined-var-template',
         # TODO(yangguo): issue 5258
@@ -503,7 +504,9 @@
     },
     'conditions':[
       ['clang==0', {
-        'cflags+': ['-Wno-sign-compare',],
+        'cflags+': [
+          '-Wno-uninitialized',
+        ],
       }],
       ['clang==1 or host_clang==1', {
         # This is here so that all files get recompiled after a clang roll and
diff --git a/include/libplatform/DEPS b/include/libplatform/DEPS
index 15e75e6..d8bcf99 100644
--- a/include/libplatform/DEPS
+++ b/include/libplatform/DEPS
@@ -1,3 +1,7 @@
+include_rules = [
+  "+libplatform/libplatform-export.h",
+]
+
 specific_include_rules = {
   "libplatform\.h": [
     "+libplatform/v8-tracing.h",
diff --git a/include/libplatform/libplatform-export.h b/include/libplatform/libplatform-export.h
new file mode 100644
index 0000000..1561843
--- /dev/null
+++ b/include/libplatform/libplatform-export.h
@@ -0,0 +1,29 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LIBPLATFORM_LIBPLATFORM_EXPORT_H_
+#define V8_LIBPLATFORM_LIBPLATFORM_EXPORT_H_
+
+#if defined(_WIN32)
+
+#ifdef BUILDING_V8_PLATFORM_SHARED
+#define V8_PLATFORM_EXPORT __declspec(dllexport)
+#elif USING_V8_PLATFORM_SHARED
+#define V8_PLATFORM_EXPORT __declspec(dllimport)
+#else
+#define V8_PLATFORM_EXPORT
+#endif  // BUILDING_V8_PLATFORM_SHARED
+
+#else  // defined(_WIN32)
+
+// Setup for Linux shared library export.
+#ifdef BUILDING_V8_PLATFORM_SHARED
+#define V8_PLATFORM_EXPORT __attribute__((visibility("default")))
+#else
+#define V8_PLATFORM_EXPORT
+#endif
+
+#endif  // defined(_WIN32)
+
+#endif  // V8_LIBPLATFORM_LIBPLATFORM_EXPORT_H_
diff --git a/include/libplatform/libplatform.h b/include/libplatform/libplatform.h
index 5b5eee6..40f3f66 100644
--- a/include/libplatform/libplatform.h
+++ b/include/libplatform/libplatform.h
@@ -5,6 +5,7 @@
 #ifndef V8_LIBPLATFORM_LIBPLATFORM_H_
 #define V8_LIBPLATFORM_LIBPLATFORM_H_
 
+#include "libplatform/libplatform-export.h"
 #include "libplatform/v8-tracing.h"
 #include "v8-platform.h"  // NOLINT(build/include)
 
@@ -19,8 +20,8 @@
  * of zero is passed, a suitable default based on the current number of
  * processors online will be chosen.
  */
-v8::Platform* CreateDefaultPlatform(int thread_pool_size = 0);
-
+V8_PLATFORM_EXPORT v8::Platform* CreateDefaultPlatform(
+    int thread_pool_size = 0);
 
 /**
  * Pumps the message loop for the given isolate.
@@ -30,14 +31,15 @@
  * not block if no task is pending. The |platform| has to be created using
  * |CreateDefaultPlatform|.
  */
-bool PumpMessageLoop(v8::Platform* platform, v8::Isolate* isolate);
+V8_PLATFORM_EXPORT bool PumpMessageLoop(v8::Platform* platform,
+                                        v8::Isolate* isolate);
 
 /**
  * Attempts to set the tracing controller for the given platform.
  *
  * The |platform| has to be created using |CreateDefaultPlatform|.
  */
-void SetTracingController(
+V8_PLATFORM_EXPORT void SetTracingController(
     v8::Platform* platform,
     v8::platform::tracing::TracingController* tracing_controller);
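
With the export annotations in place, the usual embedder startup sequence works unchanged across a shared libplatform boundary; a minimal sketch:

    #include "libplatform/libplatform.h"
    #include "v8.h"

    // Sketch: create the default platform, hand it to V8, then drain pending
    // foreground tasks for an isolate until none are left.
    v8::Platform* InitPlatform() {
      v8::Platform* platform = v8::platform::CreateDefaultPlatform();
      v8::V8::InitializePlatform(platform);
      v8::V8::Initialize();
      return platform;
    }

    void DrainForegroundTasks(v8::Platform* platform, v8::Isolate* isolate) {
      while (v8::platform::PumpMessageLoop(platform, isolate)) {
        // PumpMessageLoop() returns false once no task was pending.
      }
    }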
 
diff --git a/include/libplatform/v8-tracing.h b/include/libplatform/v8-tracing.h
index e9f4941..902f8ea 100644
--- a/include/libplatform/v8-tracing.h
+++ b/include/libplatform/v8-tracing.h
@@ -10,6 +10,7 @@
 #include <unordered_set>
 #include <vector>
 
+#include "libplatform/libplatform-export.h"
 #include "v8-platform.h"  // NOLINT(build/include)
 
 namespace v8 {
@@ -23,7 +24,7 @@
 
 const int kTraceMaxNumArgs = 2;
 
-class TraceObject {
+class V8_PLATFORM_EXPORT TraceObject {
  public:
   union ArgValue {
     bool as_bool;
@@ -103,7 +104,7 @@
   void operator=(const TraceObject&) = delete;
 };
 
-class TraceWriter {
+class V8_PLATFORM_EXPORT TraceWriter {
  public:
   TraceWriter() {}
   virtual ~TraceWriter() {}
@@ -118,7 +119,7 @@
   void operator=(const TraceWriter&) = delete;
 };
 
-class TraceBufferChunk {
+class V8_PLATFORM_EXPORT TraceBufferChunk {
  public:
   explicit TraceBufferChunk(uint32_t seq);
 
@@ -142,7 +143,7 @@
   void operator=(const TraceBufferChunk&) = delete;
 };
 
-class TraceBuffer {
+class V8_PLATFORM_EXPORT TraceBuffer {
  public:
   TraceBuffer() {}
   virtual ~TraceBuffer() {}
@@ -178,45 +179,37 @@
   ECHO_TO_CONSOLE,
 };
 
-class TraceConfig {
+class V8_PLATFORM_EXPORT TraceConfig {
  public:
   typedef std::vector<std::string> StringList;
 
   static TraceConfig* CreateDefaultTraceConfig();
 
-  TraceConfig()
-      : enable_sampling_(false),
-        enable_systrace_(false),
-        enable_argument_filter_(false) {}
+  TraceConfig() : enable_systrace_(false), enable_argument_filter_(false) {}
   TraceRecordMode GetTraceRecordMode() const { return record_mode_; }
-  bool IsSamplingEnabled() const { return enable_sampling_; }
   bool IsSystraceEnabled() const { return enable_systrace_; }
   bool IsArgumentFilterEnabled() const { return enable_argument_filter_; }
 
   void SetTraceRecordMode(TraceRecordMode mode) { record_mode_ = mode; }
-  void EnableSampling() { enable_sampling_ = true; }
   void EnableSystrace() { enable_systrace_ = true; }
   void EnableArgumentFilter() { enable_argument_filter_ = true; }
 
   void AddIncludedCategory(const char* included_category);
-  void AddExcludedCategory(const char* excluded_category);
 
   bool IsCategoryGroupEnabled(const char* category_group) const;
 
  private:
   TraceRecordMode record_mode_;
-  bool enable_sampling_ : 1;
   bool enable_systrace_ : 1;
   bool enable_argument_filter_ : 1;
   StringList included_categories_;
-  StringList excluded_categories_;
 
   // Disallow copy and assign
   TraceConfig(const TraceConfig&) = delete;
   void operator=(const TraceConfig&) = delete;
 };
 
-class TracingController {
+class V8_PLATFORM_EXPORT TracingController {
  public:
   enum Mode { DISABLED = 0, RECORDING_MODE };
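
With sampling and excluded categories gone from TraceConfig, a config is assembled from included categories only; a minimal sketch:

    #include "libplatform/v8-tracing.h"

    // Sketch: default config plus systrace output and one extra included
    // category ("v8" is an assumed category name).
    v8::platform::tracing::TraceConfig* MakeTraceConfig() {
      using v8::platform::tracing::TraceConfig;
      TraceConfig* config = TraceConfig::CreateDefaultTraceConfig();
      config->EnableSystrace();
      config->AddIncludedCategory("v8");
      return config;
    }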
 
diff --git a/include/v8-profiler.h b/include/v8-profiler.h
index 6ee0340..74c0613 100644
--- a/include/v8-profiler.h
+++ b/include/v8-profiler.h
@@ -48,7 +48,7 @@
 
 /**
  * TracingCpuProfiler monitors tracing being enabled/disabled
- * and emits CpuProfile trace events once v8.cpu_profile2 tracing category
+ * and emits CpuProfile trace events once v8.cpu_profiler tracing category
  * is enabled. It has no overhead unless the category is enabled.
  */
 class V8_EXPORT TracingCpuProfiler {
diff --git a/include/v8-util.h b/include/v8-util.h
index 99c59fe..8133fdd 100644
--- a/include/v8-util.h
+++ b/include/v8-util.h
@@ -206,19 +206,14 @@
   }
 
   /**
-   * Deprecated. Call V8::RegisterExternallyReferencedObject with the map value
-   * for given key.
-   * TODO(hlopko) Remove once migration to reporter is finished.
+   * Call V8::RegisterExternallyReferencedObject with the map value for given
+   * key.
    */
-  void RegisterExternallyReferencedObject(K& key) {}
-
-  /**
-   * Use EmbedderReachableReferenceReporter with the map value for given key.
-   */
-  void RegisterExternallyReferencedObject(
-      EmbedderReachableReferenceReporter* reporter, K& key) {
+  void RegisterExternallyReferencedObject(K& key) {
     DCHECK(Contains(key));
-    reporter->ReportExternalReference(FromVal(Traits::Get(&impl_, key)));
+    V8::RegisterExternallyReferencedObject(
+        reinterpret_cast<internal::Object**>(FromVal(Traits::Get(&impl_, key))),
+        reinterpret_cast<internal::Isolate*>(GetIsolate()));
   }
 
   /**
diff --git a/include/v8-version.h b/include/v8-version.h
index 16133c3..eda966b 100644
--- a/include/v8-version.h
+++ b/include/v8-version.h
@@ -9,9 +9,9 @@
 // NOTE these macros are used by some of the tool scripts and the build
 // system so their names cannot be changed without changing the scripts.
 #define V8_MAJOR_VERSION 5
-#define V8_MINOR_VERSION 5
-#define V8_BUILD_NUMBER 372
-#define V8_PATCH_LEVEL 32
+#define V8_MINOR_VERSION 6
+#define V8_BUILD_NUMBER 326
+#define V8_PATCH_LEVEL 50
 
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/include/v8.h b/include/v8.h
index 36edf53..5348ba7 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -35,11 +35,6 @@
 // the V8 DLL USING_V8_SHARED needs to be defined. When either building the V8
 // static library or building a program which uses the V8 static library neither
 // BUILDING_V8_SHARED nor USING_V8_SHARED should be defined.
-#if defined(BUILDING_V8_SHARED) && defined(USING_V8_SHARED)
-#error both BUILDING_V8_SHARED and USING_V8_SHARED are set - please check the\
-  build configuration to ensure that at most one of these is set
-#endif
-
 #ifdef BUILDING_V8_SHARED
 # define V8_EXPORT __declspec(dllexport)
 #elif USING_V8_SHARED
@@ -468,16 +463,6 @@
 enum class WeakCallbackType { kParameter, kInternalFields, kFinalizer };
 
 /**
- * A reporter class that embedder will use to report reachable references found
- * by EmbedderHeapTracer.
- */
-class V8_EXPORT EmbedderReachableReferenceReporter {
- public:
-  virtual void ReportExternalReference(Value* object) = 0;
-  virtual ~EmbedderReachableReferenceReporter() = default;
-};
-
-/**
  * An object reference that is independent of any handle scope.  Where
  * a Local handle only lives as long as the HandleScope in which it was
  * allocated, a PersistentBase handle remains valid until it is explicitly
@@ -574,18 +559,11 @@
   V8_INLINE void ClearWeak() { ClearWeak<void>(); }
 
   /**
-   * Deprecated.
-   * TODO(hlopko): remove once migration to reporter is finished.
-   */
-  V8_INLINE void RegisterExternalReference(Isolate* isolate) const {}
-
-  /**
    * Allows the embedder to tell the v8 garbage collector that a certain object
    * is alive. Only allowed when the embedder is asked to trace its heap by
    * EmbedderHeapTracer.
    */
-  V8_INLINE void RegisterExternalReference(
-      EmbedderReachableReferenceReporter* reporter) const;
+  V8_INLINE void RegisterExternalReference(Isolate* isolate) const;
 
   /**
    * Marks the reference to this object independent. Garbage collector is free
@@ -596,18 +574,6 @@
   V8_INLINE void MarkIndependent();
 
   /**
-   * Marks the reference to this object partially dependent. Partially dependent
-   * handles only depend on other partially dependent handles and these
-   * dependencies are provided through object groups. It provides a way to build
-   * smaller object groups for young objects that represent only a subset of all
-   * external dependencies. This mark is automatically cleared after each
-   * garbage collection.
-   */
-  V8_INLINE V8_DEPRECATED(
-      "deprecated optimization, do not use partially dependent groups",
-      void MarkPartiallyDependent());
-
-  /**
    * Marks the reference to this object as active. The scavenge garbage
    * collection should not reclaim the objects marked as active.
    * This bit is cleared after the each garbage collection pass.
@@ -1106,22 +1072,22 @@
    */
   Local<String> GetModuleRequest(int i) const;
 
-  void SetEmbedderData(Local<Value> data);
-  Local<Value> GetEmbedderData() const;
+  /**
+   * Returns the identity hash for this object.
+   */
+  int GetIdentityHash() const;
 
   typedef MaybeLocal<Module> (*ResolveCallback)(Local<Context> context,
                                                 Local<String> specifier,
-                                                Local<Module> referrer,
-                                                Local<Value> data);
+                                                Local<Module> referrer);
 
   /**
    * ModuleDeclarationInstantiation
    *
    * Returns false if an exception occurred during instantiation.
    */
-  V8_WARN_UNUSED_RESULT bool Instantiate(
-      Local<Context> context, ResolveCallback callback,
-      Local<Value> callback_data = Local<Value>());
+  V8_WARN_UNUSED_RESULT bool Instantiate(Local<Context> context,
+                                         ResolveCallback callback);
 
   /**
    * ModuleEvaluation
@@ -1745,6 +1711,19 @@
      * Nothing<bool>() returned.
      */
     virtual Maybe<bool> WriteHostObject(Isolate* isolate, Local<Object> object);
+
+    /*
+     * Allocates memory for the buffer of at least the size provided. The
+     * actual size (which may be greater than or equal to |size|) is written to
+     * |actual_size|. If no buffer has been allocated yet, |old_buffer| will be
+     * nullptr.
+     */
+    virtual void* ReallocateBufferMemory(void* old_buffer, size_t size,
+                                         size_t* actual_size);
+
+    /*
+     * Frees a buffer allocated with |ReallocateBufferMemory|.
+     */
+    virtual void FreeBufferMemory(void* buffer);
   };
 
   explicit ValueSerializer(Isolate* isolate);
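
A minimal malloc-backed delegate satisfying the new hooks, assuming ThrowDataCloneError is still the delegate's only pure-virtual method:

    #include <cstdlib>

    #include "v8.h"

    // Sketch of a malloc-backed delegate. ReallocateBufferMemory must report
    // the size it actually allocated through |actual_size|; |old_buffer| is
    // nullptr on the first allocation.
    class MallocDelegate : public v8::ValueSerializer::Delegate {
     public:
      void ThrowDataCloneError(v8::Local<v8::String> message) override {
        // A real embedder would surface |message| as a pending exception.
      }
      void* ReallocateBufferMemory(void* old_buffer, size_t size,
                                   size_t* actual_size) override {
        *actual_size = size;
        return std::realloc(old_buffer, size);
      }
      void FreeBufferMemory(void* buffer) override { std::free(buffer); }
    };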
@@ -1766,7 +1745,15 @@
    * Returns the stored data. This serializer should not be used once the buffer
    * is released. The contents are undefined if a previous write has failed.
    */
-  std::vector<uint8_t> ReleaseBuffer();
+  V8_DEPRECATE_SOON("Use Release()", std::vector<uint8_t> ReleaseBuffer());
+
+  /*
+   * Returns the stored data (allocated using the delegate's
+   * ReallocateBufferMemory) and its size. This serializer should not be used once
+   * the buffer is released. The contents are undefined if a previous write has
+   * failed.
+   */
+  V8_WARN_UNUSED_RESULT std::pair<uint8_t*, size_t> Release();
 
   /*
+   * Marks an ArrayBuffer as having its contents transferred out of band.
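
Combined with a delegate such as the sketch above, taking ownership of the serialized bytes might look like this (a sketch, assuming a live context and a successful write):

    #include <cstdint>
    #include <utility>

    #include "v8.h"

    // Sketch: serialize |value| and take ownership of the resulting buffer,
    // which must later be freed the same way the delegate allocated it.
    std::pair<uint8_t*, size_t> SerializeValue(
        v8::Isolate* isolate, v8::Local<v8::Context> context,
        v8::Local<v8::Value> value, v8::ValueSerializer::Delegate* delegate) {
      v8::ValueSerializer serializer(isolate, delegate);
      serializer.WriteHeader();
      if (!serializer.WriteValue(context, value).FromMaybe(false))
        return {nullptr, 0};
      return serializer.Release();
    }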
@@ -1832,7 +1819,6 @@
    * May, for example, reject an invalid or unsupported wire format.
    */
   V8_WARN_UNUSED_RESULT Maybe<bool> ReadHeader(Local<Context> context);
-  V8_DEPRECATE_SOON("Use Local<Context> version", Maybe<bool> ReadHeader());
 
   /*
    * Deserializes a JavaScript value from the buffer.
@@ -3542,7 +3528,7 @@
   /**
    * \return The receiver. In many cases, this is the object on which the
    * property access was intercepted. When using
-   * `Reflect.Get`, `Function.prototype.call`, or similar functions, it is the
+   * `Reflect.get`, `Function.prototype.call`, or similar functions, it is the
    * object passed in as receiver or thisArg.
    *
    * \code
@@ -3607,7 +3593,7 @@
    * \return True if the intercepted function should throw if an error occurs.
    * Usually, `true` corresponds to `'use strict'`.
    *
-   * \note Always `false` when intercepting `Reflect.Set()`
+   * \note Always `false` when intercepting `Reflect.set()`
    * independent of the language mode.
    */
   V8_INLINE bool ShouldThrowOnError() const;
@@ -3902,13 +3888,29 @@
 class V8_EXPORT WasmCompiledModule : public Object {
  public:
   typedef std::pair<std::unique_ptr<const uint8_t[]>, size_t> SerializedModule;
+  // A buffer that is owned by the caller.
+  typedef std::pair<const uint8_t*, size_t> CallerOwnedBuffer;
+  // Get the wasm-encoded bytes that were used to compile this module.
+  Local<String> GetWasmWireBytes();
 
+  // Serialize the compiled module. The serialized data does not include the
+  // uncompiled bytes.
   SerializedModule Serialize();
-  static MaybeLocal<WasmCompiledModule> Deserialize(
-      Isolate* isolate, const SerializedModule& serialized_data);
+
+  // If possible, deserialize the module; otherwise, compile it from the provided
+  // uncompiled bytes.
+  static MaybeLocal<WasmCompiledModule> DeserializeOrCompile(
+      Isolate* isolate, const CallerOwnedBuffer& serialized_module,
+      const CallerOwnedBuffer& wire_bytes);
   V8_INLINE static WasmCompiledModule* Cast(Value* obj);
 
  private:
+  static MaybeLocal<WasmCompiledModule> Deserialize(
+      Isolate* isolate, const CallerOwnedBuffer& serialized_module,
+      const CallerOwnedBuffer& wire_bytes);
+  static MaybeLocal<WasmCompiledModule> Compile(Isolate* isolate,
+                                                const uint8_t* start,
+                                                size_t length);
   WasmCompiledModule();
   static void CheckCast(Value* obj);
 };
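
Since Serialize() no longer embeds the uncompiled bytes, an embedder must persist the wire bytes alongside the serialized module and pass both back; DeserializeOrCompile then transparently recompiles when the serialized code cannot be used. A sketch (the loaded byte ranges ser_data/ser_size and wire_data/wire_size are assumptions):

    // Saving: keep the compiled code and the original wire bytes side by side.
    v8::WasmCompiledModule::SerializedModule serialized = compiled->Serialize();
    v8::Local<v8::String> wire_bytes = compiled->GetWasmWireBytes();

    // Restoring: both buffers stay owned by the caller for the duration of
    // the call (hence CallerOwnedBuffer).
    v8::WasmCompiledModule::CallerOwnedBuffer ser = {ser_data, ser_size};
    v8::WasmCompiledModule::CallerOwnedBuffer wire = {wire_data, wire_size};
    v8::MaybeLocal<v8::WasmCompiledModule> restored =
        v8::WasmCompiledModule::DeserializeOrCompile(isolate, ser, wire);
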
@@ -4623,6 +4625,8 @@
    */
   void Set(Local<Name> name, Local<Data> value,
            PropertyAttribute attributes = None);
+  void SetPrivate(Local<Private> name, Local<Data> value,
+                  PropertyAttribute attributes = None);
   V8_INLINE void Set(Isolate* isolate, const char* name, Local<Data> value);
 
   void SetAccessorProperty(
@@ -4675,6 +4679,14 @@
       AccessControl settings = DEFAULT);
 
   /**
+   * Like SetNativeDataProperty, but V8 will replace the native data property
+   * with a real data property on first access.
+   */
+  void SetLazyDataProperty(Local<Name> name, AccessorNameGetterCallback getter,
+                           Local<Value> data = Local<Value>(),
+                           PropertyAttribute attribute = None);
+
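
The practical difference from SetNativeDataProperty is that the getter fires at most once per object; afterwards the property behaves like ordinary data. A sketch (ComputeConfig is a hypothetical expensive getter):

    static void ConfigGetter(v8::Local<v8::Name> name,
                             const v8::PropertyCallbackInfo<v8::Value>& info) {
      // Runs on first access only; V8 then installs the result as a plain
      // data property on the receiver.
      info.GetReturnValue().Set(ComputeConfig(info.GetIsolate()));
    }

    templ->SetLazyDataProperty(
        v8::String::NewFromUtf8(isolate, "config", v8::NewStringType::kNormal)
            .ToLocalChecked(),
        ConfigGetter);
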
+  /**
    * During template instantiation, sets the value with the intrinsic property
    * from the correct context.
    */
@@ -5095,6 +5107,14 @@
       Local<Value> data = Local<Value>(),
       Local<Signature> signature = Local<Signature>(), int length = 0);
 
+  /**
+   * Creates a function template backed by (and cached in) a private property.
+   */
+  static Local<FunctionTemplate> NewWithCache(
+      Isolate* isolate, FunctionCallback callback,
+      Local<Private> cache_property, Local<Value> data = Local<Value>(),
+      Local<Signature> signature = Local<Signature>(), int length = 0);
+
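
The private property serves as a per-receiver cache slot: the callback's result can be stored under it, so later accesses read the cached value instead of re-entering C++. A usage sketch when paired with SetAccessorProperty (ConfigCallback, config_name and the string literal are illustrative):

    v8::Local<v8::Private> cache_key = v8::Private::New(
        isolate, v8::String::NewFromUtf8(isolate, "cached_config",
                                         v8::NewStringType::kNormal)
                     .ToLocalChecked());
    v8::Local<v8::FunctionTemplate> getter =
        v8::FunctionTemplate::NewWithCache(isolate, ConfigCallback, cache_key);
    templ->SetAccessorProperty(config_name, getter);
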
   /** Returns the unique function instance in the current execution context.*/
   V8_DEPRECATE_SOON("Use maybe version", Local<Function> GetFunction());
   V8_WARN_UNUSED_RESULT MaybeLocal<Function> GetFunction(
@@ -5676,6 +5696,10 @@
   void set_code_range_size(size_t limit_in_mb) {
     code_range_size_ = limit_in_mb;
   }
+  size_t max_zone_pool_size() const { return max_zone_pool_size_; }
+  void set_max_zone_pool_size(const size_t bytes) {
+    max_zone_pool_size_ = bytes;
+  }
 
  private:
   int max_semi_space_size_;
@@ -5683,6 +5707,7 @@
   int max_executable_size_;
   uint32_t* stack_limit_;
   size_t code_range_size_;
+  size_t max_zone_pool_size_;
 };
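
Like the existing heap limits, the new knob is applied through the constraints passed at isolate creation; it caps how much memory the zone segment pool may retain between compilations. A sketch (the 8 MB figure is arbitrary, and allocator is assumed to exist):

    v8::Isolate::CreateParams params;
    params.array_buffer_allocator = allocator;
    params.constraints.set_max_zone_pool_size(8u * 1024 * 1024);
    v8::Isolate* isolate = v8::Isolate::New(params);
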
 
 
@@ -6145,11 +6170,11 @@
 enum class MemoryPressureLevel { kNone, kModerate, kCritical };
 
 /**
- * Interface for tracing through the embedder heap. During the v8 garbage
+ * Interface for tracing through the embedder heap. During a v8 garbage
  * collection, v8 collects hidden fields of all potential wrappers, and at the
  * end of its marking phase iterates the collection and asks the embedder to
- * trace through its heap and use reporter to report each js object reachable
- * from any of the given wrappers.
+ * trace through its heap and use the reporter to report each JavaScript
+ * object reachable from any of the given wrappers.
  *
  * Before the first call to the TraceWrappersFrom function TracePrologue will be
  * called. When the garbage collection cycle is finished, TraceEpilogue will be
@@ -6167,30 +6192,26 @@
   };
 
   /**
-   * V8 will call this method with internal fields of found wrappers. The
-   * embedder is expected to store them in its marking deque and trace
-   * reachable wrappers from them when called through |AdvanceTracing|.
+   * Called by v8 to register internal fields of found wrappers.
+   *
+   * The embedder is expected to store them somewhere and trace reachable
+   * wrappers from them when called through |AdvanceTracing|.
    */
   virtual void RegisterV8References(
       const std::vector<std::pair<void*, void*> >& internal_fields) = 0;
 
   /**
-   * Deprecated.
-   * TODO(hlopko) Remove once the migration to reporter is finished.
+   * Called at the beginning of a GC cycle.
    */
-  virtual void TracePrologue() {}
+  virtual void TracePrologue() = 0;
 
   /**
-   * V8 will call this method at the beginning of a GC cycle. Embedder is
-   * expected to use EmbedderReachableReferenceReporter for reporting all
-   * reachable v8 objects.
-   */
-  virtual void TracePrologue(EmbedderReachableReferenceReporter* reporter) {}
-
-  /**
-   * Embedder is expected to trace its heap starting from wrappers reported by
-   * RegisterV8References method, and use reporter for all reachable wrappers.
-   * Embedder is expected to stop tracing by the given deadline.
+   * Called to make a tracing step in the embedder.
+   *
+   * The embedder is expected to trace its heap starting from wrappers reported
+   * by the RegisterV8References method, and report back all reachable wrappers.
+   * Furthermore, the embedder is expected to stop tracing by the given
+   * deadline.
    *
    * Returns true if there is still work to do.
    */
@@ -6198,22 +6219,25 @@
                               AdvanceTracingActions actions) = 0;
 
   /**
-   * V8 will call this method at the end of a GC cycle.
+   * Called at the end of a GC cycle.
    *
    * Note that allocation is *not* allowed within |TraceEpilogue|.
    */
   virtual void TraceEpilogue() = 0;
 
   /**
-   * Let embedder know v8 entered final marking pause (no more incremental steps
-   * will follow).
+   * Called upon entering the final marking pause. No more incremental marking
+   * steps will follow this call.
    */
-  virtual void EnterFinalPause() {}
+  virtual void EnterFinalPause() = 0;
 
   /**
-   * Throw away all intermediate data and reset to the initial state.
+   * Called when tracing is aborted.
+   *
+   * The embedder is expected to throw away all intermediate data and reset to
+   * the initial state.
    */
-  virtual void AbortTracing() {}
+  virtual void AbortTracing() = 0;
 
   /**
    * Returns the number of wrappers that are still to be traced by the embedder.
@@ -6225,6 +6249,19 @@
 };
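
With the reporter overloads gone and every hook now pure virtual, embedders must supply the whole life cycle. A minimal conforming sketch, keeping the registered wrappers as the only state:

    class MyTracer : public v8::EmbedderHeapTracer {
     public:
      void TracePrologue() override { wrappers_.clear(); }

      void RegisterV8References(
          const std::vector<std::pair<void*, void*> >& internal_fields)
          override {
        // Stash the internal-field pairs; tracing happens in AdvanceTracing.
        wrappers_.insert(wrappers_.end(), internal_fields.begin(),
                         internal_fields.end());
      }

      bool AdvanceTracing(double deadline_in_ms,
                          AdvanceTracingActions actions) override {
        while (!wrappers_.empty() /* && not past deadline_in_ms */) {
          std::pair<void*, void*> wrapper = wrappers_.back();
          wrappers_.pop_back();
          // ... trace the embedder object behind |wrapper| ...
        }
        return !wrappers_.empty();  // true: more work remains
      }

      void TraceEpilogue() override {}   // no allocation allowed here
      void EnterFinalPause() override {}
      void AbortTracing() override { wrappers_.clear(); }

     private:
      std::vector<std::pair<void*, void*> > wrappers_;
    };
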
 
 /**
+ * Callback to the embedder used in SnapshotCreator to handle internal fields.
+ */
+typedef StartupData (*SerializeInternalFieldsCallback)(Local<Object> holder,
+                                                       int index);
+
+/**
+ * Callback to the embedder used to deserialize internal fields.
+ */
+typedef void (*DeserializeInternalFieldsCallback)(Local<Object> holder,
+                                                  int index,
+                                                  StartupData payload);
+
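
A matched pair of callbacks might copy an internal field's embedder object to and from raw bytes; the payload format is entirely up to the embedder. A sketch (MyState is a hypothetical aligned-pointer payload):

    #include <cstring>

    struct MyState { int id; };  // hypothetical embedder data

    static v8::StartupData SerializeFields(v8::Local<v8::Object> holder,
                                           int index) {
      auto* state = static_cast<MyState*>(
          holder->GetAlignedPointerFromInternalField(index));
      if (state == nullptr) return {nullptr, 0};  // nothing to serialize
      char* payload = new char[sizeof(MyState)];
      memcpy(payload, state, sizeof(MyState));
      return {payload, static_cast<int>(sizeof(MyState))};
    }

    static void DeserializeFields(v8::Local<v8::Object> holder, int index,
                                  v8::StartupData payload) {
      auto* state = new MyState;
      memcpy(state, payload.data, sizeof(MyState));
      holder->SetAlignedPointerInInternalField(index, state);
    }
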
+/**
  * Isolate represents an isolated instance of the V8 engine.  V8 isolates have
  * completely separate states.  Objects from one isolate must not be used in
  * other isolates.  The embedder can create multiple isolates and use them in
@@ -6246,7 +6283,8 @@
           create_histogram_callback(nullptr),
           add_histogram_sample_callback(nullptr),
           array_buffer_allocator(nullptr),
-          external_references(nullptr) {}
+          external_references(nullptr),
+          deserialize_internal_fields_callback(nullptr) {}
 
     /**
      * The optional entry_hook allows the host application to provide the
@@ -6302,6 +6340,12 @@
      * entire lifetime of the isolate.
      */
     intptr_t* external_references;
+
+    /**
+     * Specifies an optional callback to deserialize internal fields. It
+     * should match the SerializeInternalFieldsCallback used to serialize.
+     */
+    DeserializeInternalFieldsCallback deserialize_internal_fields_callback;
   };
 
 
@@ -7506,6 +7550,9 @@
                          int* index);
   static Local<Value> GetEternal(Isolate* isolate, int index);
 
+  static void RegisterExternallyReferencedObject(internal::Object** object,
+                                                 internal::Isolate* isolate);
+
   template <class K, class V, class T>
   friend class PersistentValueMapBase;
 
@@ -7528,7 +7575,7 @@
 /**
  * Helper class to create a snapshot data blob.
  */
-class SnapshotCreator {
+class V8_EXPORT SnapshotCreator {
  public:
   enum class FunctionCodeHandling { kClear, kKeep };
 
@@ -7567,10 +7614,12 @@
    * This must not be called from within a handle scope.
    * \param function_code_handling whether to include compiled function code
    *        in the snapshot.
+   * \param callback to serialize embedder-set internal fields.
    * \returns { nullptr, 0 } on failure, and a startup snapshot on success. The
    *        caller acquires ownership of the data array in the return value.
    */
-  StartupData CreateBlob(FunctionCodeHandling function_code_handling);
+  StartupData CreateBlob(FunctionCodeHandling function_code_handling,
+                         SerializeInternalFieldsCallback callback = nullptr);
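
Wiring the two halves together: the serializer is supplied when the blob is created, and the deserializer via Isolate::CreateParams when the blob is consumed. A sketch reusing the callbacks above (allocator is assumed):

    v8::SnapshotCreator creator;
    // ... set up contexts whose objects carry internal fields ...
    v8::StartupData blob = creator.CreateBlob(
        v8::SnapshotCreator::FunctionCodeHandling::kClear, SerializeFields);

    v8::Isolate::CreateParams params;
    params.snapshot_blob = &blob;
    params.deserialize_internal_fields_callback = DeserializeFields;
    params.array_buffer_allocator = allocator;
    v8::Isolate* isolate = v8::Isolate::New(params);
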
 
   // Disallow copying and assigning.
   SnapshotCreator(const SnapshotCreator&) = delete;
@@ -7824,7 +7873,6 @@
   const char** names_;
 };
 
-
 /**
  * A sandboxed execution context with its own set of built-in objects
  * and functions.
@@ -8286,11 +8334,10 @@
   static const int kNodeStateIsPendingValue = 3;
   static const int kNodeStateIsNearDeathValue = 4;
   static const int kNodeIsIndependentShift = 3;
-  static const int kNodeIsPartiallyDependentShift = 4;
   static const int kNodeIsActiveShift = 4;
 
-  static const int kJSObjectType = 0xb9;
-  static const int kJSApiObjectType = 0xb8;
+  static const int kJSObjectType = 0xbc;
+  static const int kJSApiObjectType = 0xbb;
   static const int kFirstNonstringType = 0x80;
   static const int kOddballType = 0x83;
   static const int kForeignType = 0x87;
@@ -8566,10 +8613,11 @@
 }
 
 template <class T>
-void PersistentBase<T>::RegisterExternalReference(
-    EmbedderReachableReferenceReporter* reporter) const {
+void PersistentBase<T>::RegisterExternalReference(Isolate* isolate) const {
   if (IsEmpty()) return;
-  reporter->ReportExternalReference(this->val_);
+  V8::RegisterExternallyReferencedObject(
+      reinterpret_cast<internal::Object**>(this->val_),
+      reinterpret_cast<internal::Isolate*>(isolate));
 }
 
 template <class T>
@@ -8581,17 +8629,6 @@
                     I::kNodeIsIndependentShift);
 }
 
-
-template <class T>
-void PersistentBase<T>::MarkPartiallyDependent() {
-  typedef internal::Internals I;
-  if (this->IsEmpty()) return;
-  I::UpdateNodeFlag(reinterpret_cast<internal::Object**>(this->val_),
-                    true,
-                    I::kNodeIsPartiallyDependentShift);
-}
-
-
 template <class T>
 void PersistentBase<T>::MarkActive() {
   typedef internal::Internals I;
diff --git a/infra/config/cq.cfg b/infra/config/cq.cfg
index 3c645fd..e93895f 100644
--- a/infra/config/cq.cfg
+++ b/infra/config/cq.cfg
@@ -104,7 +104,7 @@
       }
       builders {
         name: "v8_linux64_sanitizer_coverage_rel"
-        experiment_percentage: 100
+        experiment_percentage: 20
       }
     }
     buckets {
diff --git a/infra/mb/mb_config.pyl b/infra/mb/mb_config.pyl
index 2747be5..d6a2a2d 100644
--- a/infra/mb/mb_config.pyl
+++ b/infra/mb/mb_config.pyl
@@ -40,7 +40,7 @@
       'V8 Linux - shared': 'gn_release_x86_shared_verify_heap',
       'V8 Linux - noi18n - debug': 'gn_debug_x86_no_i18n',
       # Linux64.
-      'V8 Linux64 - builder': 'gn_release_x64',
+      'V8 Linux64 - builder': 'gn_release_x64_valgrind',
       'V8 Linux64 - debug builder': 'gn_debug_x64_valgrind',
       'V8 Linux64 - custom snapshot - debug builder': 'gn_debug_x64_custom',
       'V8 Linux64 - internal snapshot': 'gn_release_x64_internal',
@@ -156,7 +156,7 @@
       'v8_linux_nosnap_dbg': 'gn_debug_x86_no_snap_trybot',
       'v8_linux_gcc_compile_rel': 'gn_release_x86_gcc_minimal_symbols',
       'v8_linux_gcc_rel': 'gn_release_x86_gcc_minimal_symbols',
-      'v8_linux64_rel_ng': 'gn_release_x64_trybot',
+      'v8_linux64_rel_ng': 'gn_release_x64_valgrind_trybot',
       'v8_linux64_gyp_rel_ng': 'gyp_release_x64',
       'v8_linux64_avx2_rel_ng': 'gn_release_x64_trybot',
       'v8_linux64_avx2_dbg': 'gn_debug_x64_trybot',
@@ -296,6 +296,10 @@
       'gn', 'release_bot', 'x64', 'tsan', 'swarming'],
     'gn_release_x64_tsan_minimal_symbols': [
       'gn', 'release_bot', 'x64', 'tsan', 'minimal_symbols', 'swarming'],
+    'gn_release_x64_valgrind': [
+      'gn', 'release_bot', 'x64', 'swarming', 'valgrind'],
+    'gn_release_x64_valgrind_trybot': [
+      'gn', 'release_trybot', 'x64', 'swarming', 'valgrind'],
 
     # GN debug configs for x64.
     'gn_debug_x64': [
@@ -317,7 +321,8 @@
     'gn_debug_x86_minimal_symbols': [
       'gn', 'debug_bot', 'x86', 'minimal_symbols', 'swarming'],
     'gn_debug_x86_no_i18n': [
-      'gn', 'debug_bot', 'x86', 'v8_no_i18n'],
+      'gn', 'debug_bot', 'x86', 'swarming', 'v8_disable_inspector',
+      'v8_no_i18n'],
     'gn_debug_x86_no_snap': [
       'gn', 'debug_bot', 'x86', 'swarming', 'v8_snapshot_none'],
     'gn_debug_x86_no_snap_trybot': [
@@ -339,7 +344,8 @@
     'gn_release_x86_minimal_symbols': [
       'gn', 'release_bot', 'x86', 'minimal_symbols', 'swarming'],
     'gn_release_x86_no_i18n_trybot': [
-      'gn', 'release_trybot', 'x86', 'swarming', 'v8_no_i18n'],
+      'gn', 'release_trybot', 'x86', 'swarming', 'v8_disable_inspector',
+      'v8_no_i18n'],
     'gn_release_x86_no_snap': [
       'gn', 'release_bot', 'x86', 'swarming', 'v8_snapshot_none'],
     'gn_release_x86_no_snap_shared_minimal_symbols': [
@@ -354,7 +360,8 @@
 
     # Gyp debug configs for simulators.
     'gyp_debug_simulate_x87_no_snap': [
-      'gyp', 'debug_bot', 'simulate_x87', 'swarming', 'v8_snapshot_none'],
+      'gyp', 'debug_bot_static', 'simulate_x87', 'swarming',
+      'v8_snapshot_none'],
 
     # Gyp debug configs for x86.
     'gyp_debug_x86': [
@@ -461,6 +468,12 @@
         'v8_optimized_debug'],
     },
 
+    'debug_bot_static': {
+      'mixins': [
+        'debug', 'static', 'goma', 'v8_enable_slow_dchecks',
+        'v8_optimized_debug'],
+    },
+
     'debug_trybot': {
       'mixins': ['debug_bot', 'minimal_symbols'],
     },
@@ -613,6 +626,11 @@
       'gyp_defines': 'v8_enable_i18n_support=0 icu_use_data_file_flag=0',
     },
 
+    'v8_disable_inspector': {
+      'gn_args': 'v8_enable_inspector=false',
+      'gyp_defines': 'v8_enable_inspector=0',
+    },
+
     'v8_enable_disassembler': {
       'gn_args': 'v8_enable_disassembler=true',
       'gyp_defines': 'v8_enable_disassembler=1',
diff --git a/samples/samples.gyp b/samples/samples.gyp
index e5e9ef0..e7c26cf 100644
--- a/samples/samples.gyp
+++ b/samples/samples.gyp
@@ -36,6 +36,7 @@
     'type': 'executable',
     'dependencies': [
       '../src/v8.gyp:v8',
+      '../src/v8.gyp:v8_libbase',
       '../src/v8.gyp:v8_libplatform',
     ],
     'include_dirs': [
diff --git a/src/DEPS b/src/DEPS
index b1c428d..9114669 100644
--- a/src/DEPS
+++ b/src/DEPS
@@ -8,6 +8,7 @@
   "-src/heap",
   "+src/heap/heap.h",
   "+src/heap/heap-inl.h",
+  "-src/inspector",
   "-src/interpreter",
   "+src/interpreter/bytecode-array-iterator.h",
   "+src/interpreter/bytecode-decoder.h",
diff --git a/src/accessors.cc b/src/accessors.cc
index da44151..9ec24b8 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -19,13 +19,9 @@
 namespace v8 {
 namespace internal {
 
-
 Handle<AccessorInfo> Accessors::MakeAccessor(
-    Isolate* isolate,
-    Handle<Name> name,
-    AccessorNameGetterCallback getter,
-    AccessorNameSetterCallback setter,
-    PropertyAttributes attributes) {
+    Isolate* isolate, Handle<Name> name, AccessorNameGetterCallback getter,
+    AccessorNameBooleanSetterCallback setter, PropertyAttributes attributes) {
   Factory* factory = isolate->factory();
   Handle<AccessorInfo> info = factory->NewAccessorInfo();
   info->set_property_attributes(attributes);
@@ -33,6 +29,7 @@
   info->set_all_can_write(false);
   info->set_is_special_data_property(true);
   info->set_is_sloppy(false);
+  info->set_replace_on_access(false);
   name = factory->InternalizeName(name);
   info->set_name(*name);
   Handle<Object> get = v8::FromCData(isolate, getter);
@@ -106,7 +103,7 @@
 
 void Accessors::ReconfigureToDataProperty(
     v8::Local<v8::Name> key, v8::Local<v8::Value> val,
-    const v8::PropertyCallbackInfo<void>& info) {
+    const v8::PropertyCallbackInfo<v8::Boolean>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   HandleScope scope(isolate);
   Handle<Object> receiver = Utils::OpenHandle(*info.This());
@@ -116,7 +113,11 @@
   Handle<Object> value = Utils::OpenHandle(*val);
   MaybeHandle<Object> result =
       ReplaceAccessorWithDataProperty(isolate, receiver, holder, name, value);
-  if (result.is_null()) isolate->OptionalRescheduleException(false);
+  if (result.is_null()) {
+    isolate->OptionalRescheduleException(false);
+  } else {
+    info.GetReturnValue().Set(true);
+  }
 }
 
 //
@@ -151,6 +152,8 @@
     v8::Local<v8::Name> name,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+  RuntimeCallTimerScope timer(
+      isolate, &RuntimeCallStats::AccessorNameGetterCallback_ArrayLength);
   DisallowHeapAllocation no_allocation;
   HandleScope scope(isolate);
   JSArray* holder = JSArray::cast(*Utils::OpenHandle(*info.Holder()));
@@ -158,11 +161,9 @@
   info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(result, isolate)));
 }
 
-
 void Accessors::ArrayLengthSetter(
-    v8::Local<v8::Name> name,
-    v8::Local<v8::Value> val,
-    const v8::PropertyCallbackInfo<void>& info) {
+    v8::Local<v8::Name> name, v8::Local<v8::Value> val,
+    const v8::PropertyCallbackInfo<v8::Boolean>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   HandleScope scope(isolate);
 
@@ -178,17 +179,21 @@
 
   JSArray::SetLength(array, length);
 
-  if (info.ShouldThrowOnError()) {
-    uint32_t actual_new_len = 0;
-    CHECK(array->length()->ToArrayLength(&actual_new_len));
-    // Throw TypeError if there were non-deletable elements.
-    if (actual_new_len != length) {
+  uint32_t actual_new_len = 0;
+  CHECK(array->length()->ToArrayLength(&actual_new_len));
+  // Fail if there were non-deletable elements.
+  if (actual_new_len != length) {
+    if (info.ShouldThrowOnError()) {
       Factory* factory = isolate->factory();
       isolate->Throw(*factory->NewTypeError(
           MessageTemplate::kStrictDeleteProperty,
           factory->NewNumberFromUint(actual_new_len - 1), array));
       isolate->OptionalRescheduleException(false);
+    } else {
+      info.GetReturnValue().Set(false);
     }
+  } else {
+    info.GetReturnValue().Set(true);
   }
 }
 
@@ -202,6 +207,50 @@
                       attributes);
 }
 
+//
+// Accessors::ModuleNamespaceEntry
+//
+
+void Accessors::ModuleNamespaceEntryGetter(
+    v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+  HandleScope scope(isolate);
+  JSModuleNamespace* holder =
+      JSModuleNamespace::cast(*Utils::OpenHandle(*info.Holder()));
+  Handle<Object> result;
+  if (!holder->GetExport(Handle<String>::cast(Utils::OpenHandle(*name)))
+           .ToHandle(&result)) {
+    isolate->OptionalRescheduleException(false);
+  } else {
+    info.GetReturnValue().Set(Utils::ToLocal(result));
+  }
+}
+
+void Accessors::ModuleNamespaceEntrySetter(
+    v8::Local<v8::Name> name, v8::Local<v8::Value> val,
+    const v8::PropertyCallbackInfo<v8::Boolean>& info) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+  HandleScope scope(isolate);
+  Factory* factory = isolate->factory();
+  Handle<JSModuleNamespace> holder =
+      Handle<JSModuleNamespace>::cast(Utils::OpenHandle(*info.Holder()));
+
+  if (info.ShouldThrowOnError()) {
+    isolate->Throw(*factory->NewTypeError(
+        MessageTemplate::kStrictReadOnlyProperty, Utils::OpenHandle(*name),
+        i::Object::TypeOf(isolate, holder), holder));
+    isolate->OptionalRescheduleException(false);
+  } else {
+    info.GetReturnValue().Set(false);
+  }
+}
+
+Handle<AccessorInfo> Accessors::ModuleNamespaceEntryInfo(
+    Isolate* isolate, Handle<String> name, PropertyAttributes attributes) {
+  return MakeAccessor(isolate, name, &ModuleNamespaceEntryGetter,
+                      &ModuleNamespaceEntrySetter, attributes);
+}
+
 
 //
 // Accessors::StringLength
@@ -211,6 +260,8 @@
     v8::Local<v8::Name> name,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+  RuntimeCallTimerScope timer(
+      isolate, &RuntimeCallStats::AccessorNameGetterCallback_StringLength);
   DisallowHeapAllocation no_allocation;
   HandleScope scope(isolate);
 
@@ -418,40 +469,6 @@
 
 
 //
-// Accessors::ScriptGetLineEnds
-//
-
-
-void Accessors::ScriptLineEndsGetter(
-    v8::Local<v8::Name> name,
-    const v8::PropertyCallbackInfo<v8::Value>& info) {
-  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
-  HandleScope scope(isolate);
-  Handle<Object> object = Utils::OpenHandle(*info.Holder());
-  Handle<Script> script(
-      Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
-  Script::InitLineEnds(script);
-  DCHECK(script->line_ends()->IsFixedArray());
-  Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
-  // We do not want anyone to modify this array from JS.
-  DCHECK(*line_ends == isolate->heap()->empty_fixed_array() ||
-         line_ends->map() == isolate->heap()->fixed_cow_array_map());
-  Handle<JSArray> js_array =
-      isolate->factory()->NewJSArrayWithElements(line_ends);
-  info.GetReturnValue().Set(Utils::ToLocal(js_array));
-}
-
-
-Handle<AccessorInfo> Accessors::ScriptLineEndsInfo(
-      Isolate* isolate, PropertyAttributes attributes) {
-  Handle<String> name(isolate->factory()->InternalizeOneByteString(
-      STATIC_CHAR_VECTOR("line_ends")));
-  return MakeAccessor(isolate, name, &ScriptLineEndsGetter, nullptr,
-                      attributes);
-}
-
-
-//
 // Accessors::ScriptSourceUrl
 //
 
@@ -691,6 +708,8 @@
     v8::Local<v8::Name> name,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+  RuntimeCallTimerScope timer(
+      isolate, &RuntimeCallStats::AccessorNameGetterCallback_FunctionPrototype);
   HandleScope scope(isolate);
   Handle<JSFunction> function =
       Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
@@ -698,11 +717,9 @@
   info.GetReturnValue().Set(Utils::ToLocal(result));
 }
 
-
 void Accessors::FunctionPrototypeSetter(
-    v8::Local<v8::Name> name,
-    v8::Local<v8::Value> val,
-    const v8::PropertyCallbackInfo<void>& info) {
+    v8::Local<v8::Name> name, v8::Local<v8::Value> val,
+    const v8::PropertyCallbackInfo<v8::Boolean>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   HandleScope scope(isolate);
   Handle<Object> value = Utils::OpenHandle(*val);
@@ -710,6 +727,8 @@
       Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
   if (SetFunctionPrototype(isolate, object, value).is_null()) {
     isolate->OptionalRescheduleException(false);
+  } else {
+    info.GetReturnValue().Set(true);
   }
 }
 
@@ -738,7 +757,7 @@
       Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
   Handle<Object> result;
   if (!JSFunction::GetLength(isolate, function).ToHandle(&result)) {
-    result = handle(Smi::FromInt(0), isolate);
+    result = handle(Smi::kZero, isolate);
     isolate->OptionalRescheduleException(false);
   }
 
@@ -1064,6 +1083,9 @@
 void Accessors::BoundFunctionLengthGetter(
     v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+  RuntimeCallTimerScope timer(
+      isolate,
+      &RuntimeCallStats::AccessorNameGetterCallback_BoundFunctionLength);
   HandleScope scope(isolate);
   Handle<JSBoundFunction> function =
       Handle<JSBoundFunction>::cast(Utils::OpenHandle(*info.Holder()));
@@ -1072,7 +1094,7 @@
   Handle<JSFunction> target(JSFunction::cast(function->bound_target_function()),
                             isolate);
   if (!JSFunction::GetLength(isolate, target).ToHandle(&target_length)) {
-    target_length = handle(Smi::FromInt(0), isolate);
+    target_length = handle(Smi::kZero, isolate);
     isolate->OptionalRescheduleException(false);
     return;
   }
@@ -1098,6 +1120,8 @@
 void Accessors::BoundFunctionNameGetter(
     v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+  RuntimeCallTimerScope timer(
+      isolate, &RuntimeCallStats::AccessorNameGetterCallback_BoundFunctionName);
   HandleScope scope(isolate);
   Handle<JSBoundFunction> function =
       Handle<JSBoundFunction>::cast(Utils::OpenHandle(*info.Holder()));
@@ -1207,9 +1231,9 @@
   info.GetReturnValue().Set(value);
 }
 
-void Accessors::ErrorStackSetter(v8::Local<v8::Name> name,
-                                 v8::Local<v8::Value> val,
-                                 const v8::PropertyCallbackInfo<void>& info) {
+void Accessors::ErrorStackSetter(
+    v8::Local<v8::Name> name, v8::Local<v8::Value> val,
+    const v8::PropertyCallbackInfo<v8::Boolean>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   HandleScope scope(isolate);
   Handle<JSObject> obj =
diff --git a/src/accessors.h b/src/accessors.h
index 2171a35..f53d309 100644
--- a/src/accessors.h
+++ b/src/accessors.h
@@ -37,7 +37,6 @@
   V(ScriptEvalFromScriptPosition) \
   V(ScriptEvalFromFunctionName)   \
   V(ScriptId)                     \
-  V(ScriptLineEnds)               \
   V(ScriptLineOffset)             \
   V(ScriptName)                   \
   V(ScriptSource)                 \
@@ -48,10 +47,11 @@
   V(StringLength)
 
 #define ACCESSOR_SETTER_LIST(V) \
-  V(ReconfigureToDataProperty)  \
   V(ArrayLengthSetter)          \
   V(ErrorStackSetter)           \
-  V(FunctionPrototypeSetter)
+  V(FunctionPrototypeSetter)    \
+  V(ModuleNamespaceEntrySetter) \
+  V(ReconfigureToDataProperty)
 
 // Accessors contains all predefined proxy accessors.
 
@@ -70,10 +70,16 @@
 
 #define ACCESSOR_SETTER_DECLARATION(name)                                \
   static void name(v8::Local<v8::Name> name, v8::Local<v8::Value> value, \
-                   const v8::PropertyCallbackInfo<void>& info);
+                   const v8::PropertyCallbackInfo<v8::Boolean>& info);
   ACCESSOR_SETTER_LIST(ACCESSOR_SETTER_DECLARATION)
 #undef ACCESSOR_SETTER_DECLARATION
 
+  static void ModuleNamespaceEntryGetter(
+      v8::Local<v8::Name> name,
+      const v8::PropertyCallbackInfo<v8::Value>& info);
+  static Handle<AccessorInfo> ModuleNamespaceEntryInfo(
+      Isolate* isolate, Handle<String> name, PropertyAttributes attributes);
+
   enum DescriptorId {
 #define ACCESSOR_INFO_DECLARATION(name) \
     k##name##Getter, \
@@ -93,12 +99,21 @@
   static bool IsJSObjectFieldAccessor(Handle<Map> map, Handle<Name> name,
                                       int* object_offset);
 
+  // Create an AccessorInfo. The setter is optional (can be nullptr).
+  //
+  // Note that the type of setter is AccessorNameBooleanSetterCallback instead
+  // of v8::AccessorNameSetterCallback.  The difference is that the former can
+  // set a (boolean) return value. The setter should roughly follow the same
+  // conventions as many of the internal methods in objects.cc:
+  // - The return value is unset iff there was an exception.
+  // - If the ShouldThrow argument is true, the return value must not be false.
+  typedef void (*AccessorNameBooleanSetterCallback)(
+      Local<v8::Name> property, Local<v8::Value> value,
+      const PropertyCallbackInfo<v8::Boolean>& info);
+
   static Handle<AccessorInfo> MakeAccessor(
-      Isolate* isolate,
-      Handle<Name> name,
-      AccessorNameGetterCallback getter,
-      AccessorNameSetterCallback setter,
-      PropertyAttributes attributes);
+      Isolate* isolate, Handle<Name> name, AccessorNameGetterCallback getter,
+      AccessorNameBooleanSetterCallback setter, PropertyAttributes attributes);
 };
 
 }  // namespace internal
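
A setter obeying these conventions, modeled on ModuleNamespaceEntrySetter above: leave the return value unset when an exception is scheduled, and set false only for a silent sloppy-mode failure. A sketch for inside v8::internal (the TypeError arguments are illustrative):

    void ReadOnlySetter(v8::Local<v8::Name> name, v8::Local<v8::Value> val,
                        const v8::PropertyCallbackInfo<v8::Boolean>& info) {
      i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
      HandleScope scope(isolate);
      Handle<JSObject> holder =
          Handle<JSObject>::cast(Utils::OpenHandle(*info.Holder()));
      if (info.ShouldThrowOnError()) {
        isolate->Throw(*isolate->factory()->NewTypeError(
            MessageTemplate::kStrictReadOnlyProperty, Utils::OpenHandle(*name),
            i::Object::TypeOf(isolate, holder), holder));
        isolate->OptionalRescheduleException(false);
        // Return value left unset: an exception is pending.
      } else {
        info.GetReturnValue().Set(false);  // sloppy mode: fail silently
      }
    }
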
diff --git a/src/address-map.cc b/src/address-map.cc
index 3122b33..79f8e62 100644
--- a/src/address-map.cc
+++ b/src/address-map.cc
@@ -13,7 +13,7 @@
 RootIndexMap::RootIndexMap(Isolate* isolate) {
   map_ = isolate->root_index_map();
   if (map_ != NULL) return;
-  map_ = new base::HashMap();
+  map_ = new HeapObjectToIndexHashMap();
   for (uint32_t i = 0; i < Heap::kStrongRootListLength; i++) {
     Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(i);
     Object* root = isolate->heap()->root(root_index);
@@ -22,12 +22,12 @@
     // not be referenced through the root list in the snapshot.
     if (isolate->heap()->RootCanBeTreatedAsConstant(root_index)) {
       HeapObject* heap_object = HeapObject::cast(root);
-      base::HashMap::Entry* entry = LookupEntry(map_, heap_object, false);
-      if (entry != NULL) {
+      Maybe<uint32_t> maybe_index = map_->Get(heap_object);
+      if (maybe_index.IsJust()) {
         // Some are initialized to a previous value in the root list.
-        DCHECK_LT(GetValue(entry), i);
+        DCHECK_LT(maybe_index.FromJust(), i);
       } else {
-        SetValue(LookupEntry(map_, heap_object, true), i);
+        map_->Set(heap_object, i);
       }
     } else {
       // Immortal immovable root objects are constant and allocated on the first
diff --git a/src/address-map.h b/src/address-map.h
index 95e9cb0..d50847f 100644
--- a/src/address-map.h
+++ b/src/address-map.h
@@ -5,6 +5,7 @@
 #ifndef V8_ADDRESS_MAP_H_
 #define V8_ADDRESS_MAP_H_
 
+#include "include/v8.h"
 #include "src/assert-scope.h"
 #include "src/base/hashmap.h"
 #include "src/objects.h"
@@ -12,49 +13,50 @@
 namespace v8 {
 namespace internal {
 
-class AddressMapBase {
- protected:
-  static void SetValue(base::HashMap::Entry* entry, uint32_t v) {
-    entry->value = reinterpret_cast<void*>(v);
+template <typename Type>
+class PointerToIndexHashMap
+    : public base::TemplateHashMapImpl<uintptr_t, uint32_t,
+                                       base::KeyEqualityMatcher<intptr_t>,
+                                       base::DefaultAllocationPolicy> {
+ public:
+  typedef base::TemplateHashMapEntry<uintptr_t, uint32_t> Entry;
+
+  inline void Set(Type value, uint32_t index) {
+    uintptr_t key = Key(value);
+    LookupOrInsert(key, Hash(key))->value = index;
   }
 
-  static uint32_t GetValue(base::HashMap::Entry* entry) {
-    return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
-  }
-
-  inline static base::HashMap::Entry* LookupEntry(base::HashMap* map,
-                                                  HeapObject* obj,
-                                                  bool insert) {
-    if (insert) {
-      map->LookupOrInsert(Key(obj), Hash(obj));
-    }
-    return map->Lookup(Key(obj), Hash(obj));
+  inline Maybe<uint32_t> Get(Type value) const {
+    uintptr_t key = Key(value);
+    Entry* entry = Lookup(key, Hash(key));
+    if (entry == nullptr) return Nothing<uint32_t>();
+    return Just(entry->value);
   }
 
  private:
-  static uint32_t Hash(HeapObject* obj) {
-    return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
+  static uintptr_t Key(Type value) {
+    return reinterpret_cast<uintptr_t>(value);
   }
 
-  static void* Key(HeapObject* obj) {
-    return reinterpret_cast<void*>(obj->address());
-  }
+  static uint32_t Hash(uintptr_t key) { return static_cast<uint32_t>(key); }
 };
 
-class RootIndexMap : public AddressMapBase {
+class AddressToIndexHashMap : public PointerToIndexHashMap<Address> {};
+class HeapObjectToIndexHashMap : public PointerToIndexHashMap<HeapObject*> {};
+
+class RootIndexMap {
  public:
   explicit RootIndexMap(Isolate* isolate);
 
   static const int kInvalidRootIndex = -1;
 
   int Lookup(HeapObject* obj) {
-    base::HashMap::Entry* entry = LookupEntry(map_, obj, false);
-    if (entry) return GetValue(entry);
-    return kInvalidRootIndex;
+    Maybe<uint32_t> maybe_index = map_->Get(obj);
+    return maybe_index.IsJust() ? maybe_index.FromJust() : kInvalidRootIndex;
   }
 
  private:
-  base::HashMap* map_;
+  HeapObjectToIndexHashMap* map_;
 
   DISALLOW_COPY_AND_ASSIGN(RootIndexMap);
 };
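
The Maybe-based interface replaces the old raw-entry lookups; a typical round trip looks like this sketch:

    HeapObjectToIndexHashMap map;
    map.Set(heap_object, 42u);                 // insert or overwrite
    Maybe<uint32_t> index = map.Get(heap_object);
    if (index.IsJust()) {
      uint32_t i = index.FromJust();           // 42
    }
    // For a key never Set(): map.Get(other).IsNothing() is true.
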
@@ -186,21 +188,21 @@
 
 // Mapping objects to their location after deserialization.
 // This is used during building, but not at runtime by V8.
-class SerializerReferenceMap : public AddressMapBase {
+class SerializerReferenceMap {
  public:
   SerializerReferenceMap()
       : no_allocation_(), map_(), attached_reference_index_(0) {}
 
   SerializerReference Lookup(HeapObject* obj) {
-    base::HashMap::Entry* entry = LookupEntry(&map_, obj, false);
-    return entry ? SerializerReference(GetValue(entry)) : SerializerReference();
+    Maybe<uint32_t> maybe_index = map_.Get(obj);
+    return maybe_index.IsJust() ? SerializerReference(maybe_index.FromJust())
+                                : SerializerReference();
   }
 
   void Add(HeapObject* obj, SerializerReference b) {
     DCHECK(b.is_valid());
-    DCHECK_NULL(LookupEntry(&map_, obj, false));
-    base::HashMap::Entry* entry = LookupEntry(&map_, obj, true);
-    SetValue(entry, b.bitfield_);
+    DCHECK(map_.Get(obj).IsNothing());
+    map_.Set(obj, b.bitfield_);
   }
 
   SerializerReference AddAttachedReference(HeapObject* attached_reference) {
@@ -212,7 +214,7 @@
 
  private:
   DisallowHeapAllocation no_allocation_;
-  base::HashMap map_;
+  HeapObjectToIndexHashMap map_;
   int attached_reference_index_;
   DISALLOW_COPY_AND_ASSIGN(SerializerReferenceMap);
 };
diff --git a/src/allocation.h b/src/allocation.h
index a92b71f..e87a3f1 100644
--- a/src/allocation.h
+++ b/src/allocation.h
@@ -5,6 +5,7 @@
 #ifndef V8_ALLOCATION_H_
 #define V8_ALLOCATION_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/globals.h"
 
 namespace v8 {
@@ -39,7 +40,7 @@
   void* operator new(size_t size);
   void  operator delete(void* p);
 };
-#define BASE_EMBEDDED : public Embedded
+#define BASE_EMBEDDED : public NON_EXPORTED_BASE(Embedded)
 #else
 #define BASE_EMBEDDED
 #endif
diff --git a/src/api-arguments.h b/src/api-arguments.h
index 9e01f3a..d6d1b95 100644
--- a/src/api-arguments.h
+++ b/src/api-arguments.h
@@ -88,7 +88,7 @@
         Smi::FromInt(should_throw == Object::THROW_ON_ERROR ? 1 : 0);
 
     // Here the hole is set as default value.
-    // It cannot escape into js as it's remove in Call below.
+    // It cannot escape into js as it's removed in Call below.
     values[T::kReturnValueDefaultValueIndex] =
         isolate->heap()->the_hole_value();
     values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
diff --git a/src/api-natives.cc b/src/api-natives.cc
index ea2cce5..3fe59e2 100644
--- a/src/api-natives.cc
+++ b/src/api-natives.cc
@@ -437,9 +437,7 @@
           JSObject::GetProperty(parent_instance,
                                 isolate->factory()->prototype_string()),
           JSFunction);
-      MAYBE_RETURN(JSObject::SetPrototype(prototype, parent_prototype, false,
-                                          Object::THROW_ON_ERROR),
-                   MaybeHandle<JSFunction>());
+      JSObject::ForceSetPrototype(prototype, parent_prototype);
     }
   }
   Handle<JSFunction> function = ApiNatives::CreateApiFunction(
@@ -533,24 +531,22 @@
 void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
                                  Handle<Name> name, Handle<Object> value,
                                  PropertyAttributes attributes) {
-  const int kSize = 3;
   PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
   auto details_handle = handle(details.AsSmi(), isolate);
-  Handle<Object> data[kSize] = {name, details_handle, value};
-  AddPropertyToPropertyList(isolate, info, kSize, data);
+  Handle<Object> data[] = {name, details_handle, value};
+  AddPropertyToPropertyList(isolate, info, arraysize(data), data);
 }
 
 
 void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
                                  Handle<Name> name, v8::Intrinsic intrinsic,
                                  PropertyAttributes attributes) {
-  const int kSize = 4;
   auto value = handle(Smi::FromInt(intrinsic), isolate);
   auto intrinsic_marker = isolate->factory()->true_value();
   PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
   auto details_handle = handle(details.AsSmi(), isolate);
-  Handle<Object> data[kSize] = {name, intrinsic_marker, details_handle, value};
-  AddPropertyToPropertyList(isolate, info, kSize, data);
+  Handle<Object> data[] = {name, intrinsic_marker, details_handle, value};
+  AddPropertyToPropertyList(isolate, info, arraysize(data), data);
 }
 
 
@@ -560,11 +556,10 @@
                                      Handle<FunctionTemplateInfo> getter,
                                      Handle<FunctionTemplateInfo> setter,
                                      PropertyAttributes attributes) {
-  const int kSize = 4;
   PropertyDetails details(attributes, ACCESSOR, 0, PropertyCellType::kNoCell);
   auto details_handle = handle(details.AsSmi(), isolate);
-  Handle<Object> data[kSize] = {name, details_handle, getter, setter};
-  AddPropertyToPropertyList(isolate, info, kSize, data);
+  Handle<Object> data[] = {name, details_handle, getter, setter};
+  AddPropertyToPropertyList(isolate, info, arraysize(data), data);
 }
 
 
@@ -618,10 +613,12 @@
   }
 
   int internal_field_count = 0;
+  bool immutable_proto = false;
   if (!obj->instance_template()->IsUndefined(isolate)) {
     Handle<ObjectTemplateInfo> instance_template = Handle<ObjectTemplateInfo>(
         ObjectTemplateInfo::cast(obj->instance_template()));
     internal_field_count = instance_template->internal_field_count();
+    immutable_proto = instance_template->immutable_proto();
   }
 
   // TODO(svenpanne) Kill ApiInstanceType and refactor things by generalizing
@@ -681,6 +678,8 @@
     map->set_is_constructor(true);
   }
 
+  if (immutable_proto) map->set_immutable_proto(true);
+
   return result;
 }
 
diff --git a/src/api.cc b/src/api.cc
index 44933b9..da7f2ef 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -73,6 +73,8 @@
 #include "src/version.h"
 #include "src/vm-state-inl.h"
 #include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-result.h"
 
 namespace v8 {
 
@@ -197,7 +199,6 @@
       : isolate_(isolate), context_(context), escaped_(false) {
     // TODO(dcarney): remove this when blink stops crashing.
     DCHECK(!isolate_->external_caught_exception());
-    isolate_->IncrementJsCallsFromApiCounter();
     isolate_->handle_scope_implementer()->IncrementCallDepth();
     if (!context.IsEmpty()) {
       i::Handle<i::Context> env = Utils::OpenHandle(*context);
@@ -273,10 +274,23 @@
   i::Isolate* isolate = i::Isolate::Current();
   char last_few_messages[Heap::kTraceRingBufferSize + 1];
   char js_stacktrace[Heap::kStacktraceBufferSize + 1];
+  i::HeapStats heap_stats;
+
+  if (isolate == nullptr) {
+    // On a background thread -> we cannot retrieve memory information from the
+    // Isolate. Write easy-to-recognize values on the stack.
+    memset(last_few_messages, 0x0badc0de, Heap::kTraceRingBufferSize + 1);
+    memset(js_stacktrace, 0x0badc0de, Heap::kStacktraceBufferSize + 1);
+    memset(&heap_stats, 0xbadc0de, sizeof(heap_stats));
+    // Note that the embedder's oom handler won't be called in this case. We
+    // just crash.
+    FATAL("API fatal error handler returned after process out of memory");
+    return;
+  }
+
   memset(last_few_messages, 0, Heap::kTraceRingBufferSize + 1);
   memset(js_stacktrace, 0, Heap::kStacktraceBufferSize + 1);
 
-  i::HeapStats heap_stats;
   intptr_t start_marker;
   heap_stats.start_marker = &start_marker;
   size_t new_space_size;
@@ -509,7 +523,8 @@
 }
 
 StartupData SnapshotCreator::CreateBlob(
-    SnapshotCreator::FunctionCodeHandling function_code_handling) {
+    SnapshotCreator::FunctionCodeHandling function_code_handling,
+    SerializeInternalFieldsCallback callback) {
   SnapshotCreatorData* data = SnapshotCreatorData::cast(data_);
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(data->isolate_);
   DCHECK(!data->created_);
@@ -544,18 +559,30 @@
   }
   data->contexts_.Clear();
 
+#ifdef DEBUG
+  i::ExternalReferenceTable::instance(isolate)->ResetCount();
+#endif  // DEBUG
+
   i::StartupSerializer startup_serializer(isolate, function_code_handling);
   startup_serializer.SerializeStrongReferences();
 
   // Serialize each context with a new partial serializer.
   i::List<i::SnapshotData*> context_snapshots(num_contexts);
   for (int i = 0; i < num_contexts; i++) {
-    i::PartialSerializer partial_serializer(isolate, &startup_serializer);
+    i::PartialSerializer partial_serializer(isolate, &startup_serializer,
+                                            callback);
     partial_serializer.Serialize(&contexts[i]);
     context_snapshots.Add(new i::SnapshotData(&partial_serializer));
   }
 
   startup_serializer.SerializeWeakReferencesAndDeferred();
+
+#ifdef DEBUG
+  if (i::FLAG_external_reference_stats) {
+    i::ExternalReferenceTable::instance(isolate)->PrintCount();
+  }
+#endif  // DEBUG
+
   i::SnapshotData startup_snapshot(&startup_serializer);
   StartupData result =
       i::Snapshot::CreateSnapshotBlob(&startup_snapshot, &context_snapshots);
@@ -642,6 +669,7 @@
 
 void V8::SetFlagsFromString(const char* str, int length) {
   i::FlagList::SetFlagsFromString(str, length);
+  i::FlagList::EnforceFlagImplications();
 }
 
 
@@ -696,13 +724,13 @@
   CHECK(source != NULL || source_length_ == 0);
 }
 
-
 ResourceConstraints::ResourceConstraints()
     : max_semi_space_size_(0),
       max_old_space_size_(0),
       max_executable_size_(0),
       stack_limit_(NULL),
-      code_range_size_(0) { }
+      code_range_size_(0),
+      max_zone_pool_size_(0) {}
 
 void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
                                             uint64_t virtual_memory_limit) {
@@ -722,18 +750,25 @@
     set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeLowMemoryDevice);
     set_max_old_space_size(i::Heap::kMaxOldSpaceSizeLowMemoryDevice);
     set_max_executable_size(i::Heap::kMaxExecutableSizeLowMemoryDevice);
+    set_max_zone_pool_size(i::AccountingAllocator::kMaxPoolSizeLowMemoryDevice);
   } else if (physical_memory <= medium_limit) {
     set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeMediumMemoryDevice);
     set_max_old_space_size(i::Heap::kMaxOldSpaceSizeMediumMemoryDevice);
     set_max_executable_size(i::Heap::kMaxExecutableSizeMediumMemoryDevice);
+    set_max_zone_pool_size(
+        i::AccountingAllocator::kMaxPoolSizeMediumMemoryDevice);
   } else if (physical_memory <= high_limit) {
     set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeHighMemoryDevice);
     set_max_old_space_size(i::Heap::kMaxOldSpaceSizeHighMemoryDevice);
     set_max_executable_size(i::Heap::kMaxExecutableSizeHighMemoryDevice);
+    set_max_zone_pool_size(
+        i::AccountingAllocator::kMaxPoolSizeHighMemoryDevice);
   } else {
     set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeHugeMemoryDevice);
     set_max_old_space_size(i::Heap::kMaxOldSpaceSizeHugeMemoryDevice);
     set_max_executable_size(i::Heap::kMaxExecutableSizeHugeMemoryDevice);
+    set_max_zone_pool_size(
+        i::AccountingAllocator::kMaxPoolSizeHugeMemoryDevice);
   }
 
   if (virtual_memory_limit > 0 && i::kRequiresCodeRange) {
@@ -752,11 +787,14 @@
   int old_space_size = constraints.max_old_space_size();
   int max_executable_size = constraints.max_executable_size();
   size_t code_range_size = constraints.code_range_size();
+  size_t max_pool_size = constraints.max_zone_pool_size();
   if (semi_space_size != 0 || old_space_size != 0 ||
       max_executable_size != 0 || code_range_size != 0) {
     isolate->heap()->ConfigureHeap(semi_space_size, old_space_size,
                                    max_executable_size, code_range_size);
   }
+  isolate->allocator()->ConfigureSegmentPool(max_pool_size);
+
   if (constraints.stack_limit() != NULL) {
     uintptr_t limit = reinterpret_cast<uintptr_t>(constraints.stack_limit());
     isolate->stack_guard()->SetStackLimit(limit);
@@ -786,6 +824,11 @@
   return result.location();
 }
 
+void V8::RegisterExternallyReferencedObject(i::Object** object,
+                                            i::Isolate* isolate) {
+  isolate->heap()->RegisterExternallyReferencedObject(object);
+}
+
 void V8::MakeWeak(i::Object** location, void* parameter,
                   int internal_field_index1, int internal_field_index2,
                   WeakCallbackInfo<void>::Callback weak_callback) {
@@ -1061,7 +1104,7 @@
   auto value_obj = Utils::OpenHandle(*value);
   CHECK(!value_obj->IsJSReceiver() || value_obj->IsTemplateInfo());
   if (value_obj->IsObjectTemplateInfo()) {
-    templ->set_serial_number(i::Smi::FromInt(0));
+    templ->set_serial_number(i::Smi::kZero);
     if (templ->IsFunctionTemplateInfo()) {
       i::Handle<i::FunctionTemplateInfo>::cast(templ)->set_do_not_cache(true);
     }
@@ -1071,6 +1114,11 @@
                                  static_cast<i::PropertyAttributes>(attribute));
 }
 
+void Template::SetPrivate(v8::Local<Private> name, v8::Local<Data> value,
+                          v8::PropertyAttribute attribute) {
+  Set(Utils::ToLocal(Utils::OpenHandle(reinterpret_cast<Name*>(*name))), value,
+      attribute);
+}
 
 void Template::SetAccessorProperty(
     v8::Local<v8::Name> name,
@@ -1134,11 +1182,11 @@
   info->set_parent_template(*Utils::OpenHandle(*value));
 }
 
-
 static Local<FunctionTemplate> FunctionTemplateNew(
     i::Isolate* isolate, FunctionCallback callback,
     experimental::FastAccessorBuilder* fast_handler, v8::Local<Value> data,
-    v8::Local<Signature> signature, int length, bool do_not_cache) {
+    v8::Local<Signature> signature, int length, bool do_not_cache,
+    v8::Local<Private> cached_property_name = v8::Local<Private>()) {
   i::Handle<i::Struct> struct_obj =
       isolate->factory()->NewStruct(i::FUNCTION_TEMPLATE_INFO_TYPE);
   i::Handle<i::FunctionTemplateInfo> obj =
@@ -1162,6 +1210,10 @@
   obj->set_accept_any_receiver(true);
   if (!signature.IsEmpty())
     obj->set_signature(*Utils::OpenHandle(*signature));
+  obj->set_cached_property_name(
+      cached_property_name.IsEmpty()
+          ? isolate->heap()->the_hole_value()
+          : *Utils::OpenHandle(*cached_property_name));
   return Utils::ToLocal(obj);
 }
 
@@ -1199,13 +1251,21 @@
     experimental::FastAccessorBuilder* fast_handler, v8::Local<Value> data,
     v8::Local<Signature> signature, int length) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  DCHECK(!i_isolate->serializer_enabled());
   LOG_API(i_isolate, FunctionTemplate, NewWithFastHandler);
   ENTER_V8(i_isolate);
   return FunctionTemplateNew(i_isolate, callback, fast_handler, data, signature,
                              length, false);
 }
 
+Local<FunctionTemplate> FunctionTemplate::NewWithCache(
+    Isolate* isolate, FunctionCallback callback, Local<Private> cache_property,
+    Local<Value> data, Local<Signature> signature, int length) {
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  LOG_API(i_isolate, FunctionTemplate, NewWithFastHandler);
+  ENTER_V8(i_isolate);
+  return FunctionTemplateNew(i_isolate, callback, nullptr, data, signature,
+                             length, false, cache_property);
+}
 
 Local<Signature> Signature::New(Isolate* isolate,
                                 Local<FunctionTemplate> receiver) {
@@ -1271,10 +1331,13 @@
 i::Handle<i::AccessorInfo> MakeAccessorInfo(
     v8::Local<Name> name, Getter getter, Setter setter, v8::Local<Value> data,
     v8::AccessControl settings, v8::PropertyAttribute attributes,
-    v8::Local<AccessorSignature> signature, bool is_special_data_property) {
+    v8::Local<AccessorSignature> signature, bool is_special_data_property,
+    bool replace_on_access) {
   i::Isolate* isolate = Utils::OpenHandle(*name)->GetIsolate();
   i::Handle<i::AccessorInfo> obj = isolate->factory()->NewAccessorInfo();
   SET_FIELD_WRAPPED(obj, set_getter, getter);
+  DCHECK_IMPLIES(replace_on_access,
+                 is_special_data_property && setter == nullptr);
   if (is_special_data_property && setter == nullptr) {
     setter = reinterpret_cast<Setter>(&i::Accessors::ReconfigureToDataProperty);
   }
@@ -1286,6 +1349,7 @@
   }
   obj->set_data(*Utils::OpenHandle(*data));
   obj->set_is_special_data_property(is_special_data_property);
+  obj->set_replace_on_access(replace_on_access);
   return SetAccessorInfoProperties(obj, name, settings, attributes, signature);
 }
 
@@ -1395,7 +1459,7 @@
   obj->set_serial_number(i::Smi::FromInt(next_serial_number));
   if (!constructor.IsEmpty())
     obj->set_constructor(*Utils::OpenHandle(*constructor));
-  obj->set_data(i::Smi::FromInt(0));
+  obj->set_data(i::Smi::kZero);
   return Utils::ToLocal(obj);
 }
 
@@ -1437,20 +1501,21 @@
   return constructor;
 }
 
-
 template <typename Getter, typename Setter, typename Data, typename Template>
 static bool TemplateSetAccessor(Template* template_obj, v8::Local<Name> name,
                                 Getter getter, Setter setter, Data data,
                                 AccessControl settings,
                                 PropertyAttribute attribute,
                                 v8::Local<AccessorSignature> signature,
-                                bool is_special_data_property) {
+                                bool is_special_data_property,
+                                bool replace_on_access) {
   auto info = Utils::OpenHandle(template_obj);
   auto isolate = info->GetIsolate();
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
-  auto obj = MakeAccessorInfo(name, getter, setter, data, settings, attribute,
-                              signature, is_special_data_property);
+  auto obj =
+      MakeAccessorInfo(name, getter, setter, data, settings, attribute,
+                       signature, is_special_data_property, replace_on_access);
   if (obj.is_null()) return false;
   i::ApiNatives::AddNativeDataProperty(isolate, info, obj);
   return true;
@@ -1465,7 +1530,7 @@
                                      v8::Local<AccessorSignature> signature,
                                      AccessControl settings) {
   TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
-                      signature, true);
+                      signature, true, false);
 }
 
 
@@ -1477,9 +1542,17 @@
                                      v8::Local<AccessorSignature> signature,
                                      AccessControl settings) {
   TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
-                      signature, true);
+                      signature, true, false);
 }
 
+void Template::SetLazyDataProperty(v8::Local<Name> name,
+                                   AccessorNameGetterCallback getter,
+                                   v8::Local<Value> data,
+                                   PropertyAttribute attribute) {
+  TemplateSetAccessor(
+      this, name, getter, static_cast<AccessorNameSetterCallback>(nullptr),
+      data, DEFAULT, attribute, Local<AccessorSignature>(), true, true);
+}
 
 void Template::SetIntrinsicDataProperty(Local<Name> name, Intrinsic intrinsic,
                                         PropertyAttribute attribute) {
@@ -1500,7 +1573,7 @@
                                  PropertyAttribute attribute,
                                  v8::Local<AccessorSignature> signature) {
   TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
-                      signature, i::FLAG_disable_old_api_accessors);
+                      signature, i::FLAG_disable_old_api_accessors, false);
 }
 
 
@@ -1511,7 +1584,7 @@
                                  PropertyAttribute attribute,
                                  v8::Local<AccessorSignature> signature) {
   TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
-                      signature, i::FLAG_disable_old_api_accessors);
+                      signature, i::FLAG_disable_old_api_accessors, false);
 }
 
 template <typename Getter, typename Setter, typename Query, typename Descriptor,
@@ -1900,22 +1973,13 @@
   return ToApiHandle<String>(i::handle(module_requests->get(i), isolate));
 }
 
-void Module::SetEmbedderData(Local<Value> data) {
-  Utils::OpenHandle(this)->set_embedder_data(*Utils::OpenHandle(*data));
-}
-
-Local<Value> Module::GetEmbedderData() const {
-  auto self = Utils::OpenHandle(this);
-  return ToApiHandle<Value>(
-      i::handle(self->embedder_data(), self->GetIsolate()));
-}
+int Module::GetIdentityHash() const { return Utils::OpenHandle(this)->hash(); }
 
 bool Module::Instantiate(Local<Context> context,
-                         Module::ResolveCallback callback,
-                         Local<Value> callback_data) {
+                         Module::ResolveCallback callback) {
   PREPARE_FOR_EXECUTION_BOOL(context, Module, Instantiate);
-  has_pending_exception = !i::Module::Instantiate(
-      Utils::OpenHandle(this), context, callback, callback_data);
+  has_pending_exception =
+      !i::Module::Instantiate(Utils::OpenHandle(this), context, callback);
   RETURN_ON_FAILED_EXECUTION_BOOL();
   return true;
 }
@@ -1930,7 +1994,7 @@
 
   i::Handle<i::Module> self = Utils::OpenHandle(this);
   // It's an API error to call Evaluate before Instantiate.
-  CHECK(self->code()->IsJSFunction());
+  CHECK(self->instantiated());
 
   Local<Value> result;
   has_pending_exception = !ToLocal(i::Module::Evaluate(self), &result);
@@ -2252,18 +2316,10 @@
 
   source->info->set_script(script);
 
-  {
-    // Create a canonical handle scope if compiling ignition bytecode. This is
-    // required by the constant array builder to de-duplicate objects without
-    // dereferencing handles.
-    std::unique_ptr<i::CanonicalHandleScope> canonical;
-    if (i::FLAG_ignition) canonical.reset(new i::CanonicalHandleScope(isolate));
-
-    // Do the parsing tasks which need to be done on the main thread. This will
-    // also handle parse errors.
-    source->parser->Internalize(isolate, script,
-                                source->info->literal() == nullptr);
-  }
+  // Do the parsing tasks which need to be done on the main thread. This will
+  // also handle parse errors.
+  source->parser->Internalize(isolate, script,
+                              source->info->literal() == nullptr);
   source->parser->HandleSourceURLComments(isolate, script);
 
   i::Handle<i::SharedFunctionInfo> result;
@@ -2926,6 +2982,17 @@
   return Nothing<bool>();
 }
 
+void* ValueSerializer::Delegate::ReallocateBufferMemory(void* old_buffer,
+                                                        size_t size,
+                                                        size_t* actual_size) {
+  *actual_size = size;
+  return realloc(old_buffer, size);
+}
+
+void ValueSerializer::Delegate::FreeBufferMemory(void* buffer) {
+  return free(buffer);
+}
+
 struct ValueSerializer::PrivateData {
   explicit PrivateData(i::Isolate* i, ValueSerializer::Delegate* delegate)
       : isolate(i), serializer(i, delegate) {}
@@ -2958,6 +3025,10 @@
   return private_->serializer.ReleaseBuffer();
 }
 
+std::pair<uint8_t*, size_t> ValueSerializer::Release() {
+  return private_->serializer.Release();
+}
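
Release() hands back a raw {buffer, size} pair whose memory came from the delegate hooks added above, so a delegate that overrides one hook should override both. A sketch of routing them through a custom allocator, assuming ArenaRealloc/ArenaFree embedder helpers (the Delegate interface also requires the data-clone error hook):

class ArenaDelegate : public v8::ValueSerializer::Delegate {
 public:
  void ThrowDataCloneError(v8::Local<v8::String> message) override {
    // Surface the error to the embedder; required by the interface.
  }
  void* ReallocateBufferMemory(void* old_buffer, size_t size,
                               size_t* actual_size) override {
    *actual_size = size;                    // report the usable capacity
    return ArenaRealloc(old_buffer, size);  // assumed embedder helper
  }
  void FreeBufferMemory(void* buffer) override {
    ArenaFree(buffer);                      // assumed embedder helper
  }
};

With such a delegate installed, the pair returned by Release() points into arena memory, so the default realloc/free ownership rules no longer apply.
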
+
 void ValueSerializer::TransferArrayBuffer(uint32_t transfer_id,
                                           Local<ArrayBuffer> array_buffer) {
   private_->serializer.TransferArrayBuffer(transfer_id,
@@ -3053,11 +3124,6 @@
   return Just(true);
 }
 
-Maybe<bool> ValueDeserializer::ReadHeader() {
-  Isolate* isolate = reinterpret_cast<Isolate*>(private_->isolate);
-  return ReadHeader(isolate->GetEnteredContext());
-}
-
 void ValueDeserializer::SetSupportsLegacyWireFormat(
     bool supports_legacy_wire_format) {
   private_->supports_legacy_wire_format = supports_legacy_wire_format;
@@ -4479,8 +4545,9 @@
   i::Handle<i::JSObject> obj =
       i::Handle<i::JSObject>::cast(Utils::OpenHandle(self));
   v8::Local<AccessorSignature> signature;
-  auto info = MakeAccessorInfo(name, getter, setter, data, settings, attributes,
-                               signature, i::FLAG_disable_old_api_accessors);
+  auto info =
+      MakeAccessorInfo(name, getter, setter, data, settings, attributes,
+                       signature, i::FLAG_disable_old_api_accessors, false);
   if (info.is_null()) return Nothing<bool>();
   bool fast = obj->HasFastProperties();
   i::Handle<i::Object> result;
@@ -6052,6 +6119,9 @@
       proxy_constructor->set_prototype_template(
           *Utils::OpenHandle(*global_template));
 
+      proxy_template->SetInternalFieldCount(
+          global_template->InternalFieldCount());
+
       // Migrate security handlers from global_template to
       // proxy_template.  Temporarily removing access check
       // information from the global template.
@@ -6316,7 +6386,16 @@
 bool FunctionTemplate::HasInstance(v8::Local<v8::Value> value) {
   auto self = Utils::OpenHandle(this);
   auto obj = Utils::OpenHandle(*value);
-  return obj->IsJSObject() && self->IsTemplateFor(i::JSObject::cast(*obj));
+  if (obj->IsJSObject() && self->IsTemplateFor(i::JSObject::cast(*obj))) {
+    return true;
+  }
+  if (obj->IsJSGlobalProxy()) {
+    // If it's a global proxy object, then test with the global object.
+    i::PrototypeIterator iter(i::JSObject::cast(*obj)->map());
+    if (iter.IsAtEnd()) return false;
+    return self->IsTemplateFor(iter.GetCurrent<i::JSGlobalObject>());
+  }
+  return false;
 }
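
The practical case behind the new branch: when a template's instance template backs a context's global, the embedder only ever holds the JSGlobalProxy, so HasInstance used to answer false for objects that are in fact instances. A hedged sketch of that situation:

v8::Local<v8::FunctionTemplate> tmpl = v8::FunctionTemplate::New(isolate);
v8::Local<v8::Context> context =
    v8::Context::New(isolate, nullptr, tmpl->InstanceTemplate());
// Global() returns the proxy, not the JSGlobalObject behind it.
v8::Local<v8::Object> global = context->Global();
bool matches = tmpl->HasInstance(global);  // now true; previously false
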
 
 
@@ -7184,34 +7263,77 @@
   RETURN_ESCAPED(result);
 }
 
+Local<String> WasmCompiledModule::GetWasmWireBytes() {
+  i::Handle<i::JSObject> obj =
+      i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
+  i::Handle<i::WasmCompiledModule> compiled_part =
+      i::handle(i::WasmCompiledModule::cast(obj->GetInternalField(0)));
+  i::Handle<i::String> wire_bytes = compiled_part->module_bytes();
+  return Local<String>::Cast(Utils::ToLocal(wire_bytes));
+}
+
 WasmCompiledModule::SerializedModule WasmCompiledModule::Serialize() {
   i::Handle<i::JSObject> obj =
       i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
-  i::Handle<i::FixedArray> compiled_part =
-      i::handle(i::FixedArray::cast(obj->GetInternalField(0)));
+  i::Handle<i::WasmCompiledModule> compiled_part =
+      i::handle(i::WasmCompiledModule::cast(obj->GetInternalField(0)));
+
   std::unique_ptr<i::ScriptData> script_data =
       i::WasmCompiledModuleSerializer::SerializeWasmModule(obj->GetIsolate(),
                                                            compiled_part);
   script_data->ReleaseDataOwnership();
+
   size_t size = static_cast<size_t>(script_data->length());
   return {std::unique_ptr<const uint8_t[]>(script_data->data()), size};
 }
 
 MaybeLocal<WasmCompiledModule> WasmCompiledModule::Deserialize(
     Isolate* isolate,
-    const WasmCompiledModule::SerializedModule& serialized_data) {
-  int size = static_cast<int>(serialized_data.second);
-  i::ScriptData sc(serialized_data.first.get(), size);
+    const WasmCompiledModule::CallerOwnedBuffer& serialized_module,
+    const WasmCompiledModule::CallerOwnedBuffer& wire_bytes) {
+  int size = static_cast<int>(serialized_module.second);
+  i::ScriptData sc(serialized_module.first, size);
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   i::MaybeHandle<i::FixedArray> maybe_compiled_part =
-      i::WasmCompiledModuleSerializer::DeserializeWasmModule(i_isolate, &sc);
+      i::WasmCompiledModuleSerializer::DeserializeWasmModule(
+          i_isolate, &sc,
+          {wire_bytes.first, static_cast<int>(wire_bytes.second)});
   i::Handle<i::FixedArray> compiled_part;
   if (!maybe_compiled_part.ToHandle(&compiled_part)) {
     return MaybeLocal<WasmCompiledModule>();
   }
+  i::Handle<i::WasmCompiledModule> compiled_module =
+      handle(i::WasmCompiledModule::cast(*compiled_part));
   return Local<WasmCompiledModule>::Cast(
-      Utils::ToLocal(i::wasm::CreateCompiledModuleObject(
-          i_isolate, compiled_part, i::wasm::ModuleOrigin::kWasmOrigin)));
+      Utils::ToLocal(i::Handle<i::JSObject>::cast(
+          i::WasmModuleObject::New(i_isolate, compiled_module))));
+}
+
+MaybeLocal<WasmCompiledModule> WasmCompiledModule::DeserializeOrCompile(
+    Isolate* isolate,
+    const WasmCompiledModule::CallerOwnedBuffer& serialized_module,
+    const WasmCompiledModule::CallerOwnedBuffer& wire_bytes) {
+  MaybeLocal<WasmCompiledModule> ret =
+      Deserialize(isolate, serialized_module, wire_bytes);
+  if (!ret.IsEmpty()) {
+    return ret;
+  }
+  return Compile(isolate, wire_bytes.first, wire_bytes.second);
+}
+
+MaybeLocal<WasmCompiledModule> WasmCompiledModule::Compile(Isolate* isolate,
+                                                           const uint8_t* start,
+                                                           size_t length) {
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  i::wasm::ErrorThrower thrower(i_isolate, "WasmCompiledModule::Compile()");
+  i::MaybeHandle<i::JSObject> maybe_compiled =
+      i::wasm::CreateModuleObjectFromBytes(
+          i_isolate, start, start + length, &thrower,
+          i::wasm::ModuleOrigin::kWasmOrigin, i::Handle<i::Script>::null(),
+          nullptr, nullptr);
+  if (maybe_compiled.is_null()) return MaybeLocal<WasmCompiledModule>();
+  return Local<WasmCompiledModule>::Cast(
+      Utils::ToLocal(maybe_compiled.ToHandleChecked()));
 }
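
Persisting compiled wasm now involves two caller-owned buffers: the serialized compiled state and the original wire bytes, the latter doubling as a fallback input when deserialization fails (for example, state produced by a different V8 build). A sketch of the round trip with the storage layer elided:

// Save: capture both the compiled state and the wire bytes.
v8::WasmCompiledModule::SerializedModule state = compiled->Serialize();
v8::Local<v8::String> wire = compiled->GetWasmWireBytes();
// ... persist state.first.get()/state.second and the wire bytes ...

// Restore: each CallerOwnedBuffer is a {const uint8_t*, size_t} pair that
// stays owned by the caller; DeserializeOrCompile falls back to a full
// Compile() from the wire bytes when the serialized state cannot be used.
v8::MaybeLocal<v8::WasmCompiledModule> restored =
    v8::WasmCompiledModule::DeserializeOrCompile(
        isolate, {serialized_ptr, serialized_len},  // reloaded data, assumed
        {wire_ptr, wire_len});
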
 
 // static
@@ -7277,7 +7399,11 @@
   ENTER_V8(i_isolate);
   i::Handle<i::JSArrayBuffer> obj =
       i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared);
-  i::JSArrayBuffer::SetupAllocatingData(obj, i_isolate, byte_length);
+  // TODO(jbroman): It may be useful in the future to provide a MaybeLocal
+  // version that throws an exception or otherwise does not crash.
+  if (!i::JSArrayBuffer::SetupAllocatingData(obj, i_isolate, byte_length)) {
+    i::FatalProcessOutOfMemory("v8::ArrayBuffer::New");
+  }
   return Utils::ToLocal(obj);
 }
 
@@ -7467,8 +7593,12 @@
   ENTER_V8(i_isolate);
   i::Handle<i::JSArrayBuffer> obj =
       i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kShared);
-  i::JSArrayBuffer::SetupAllocatingData(obj, i_isolate, byte_length, true,
-                                        i::SharedFlag::kShared);
+  // TODO(jbroman): It may be useful in the future to provide a MaybeLocal
+  // version that throws an exception or otherwise does not crash.
+  if (!i::JSArrayBuffer::SetupAllocatingData(obj, i_isolate, byte_length, true,
+                                             i::SharedFlag::kShared)) {
+    i::FatalProcessOutOfMemory("v8::SharedArrayBuffer::New");
+  }
   return Utils::ToLocalShared(obj);
 }
 
@@ -7839,6 +7969,8 @@
   }
 
   isolate->set_api_external_references(params.external_references);
+  isolate->set_deserialize_internal_fields_callback(
+      params.deserialize_internal_fields_callback);
   SetResourceConstraints(isolate, params.constraints);
   // TODO(jochen): Once we got rid of Isolate::Current(), we can remove this.
   Isolate::Scope isolate_scope(v8_isolate);
@@ -7992,7 +8124,7 @@
 bool Isolate::GetHeapObjectStatisticsAtLastGC(
     HeapObjectStatistics* object_statistics, size_t type_index) {
   if (!object_statistics) return false;
-  if (!i::FLAG_track_gc_object_stats) return false;
+  if (V8_LIKELY(!i::FLAG_gc_stats)) return false;
 
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
   i::Heap* heap = isolate->heap();
@@ -8245,6 +8377,7 @@
 void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
   isolate->heap()->MemoryPressureNotification(level, Locker::IsLocked(this));
+  isolate->allocator()->MemoryPressureNotification(level);
 }
 
 void Isolate::SetRAILMode(RAILMode rail_mode) {
@@ -8693,6 +8826,299 @@
   return Utils::ToLocal(result);
 }
 
+bool DebugInterface::SetDebugEventListener(Isolate* isolate,
+                                           DebugInterface::EventCallback that,
+                                           Local<Value> data) {
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  ENTER_V8(i_isolate);
+  i::HandleScope scope(i_isolate);
+  i::Handle<i::Object> foreign = i_isolate->factory()->undefined_value();
+  if (that != NULL) {
+    foreign = i_isolate->factory()->NewForeign(FUNCTION_ADDR(that));
+  }
+  i_isolate->debug()->SetEventListener(foreign, Utils::OpenHandle(*data, true));
+  return true;
+}
+
+Local<Context> DebugInterface::GetDebugContext(Isolate* isolate) {
+  return Debug::GetDebugContext(isolate);
+}
+
+MaybeLocal<Value> DebugInterface::Call(Local<Context> context,
+                                       v8::Local<v8::Function> fun,
+                                       v8::Local<v8::Value> data) {
+  return Debug::Call(context, fun, data);
+}
+
+void DebugInterface::SetLiveEditEnabled(Isolate* isolate, bool enable) {
+  Debug::SetLiveEditEnabled(isolate, enable);
+}
+
+void DebugInterface::DebugBreak(Isolate* isolate) {
+  Debug::DebugBreak(isolate);
+}
+
+void DebugInterface::CancelDebugBreak(Isolate* isolate) {
+  Debug::CancelDebugBreak(isolate);
+}
+
+MaybeLocal<Array> DebugInterface::GetInternalProperties(Isolate* isolate,
+                                                        Local<Value> value) {
+  return Debug::GetInternalProperties(isolate, value);
+}
+
+void DebugInterface::ChangeBreakOnException(Isolate* isolate,
+                                            ExceptionBreakState type) {
+  i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  internal_isolate->debug()->ChangeBreakOnException(
+      i::BreakException, type == BreakOnAnyException);
+  internal_isolate->debug()->ChangeBreakOnException(i::BreakUncaughtException,
+                                                    type != NoBreakOnException);
+}
+
+void DebugInterface::PrepareStep(Isolate* v8_isolate, StepAction action) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+  ENTER_V8(isolate);
+  CHECK(isolate->debug()->CheckExecutionState());
+  // Clear all current stepping setup.
+  isolate->debug()->ClearStepping();
+  // Prepare step.
+  isolate->debug()->PrepareStep(static_cast<i::StepAction>(action));
+}
+
+void DebugInterface::ClearStepping(Isolate* v8_isolate) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+  ENTER_V8(isolate);
+  // Clear all current stepping setup.
+  isolate->debug()->ClearStepping();
+}
+
+v8::Isolate* DebugInterface::Script::GetIsolate() const {
+  return reinterpret_cast<v8::Isolate*>(Utils::OpenHandle(this)->GetIsolate());
+}
+
+ScriptOriginOptions DebugInterface::Script::OriginOptions() const {
+  return Utils::OpenHandle(this)->origin_options();
+}
+
+bool DebugInterface::Script::WasCompiled() const {
+  return Utils::OpenHandle(this)->compilation_state() ==
+         i::Script::COMPILATION_STATE_COMPILED;
+}
+
+int DebugInterface::Script::Id() const { return Utils::OpenHandle(this)->id(); }
+
+int DebugInterface::Script::LineOffset() const {
+  return Utils::OpenHandle(this)->line_offset();
+}
+
+int DebugInterface::Script::ColumnOffset() const {
+  return Utils::OpenHandle(this)->column_offset();
+}
+
+std::vector<int> DebugInterface::Script::LineEnds() const {
+  i::Handle<i::Script> script = Utils::OpenHandle(this);
+  i::Isolate* isolate = script->GetIsolate();
+  i::HandleScope scope(isolate);
+  i::Script::InitLineEnds(script);
+  CHECK(script->line_ends()->IsFixedArray());
+  i::Handle<i::FixedArray> line_ends(i::FixedArray::cast(script->line_ends()));
+  std::vector<int> result(line_ends->length());
+  for (int i = 0; i < line_ends->length(); ++i) {
+    i::Smi* line_end = i::Smi::cast(line_ends->get(i));
+    result[i] = line_end->value();
+  }
+  return result;
+}
+
+MaybeLocal<String> DebugInterface::Script::Name() const {
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  i::HandleScope handle_scope(isolate);
+  i::Handle<i::Script> script = Utils::OpenHandle(this);
+  i::Handle<i::Object> value(script->name(), isolate);
+  if (!value->IsString()) return MaybeLocal<String>();
+  return Utils::ToLocal(
+      handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
+}
+
+MaybeLocal<String> DebugInterface::Script::SourceURL() const {
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  i::HandleScope handle_scope(isolate);
+  i::Handle<i::Script> script = Utils::OpenHandle(this);
+  i::Handle<i::Object> value(script->source_url(), isolate);
+  if (!value->IsString()) return MaybeLocal<String>();
+  return Utils::ToLocal(
+      handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
+}
+
+MaybeLocal<String> DebugInterface::Script::SourceMappingURL() const {
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  i::HandleScope handle_scope(isolate);
+  i::Handle<i::Script> script = Utils::OpenHandle(this);
+  i::Handle<i::Object> value(script->source_mapping_url(), isolate);
+  if (!value->IsString()) return MaybeLocal<String>();
+  return Utils::ToLocal(
+      handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
+}
+
+MaybeLocal<String> DebugInterface::Script::ContextData() const {
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  i::HandleScope handle_scope(isolate);
+  i::Handle<i::Script> script = Utils::OpenHandle(this);
+  i::Handle<i::Object> value(script->context_data(), isolate);
+  if (!value->IsString()) return MaybeLocal<String>();
+  return Utils::ToLocal(
+      handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
+}
+
+MaybeLocal<String> DebugInterface::Script::Source() const {
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  i::HandleScope handle_scope(isolate);
+  i::Handle<i::Script> script = Utils::OpenHandle(this);
+  i::Handle<i::Object> value(script->source(), isolate);
+  if (!value->IsString()) return MaybeLocal<String>();
+  return Utils::ToLocal(
+      handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
+}
+
+namespace {
+int GetSmiValue(i::Handle<i::FixedArray> array, int index) {
+  return i::Smi::cast(array->get(index))->value();
+}
+}  // namespace
+
+bool DebugInterface::Script::GetPossibleBreakpoints(
+    const Location& start, const Location& end,
+    std::vector<Location>* locations) const {
+  CHECK(!start.IsEmpty());
+  i::Handle<i::Script> script = Utils::OpenHandle(this);
+
+  i::Script::InitLineEnds(script);
+  CHECK(script->line_ends()->IsFixedArray());
+  i::Isolate* isolate = script->GetIsolate();
+  i::Handle<i::FixedArray> line_ends =
+      i::Handle<i::FixedArray>::cast(i::handle(script->line_ends(), isolate));
+  CHECK(line_ends->length());
+
+  int start_offset = GetSourcePosition(start);
+  int end_offset;
+  if (end.IsEmpty()) {
+    end_offset = GetSmiValue(line_ends, line_ends->length() - 1) + 1;
+  } else {
+    end_offset = GetSourcePosition(end);
+  }
+  if (start_offset >= end_offset) return true;
+
+  std::set<int> offsets;
+  if (!isolate->debug()->GetPossibleBreakpoints(script, start_offset,
+                                                end_offset, &offsets)) {
+    return false;
+  }
+
+  int current_line_end_index = 0;
+  for (const auto& it : offsets) {
+    int offset = it;
+    while (offset > GetSmiValue(line_ends, current_line_end_index)) {
+      ++current_line_end_index;
+      CHECK(current_line_end_index < line_ends->length());
+    }
+    int line_offset = 0;
+
+    if (current_line_end_index > 0) {
+      line_offset = GetSmiValue(line_ends, current_line_end_index - 1) + 1;
+    }
+    locations->push_back(Location(
+        current_line_end_index + script->line_offset(),
+        offset - line_offset +
+            (current_line_end_index == 0 ? script->column_offset() : 0)));
+  }
+  return true;
+}
+
+int DebugInterface::Script::GetSourcePosition(const Location& location) const {
+  i::Handle<i::Script> script = Utils::OpenHandle(this);
+
+  int line = std::max(location.GetLineNumber() - script->line_offset(), 0);
+  int column = location.GetColumnNumber();
+  if (line == 0) {
+    column = std::max(0, column - script->column_offset());
+  }
+
+  i::Script::InitLineEnds(script);
+  CHECK(script->line_ends()->IsFixedArray());
+  i::Handle<i::FixedArray> line_ends = i::Handle<i::FixedArray>::cast(
+      i::handle(script->line_ends(), script->GetIsolate()));
+  CHECK(line_ends->length());
+  if (line >= line_ends->length())
+    return GetSmiValue(line_ends, line_ends->length() - 1);
+  int line_offset = GetSmiValue(line_ends, line);
+  if (line == 0) return std::min(column, line_offset);
+  int prev_line_offset = GetSmiValue(line_ends, line - 1);
+  return std::min(prev_line_offset + column + 1, line_offset);
+}
+
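+
The clamping above is easiest to verify against a small model. A self-contained sketch that mirrors (rather than calls) the arithmetic, with an illustrative line_ends table:

#include <algorithm>
#include <vector>

// line_ends[i] is the offset of the last character of line i.
int SourcePosition(const std::vector<int>& line_ends, int line, int column) {
  if (line >= static_cast<int>(line_ends.size())) return line_ends.back();
  int line_end = line_ends[line];
  if (line == 0) return std::min(column, line_end);
  return std::min(line_ends[line - 1] + 1 + column, line_end);
}
// For the source "ab\ncd\nef", an illustrative table {2, 5, 8} gives
// SourcePosition(line_ends, 1, 1) == 4, the offset of 'd', and any
// overlong column is clamped to the end of its line.
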
+MaybeLocal<DebugInterface::Script> DebugInterface::Script::Wrap(
+    v8::Isolate* v8_isolate, v8::Local<v8::Object> script) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+  ENTER_V8(isolate);
+  i::HandleScope handle_scope(isolate);
+  i::Handle<i::JSReceiver> script_receiver(Utils::OpenHandle(*script));
+  if (!script_receiver->IsJSValue()) return MaybeLocal<Script>();
+  i::Handle<i::Object> script_value(
+      i::Handle<i::JSValue>::cast(script_receiver)->value(), isolate);
+  if (!script_value->IsScript()) {
+    return MaybeLocal<Script>();
+  }
+  i::Handle<i::Script> script_obj = i::Handle<i::Script>::cast(script_value);
+  if (script_obj->type() != i::Script::TYPE_NORMAL) return MaybeLocal<Script>();
+  return ToApiHandle<DebugInterface::Script>(
+      handle_scope.CloseAndEscape(script_obj));
+}
+
+DebugInterface::Location::Location(int lineNumber, int columnNumber)
+    : lineNumber_(lineNumber), columnNumber_(columnNumber) {
+  CHECK(lineNumber >= 0);
+  CHECK(columnNumber >= 0);
+}
+
+DebugInterface::Location::Location() : lineNumber_(-1), columnNumber_(-1) {}
+
+int DebugInterface::Location::GetLineNumber() const {
+  CHECK(lineNumber_ >= 0);
+  return lineNumber_;
+}
+
+int DebugInterface::Location::GetColumnNumber() const {
+  CHECK(columnNumber_ >= 0);
+  return columnNumber_;
+}
+
+bool DebugInterface::Location::IsEmpty() const {
+  return lineNumber_ == -1 && columnNumber_ == -1;
+}
+
+void DebugInterface::GetLoadedScripts(
+    v8::Isolate* v8_isolate,
+    PersistentValueVector<DebugInterface::Script>& scripts) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+  ENTER_V8(isolate);
+  // TODO(kozyatinskiy): remove this GC once tests are dealt with.
+  isolate->heap()->CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask,
+                                     i::GarbageCollectionReason::kDebugger);
+  {
+    i::DisallowHeapAllocation no_gc;
+    i::Script::Iterator iterator(isolate);
+    i::Script* script;
+    while ((script = iterator.Next())) {
+      if (script->type() != i::Script::TYPE_NORMAL) continue;
+      if (script->HasValidSource()) {
+        i::HandleScope handle_scope(isolate);
+        i::Handle<i::Script> script_handle(script, isolate);
+        scripts.Append(ToApiHandle<Script>(script_handle));
+      }
+    }
+  }
+}
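
A sketch of how a debugger client composes the new surface: enumerate the loaded scripts, then query each for its breakable locations (an empty end Location meaning "through end of script"):

v8::PersistentValueVector<v8::DebugInterface::Script> scripts(isolate);
v8::DebugInterface::GetLoadedScripts(isolate, scripts);
for (size_t i = 0; i < scripts.Size(); ++i) {
  v8::Local<v8::DebugInterface::Script> script = scripts.Get(i);
  std::vector<v8::DebugInterface::Location> locations;
  if (script->GetPossibleBreakpoints(v8::DebugInterface::Location(0, 0),
                                     v8::DebugInterface::Location(),
                                     &locations)) {
    // locations holds line/column pairs, adjusted by the script's
    // line and column offsets as computed above.
  }
}
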
 
 Local<String> CpuProfileNode::GetFunctionName() const {
   const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
diff --git a/src/api.h b/src/api.h
index 22c10dd..6fcaa90 100644
--- a/src/api.h
+++ b/src/api.h
@@ -7,6 +7,7 @@
 
 #include "include/v8-testing.h"
 #include "src/contexts.h"
+#include "src/debug/debug-interface.h"
 #include "src/factory.h"
 #include "src/isolate.h"
 #include "src/list.h"
@@ -28,7 +29,7 @@
 
 template <typename T> inline T ToCData(v8::internal::Object* obj) {
   STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
-  if (obj == v8::internal::Smi::FromInt(0)) return nullptr;
+  if (obj == v8::internal::Smi::kZero) return nullptr;
   return reinterpret_cast<T>(
       reinterpret_cast<intptr_t>(
           v8::internal::Foreign::cast(obj)->foreign_address()));
@@ -39,7 +40,7 @@
 inline v8::internal::Handle<v8::internal::Object> FromCData(
     v8::internal::Isolate* isolate, T obj) {
   STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
-  if (obj == nullptr) return handle(v8::internal::Smi::FromInt(0), isolate);
+  if (obj == nullptr) return handle(v8::internal::Smi::kZero, isolate);
   return isolate->factory()->NewForeign(
       reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(obj)));
 }
@@ -108,7 +109,8 @@
   V(StackTrace, JSArray)                     \
   V(StackFrame, JSObject)                    \
   V(Proxy, JSProxy)                          \
-  V(NativeWeakMap, JSWeakMap)
+  V(NativeWeakMap, JSWeakMap)                \
+  V(DebugInterface::Script, Script)
 
 class Utils {
  public:
diff --git a/src/arguments.h b/src/arguments.h
index 92c7075..d5d2c02 100644
--- a/src/arguments.h
+++ b/src/arguments.h
@@ -96,8 +96,7 @@
   Type Name(int args_length, Object** args_object, Isolate* isolate) {        \
     DCHECK(isolate->context() == nullptr || isolate->context()->IsContext()); \
     CLOBBER_DOUBLE_REGISTERS();                                               \
-    if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||       \
-                    FLAG_runtime_call_stats)) {                               \
+    if (V8_UNLIKELY(FLAG_runtime_stats)) {                                    \
       return Stats_##Name(args_length, args_object, isolate);                 \
     }                                                                         \
     Arguments args(args_length, args_object);                                 \
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index ee02027..d90dc76 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -4280,10 +4280,10 @@
 
   // Deduplicate constants.
   int size_after_marker = estimated_size_after_marker;
-  for (int i = 0; i < pending_64_bit_constants_.size(); i++) {
+  for (size_t i = 0; i < pending_64_bit_constants_.size(); i++) {
     ConstantPoolEntry& entry = pending_64_bit_constants_[i];
     DCHECK(!entry.is_merged());
-    for (int j = 0; j < i; j++) {
+    for (size_t j = 0; j < i; j++) {
       if (entry.value64() == pending_64_bit_constants_[j].value64()) {
         DCHECK(!pending_64_bit_constants_[j].is_merged());
         entry.set_merged_index(j);
@@ -4293,11 +4293,11 @@
     }
   }
 
-  for (int i = 0; i < pending_32_bit_constants_.size(); i++) {
+  for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) {
     ConstantPoolEntry& entry = pending_32_bit_constants_[i];
     DCHECK(!entry.is_merged());
     if (!entry.sharing_ok()) continue;
-    for (int j = 0; j < i; j++) {
+    for (size_t j = 0; j < i; j++) {
       if (entry.value() == pending_32_bit_constants_[j].value()) {
         DCHECK(!pending_32_bit_constants_[j].is_merged());
         entry.set_merged_index(j);
@@ -4338,7 +4338,7 @@
 
     // Emit 64-bit constant pool entries first: their range is smaller than
     // that of 32-bit entries.
-    for (int i = 0; i < pending_64_bit_constants_.size(); i++) {
+    for (size_t i = 0; i < pending_64_bit_constants_.size(); i++) {
       ConstantPoolEntry& entry = pending_64_bit_constants_[i];
 
       Instr instr = instr_at(entry.position());
@@ -4367,7 +4367,7 @@
     }
 
     // Emit 32-bit constant pool entries.
-    for (int i = 0; i < pending_32_bit_constants_.size(); i++) {
+    for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) {
       ConstantPoolEntry& entry = pending_32_bit_constants_[i];
       Instr instr = instr_at(entry.position());
 
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index e5448f7..1283c39 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -1421,7 +1421,8 @@
 
   // Record a deoptimization reason that can be used by a log or cpu profiler.
   // Use --trace-deopt to enable.
-  void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
+  void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
+                         int id);
 
   // Record the emission of a constant pool.
   //
@@ -1585,7 +1586,8 @@
       // Check the constant pool hasn't been blocked for too long.
       DCHECK(pending_32_bit_constants_.empty() ||
              (start + pending_64_bit_constants_.size() * kDoubleSize <
-              (first_const_pool_32_use_ + kMaxDistToIntPool)));
+              static_cast<size_t>(first_const_pool_32_use_ +
+                                  kMaxDistToIntPool)));
       DCHECK(pending_64_bit_constants_.empty() ||
              (start < (first_const_pool_64_use_ + kMaxDistToFPPool)));
 #endif
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index de6803f..59f304d 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -544,7 +544,7 @@
   // If either is a Smi (we know that not both are), then they can only
   // be strictly equal if the other is a HeapNumber.
   STATIC_ASSERT(kSmiTag == 0);
-  DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+  DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
   __ and_(r2, lhs, Operand(rhs));
   __ JumpIfNotSmi(r2, &not_smis);
   // One operand is a smi.  EmitSmiNonsmiComparison generates code that can:
@@ -1494,13 +1494,10 @@
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
   __ add(r1, r1, Operand(2));  // r1 was a smi.
 
-  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
-  __ JumpIfSmi(r0, &runtime);
-  __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
-  __ b(ne, &runtime);
+  // Check that the last match info is a FixedArray.
+  __ ldr(last_match_info_elements, MemOperand(sp, kLastMatchInfoOffset));
+  __ JumpIfSmi(last_match_info_elements, &runtime);
   // Check that the object has fast elements.
-  __ ldr(last_match_info_elements,
-         FieldMemOperand(r0, JSArray::kElementsOffset));
   __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
   __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
   __ b(ne, &runtime);
@@ -1508,7 +1505,7 @@
   // additional information.
   __ ldr(r0,
          FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
-  __ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead));
+  __ add(r2, r1, Operand(RegExpMatchInfo::kLastMatchOverhead));
   __ cmp(r2, Operand::SmiUntag(r0));
   __ b(gt, &runtime);
 
@@ -1517,28 +1514,20 @@
   // Store the capture count.
   __ SmiTag(r2, r1);
   __ str(r2, FieldMemOperand(last_match_info_elements,
-                             RegExpImpl::kLastCaptureCountOffset));
+                             RegExpMatchInfo::kNumberOfCapturesOffset));
   // Store last subject and last input.
-  __ str(subject,
-         FieldMemOperand(last_match_info_elements,
-                         RegExpImpl::kLastSubjectOffset));
+  __ str(subject, FieldMemOperand(last_match_info_elements,
+                                  RegExpMatchInfo::kLastSubjectOffset));
   __ mov(r2, subject);
   __ RecordWriteField(last_match_info_elements,
-                      RegExpImpl::kLastSubjectOffset,
-                      subject,
-                      r3,
-                      kLRHasNotBeenSaved,
-                      kDontSaveFPRegs);
+                      RegExpMatchInfo::kLastSubjectOffset, subject, r3,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs);
   __ mov(subject, r2);
-  __ str(subject,
-         FieldMemOperand(last_match_info_elements,
-                         RegExpImpl::kLastInputOffset));
+  __ str(subject, FieldMemOperand(last_match_info_elements,
+                                  RegExpMatchInfo::kLastInputOffset));
   __ RecordWriteField(last_match_info_elements,
-                      RegExpImpl::kLastInputOffset,
-                      subject,
-                      r3,
-                      kLRHasNotBeenSaved,
-                      kDontSaveFPRegs);
+                      RegExpMatchInfo::kLastInputOffset, subject, r3,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs);
 
   // Get the static offsets vector filled by the native regexp code.
   ExternalReference address_of_static_offsets_vector =
@@ -1549,10 +1538,9 @@
   // r2: offsets vector
   Label next_capture, done;
   // Capture register counter starts from number of capture registers and
-  // counts down until wraping after zero.
-  __ add(r0,
-         last_match_info_elements,
-         Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
+  // counts down until wrapping after zero.
+  __ add(r0, last_match_info_elements,
+         Operand(RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag));
   __ bind(&next_capture);
   __ sub(r1, r1, Operand(1), SetCC);
   __ b(mi, &done);
@@ -1565,7 +1553,7 @@
   __ bind(&done);
 
   // Return last match info.
-  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
+  __ mov(r0, last_match_info_elements);
   __ add(sp, sp, Operand(4 * kPointerSize));
   __ Ret();
 
@@ -1784,6 +1772,7 @@
 }
 
 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
+  // r0 - number of arguments
   // r1 - function
   // r3 - slot id
   // r2 - vector
@@ -1792,25 +1781,22 @@
   __ cmp(r1, r5);
   __ b(ne, miss);
 
-  __ mov(r0, Operand(arg_count()));
-
   // Increment the call count for monomorphic function calls.
   IncrementCallCount(masm, r2, r3);
 
   __ mov(r2, r4);
   __ mov(r3, r1);
-  ArrayConstructorStub stub(masm->isolate(), arg_count());
+  ArrayConstructorStub stub(masm->isolate());
   __ TailCallStub(&stub);
 }
 
 
 void CallICStub::Generate(MacroAssembler* masm) {
+  // r0 - number of arguments
   // r1 - function
   // r3 - slot id (Smi)
   // r2 - vector
   Label extra_checks_or_miss, call, call_function, call_count_incremented;
-  int argc = arg_count();
-  ParameterCount actual(argc);
 
   // The checks. First, does r1 match the recorded monomorphic target?
   __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
@@ -1843,7 +1829,6 @@
   // Increment the call count for monomorphic function calls.
   IncrementCallCount(masm, r2, r3);
 
-  __ mov(r0, Operand(argc));
   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
                                                     tail_call_mode()),
           RelocInfo::CODE_TARGET);
@@ -1888,7 +1873,6 @@
   IncrementCallCount(masm, r2, r3);
 
   __ bind(&call_count_incremented);
-  __ mov(r0, Operand(argc));
   __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
           RelocInfo::CODE_TARGET);
 
@@ -1921,13 +1905,12 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     CreateWeakCellStub create_stub(masm->isolate());
-    __ Push(r2);
-    __ Push(r3);
-    __ Push(cp, r1);
+    __ SmiTag(r0);
+    __ Push(r0, r2, r3, cp, r1);
     __ CallStub(&create_stub);
-    __ Pop(cp, r1);
-    __ Pop(r3);
-    __ Pop(r2);
+    __ Pop(r2, r3, cp, r1);
+    __ Pop(r0);
+    __ SmiUntag(r0);
   }
 
   __ jmp(&call_function);
@@ -1944,14 +1927,21 @@
 void CallICStub::GenerateMiss(MacroAssembler* masm) {
   FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
 
+  // Preserve the number of arguments as Smi.
+  __ SmiTag(r0);
+
   // Push the receiver and the function and feedback info.
-  __ Push(r1, r2, r3);
+  __ Push(r0, r1, r2, r3);
 
   // Call the entry.
   __ CallRuntime(Runtime::kCallIC_Miss);
 
   // Move result to r1 and exit the internal frame.
   __ mov(r1, r0);
+
+  // Restore number of arguments.
+  __ Pop(r0);
+  __ SmiUntag(r0);
 }
 
 
@@ -3061,21 +3051,6 @@
   __ Ret();
 }
 
-
-void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  LoadICStub stub(isolate());
-  stub.GenerateForTrampoline(masm);
-}
-
-
-void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  KeyedLoadICStub stub(isolate());
-  stub.GenerateForTrampoline(masm);
-}
-
-
 void CallICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(r2);
   CallICStub stub(isolate(), state());
@@ -3083,14 +3058,6 @@
 }
 
 
-void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-
-void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-
 static void HandleArrayCases(MacroAssembler* masm, Register feedback,
                              Register receiver_map, Register scratch1,
                              Register scratch2, bool is_polymorphic,
@@ -3178,183 +3145,12 @@
   __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
 }
 
-
-void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // r1
-  Register name = LoadWithVectorDescriptor::NameRegister();          // r2
-  Register vector = LoadWithVectorDescriptor::VectorRegister();      // r3
-  Register slot = LoadWithVectorDescriptor::SlotRegister();          // r0
-  Register feedback = r4;
-  Register receiver_map = r5;
-  Register scratch1 = r6;
-
-  __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
-  __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
-                        scratch1, &compare_map, &load_smi_map, &try_array);
-
-  // Is it a fixed array?
-  __ bind(&try_array);
-  __ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
-  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
-  __ b(ne, &not_array);
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, true, &miss);
-
-  __ bind(&not_array);
-  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
-  __ b(ne, &miss);
-  masm->isolate()->load_stub_cache()->GenerateProbe(
-      masm, receiver, name, feedback, receiver_map, scratch1, r9);
-
-  __ bind(&miss);
-  LoadIC::GenerateMiss(masm);
-
-  __ bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ jmp(&compare_map);
-}
-
-
-void KeyedLoadICStub::Generate(MacroAssembler* masm) {
-  GenerateImpl(masm, false);
-}
-
-
-void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-
-void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // r1
-  Register key = LoadWithVectorDescriptor::NameRegister();           // r2
-  Register vector = LoadWithVectorDescriptor::VectorRegister();      // r3
-  Register slot = LoadWithVectorDescriptor::SlotRegister();          // r0
-  Register feedback = r4;
-  Register receiver_map = r5;
-  Register scratch1 = r6;
-
-  __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
-  __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
-                        scratch1, &compare_map, &load_smi_map, &try_array);
-
-  __ bind(&try_array);
-  // Is it a fixed array?
-  __ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
-  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
-  __ b(ne, &not_array);
-
-  // We have a polymorphic element handler.
-  Label polymorphic, try_poly_name;
-  __ bind(&polymorphic);
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, true, &miss);
-
-  __ bind(&not_array);
-  // Is it generic?
-  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
-  __ b(ne, &try_poly_name);
-  Handle<Code> megamorphic_stub =
-      KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
-  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
-  __ bind(&try_poly_name);
-  // We might have a name in feedback, and a fixed array in the next slot.
-  __ cmp(key, feedback);
-  __ b(ne, &miss);
-  // If the name comparison succeeded, we know we have a fixed array with
-  // at least one map/handler pair.
-  __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
-  __ ldr(feedback,
-         FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, false, &miss);
-
-  __ bind(&miss);
-  KeyedLoadIC::GenerateMiss(masm);
-
-  __ bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ jmp(&compare_map);
-}
-
-void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
-  StoreICStub stub(isolate(), state());
-  stub.GenerateForTrampoline(masm);
-}
-
 void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
   KeyedStoreICStub stub(isolate(), state());
   stub.GenerateForTrampoline(masm);
 }
 
-void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // r1
-  Register key = StoreWithVectorDescriptor::NameRegister();           // r2
-  Register vector = StoreWithVectorDescriptor::VectorRegister();      // r3
-  Register slot = StoreWithVectorDescriptor::SlotRegister();          // r4
-  DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r0));          // r0
-  Register feedback = r5;
-  Register receiver_map = r6;
-  Register scratch1 = r9;
-
-  __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
-  __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
-                        scratch1, &compare_map, &load_smi_map, &try_array);
-
-  // Is it a fixed array?
-  __ bind(&try_array);
-  __ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
-  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
-  __ b(ne, &not_array);
-
-  // We are using register r8, which is used for the embedded constant pool
-  // when FLAG_enable_embedded_constant_pool is true.
-  DCHECK(!FLAG_enable_embedded_constant_pool);
-  Register scratch2 = r8;
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
-                   &miss);
-
-  __ bind(&not_array);
-  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
-  __ b(ne, &miss);
-  masm->isolate()->store_stub_cache()->GenerateProbe(
-      masm, receiver, key, feedback, receiver_map, scratch1, scratch2);
-
-  __ bind(&miss);
-  StoreIC::GenerateMiss(masm);
-
-  __ bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ jmp(&compare_map);
-}
-
 void KeyedStoreICStub::Generate(MacroAssembler* masm) {
   GenerateImpl(masm, false);
 }
@@ -3706,30 +3502,19 @@
 void ArrayConstructorStub::GenerateDispatchToArrayStub(
     MacroAssembler* masm,
     AllocationSiteOverrideMode mode) {
-  if (argument_count() == ANY) {
-    Label not_zero_case, not_one_case;
-    __ tst(r0, r0);
-    __ b(ne, &not_zero_case);
-    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+  Label not_zero_case, not_one_case;
+  __ tst(r0, r0);
+  __ b(ne, &not_zero_case);
+  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
 
-    __ bind(&not_zero_case);
-    __ cmp(r0, Operand(1));
-    __ b(gt, &not_one_case);
-    CreateArrayDispatchOneArgument(masm, mode);
+  __ bind(&not_zero_case);
+  __ cmp(r0, Operand(1));
+  __ b(gt, &not_one_case);
+  CreateArrayDispatchOneArgument(masm, mode);
 
-    __ bind(&not_one_case);
-    ArrayNArgumentsConstructorStub stub(masm->isolate());
-    __ TailCallStub(&stub);
-  } else if (argument_count() == NONE) {
-    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-  } else if (argument_count() == ONE) {
-    CreateArrayDispatchOneArgument(masm, mode);
-  } else if (argument_count() == MORE_THAN_ONE) {
-    ArrayNArgumentsConstructorStub stub(masm->isolate());
-    __ TailCallStub(&stub);
-  } else {
-    UNREACHABLE();
-  }
+  __ bind(&not_one_case);
+  ArrayNArgumentsConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
 }
 
 
@@ -3781,21 +3566,8 @@
   GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
 
   __ bind(&subclassing);
-  switch (argument_count()) {
-    case ANY:
-    case MORE_THAN_ONE:
-      __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
-      __ add(r0, r0, Operand(3));
-      break;
-    case NONE:
-      __ str(r1, MemOperand(sp, 0 * kPointerSize));
-      __ mov(r0, Operand(3));
-      break;
-    case ONE:
-      __ str(r1, MemOperand(sp, 1 * kPointerSize));
-      __ mov(r0, Operand(4));
-      break;
-  }
+  __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+  __ add(r0, r0, Operand(3));
   __ Push(r3, r2);
   __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
 }
@@ -4216,7 +3988,7 @@
   const int kParameterMapHeaderSize =
       FixedArray::kHeaderSize + 2 * kPointerSize;
   // If there are no mapped parameters, we do not need the parameter_map.
-  __ cmp(r6, Operand(Smi::FromInt(0)));
+  __ cmp(r6, Operand(Smi::kZero));
   __ mov(r9, Operand::Zero(), LeaveCC, eq);
   __ mov(r9, Operand(r6, LSL, 1), LeaveCC, ne);
   __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
@@ -4273,7 +4045,7 @@
   // r6 = mapped parameter count (tagged)
   // Initialize parameter map. If there are no mapped arguments, we're done.
   Label skip_parameter_map;
-  __ cmp(r6, Operand(Smi::FromInt(0)));
+  __ cmp(r6, Operand(Smi::kZero));
   // Move backing store address to r1, because it is
   // expected there when filling in the unmapped arguments.
   __ mov(r1, r4, LeaveCC, eq);
@@ -4321,7 +4093,7 @@
   __ str(ip, MemOperand(r1, r0));
   __ add(r9, r9, Operand(Smi::FromInt(1)));
   __ bind(&parameters_test);
-  __ cmp(r5, Operand(Smi::FromInt(0)));
+  __ cmp(r5, Operand(Smi::kZero));
   __ b(ne, &parameters_loop);
 
   // Restore r0 = new object (tagged) and r5 = argument count (tagged).
@@ -4481,130 +4253,6 @@
 }
 
 
-void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
-  Register value = r0;
-  Register slot = r2;
-
-  Register cell = r1;
-  Register cell_details = r4;
-  Register cell_value = r5;
-  Register cell_value_map = r6;
-  Register scratch = r9;
-
-  Register context = cp;
-  Register context_temp = cell;
-
-  Label fast_heapobject_case, fast_smi_case, slow_case;
-
-  if (FLAG_debug_code) {
-    __ CompareRoot(value, Heap::kTheHoleValueRootIndex);
-    __ Check(ne, kUnexpectedValue);
-  }
-
-  // Go up the context chain to the script context.
-  for (int i = 0; i < depth(); i++) {
-    __ ldr(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
-    context = context_temp;
-  }
-
-  // Load the PropertyCell at the specified slot.
-  __ add(cell, context, Operand(slot, LSL, kPointerSizeLog2));
-  __ ldr(cell, ContextMemOperand(cell));
-
-  // Load PropertyDetails for the cell (actually only the cell_type and kind).
-  __ ldr(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset));
-  __ SmiUntag(cell_details);
-  __ and_(cell_details, cell_details,
-          Operand(PropertyDetails::PropertyCellTypeField::kMask |
-                  PropertyDetails::KindField::kMask |
-                  PropertyDetails::kAttributesReadOnlyMask));
-
-  // Check if PropertyCell holds mutable data.
-  Label not_mutable_data;
-  __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
-                                   PropertyCellType::kMutable) |
-                               PropertyDetails::KindField::encode(kData)));
-  __ b(ne, &not_mutable_data);
-  __ JumpIfSmi(value, &fast_smi_case);
-
-  __ bind(&fast_heapobject_case);
-  __ str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
-  // RecordWriteField clobbers the value register, so we copy it before the
-  // call.
-  __ mov(r4, Operand(value));
-  __ RecordWriteField(cell, PropertyCell::kValueOffset, r4, scratch,
-                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ Ret();
-
-  __ bind(&not_mutable_data);
-  // Check if PropertyCell value matches the new value (relevant for Constant,
-  // ConstantType and Undefined cells).
-  Label not_same_value;
-  __ ldr(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
-  __ cmp(cell_value, value);
-  __ b(ne, &not_same_value);
-
-  // Make sure the PropertyCell is not marked READ_ONLY.
-  __ tst(cell_details, Operand(PropertyDetails::kAttributesReadOnlyMask));
-  __ b(ne, &slow_case);
-
-  if (FLAG_debug_code) {
-    Label done;
-    // This can only be true for Constant, ConstantType and Undefined cells,
-    // because we never store the_hole via this stub.
-    __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
-                                     PropertyCellType::kConstant) |
-                                 PropertyDetails::KindField::encode(kData)));
-    __ b(eq, &done);
-    __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
-                                     PropertyCellType::kConstantType) |
-                                 PropertyDetails::KindField::encode(kData)));
-    __ b(eq, &done);
-    __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
-                                     PropertyCellType::kUndefined) |
-                                 PropertyDetails::KindField::encode(kData)));
-    __ Check(eq, kUnexpectedValue);
-    __ bind(&done);
-  }
-  __ Ret();
-  __ bind(&not_same_value);
-
-  // Check if PropertyCell contains data with constant type (and is not
-  // READ_ONLY).
-  __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
-                                   PropertyCellType::kConstantType) |
-                               PropertyDetails::KindField::encode(kData)));
-  __ b(ne, &slow_case);
-
-  // Now either both old and new values must be smis or both must be heap
-  // objects with same map.
-  Label value_is_heap_object;
-  __ JumpIfNotSmi(value, &value_is_heap_object);
-  __ JumpIfNotSmi(cell_value, &slow_case);
-  // Old and new values are smis, no need for a write barrier here.
-  __ bind(&fast_smi_case);
-  __ str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
-  __ Ret();
-
-  __ bind(&value_is_heap_object);
-  __ JumpIfSmi(cell_value, &slow_case);
-
-  __ ldr(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
-  __ ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
-  __ cmp(cell_value_map, scratch);
-  __ b(eq, &fast_heapobject_case);
-
-  // Fallback to runtime.
-  __ bind(&slow_case);
-  __ SmiTag(slot);
-  __ Push(slot, value);
-  __ TailCallRuntime(is_strict(language_mode())
-                         ? Runtime::kStoreGlobalViaContext_Strict
-                         : Runtime::kStoreGlobalViaContext_Sloppy);
-}
-
-
 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
   return ref0.address() - ref1.address();
 }
@@ -4881,7 +4529,7 @@
   __ Push(scratch, scratch);
   __ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
   __ Push(scratch, holder);
-  __ Push(Smi::FromInt(0));  // should_throw_on_error -> false
+  __ Push(Smi::kZero);  // should_throw_on_error -> false
   __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
   __ push(scratch);
   // v8::PropertyCallbackInfo::args_ array and name handle.
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index e49fed9..1231355 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -329,7 +329,7 @@
     // in a separate table if necessary.
     Label high_fixes[256];
     int high_fix_max = (count() - 1) >> 8;
-    DCHECK_GT(arraysize(high_fixes), high_fix_max);
+    DCHECK_GT(arraysize(high_fixes), static_cast<size_t>(high_fix_max));
     for (int i = 0; i < count(); i++) {
       int start = masm()->pc_offset();
       USE(start);
diff --git a/src/arm/interface-descriptors-arm.cc b/src/arm/interface-descriptors-arm.cc
index a002b8d..75161af 100644
--- a/src/arm/interface-descriptors-arm.cc
+++ b/src/arm/interface-descriptors-arm.cc
@@ -31,9 +31,9 @@
 const Register LoadDescriptor::NameRegister() { return r2; }
 const Register LoadDescriptor::SlotRegister() { return r0; }
 
-
 const Register LoadWithVectorDescriptor::VectorRegister() { return r3; }
 
+const Register LoadICProtoArrayDescriptor::HandlerRegister() { return r4; }
 
 const Register StoreDescriptor::ReceiverRegister() { return r1; }
 const Register StoreDescriptor::NameRegister() { return r2; }
@@ -46,10 +46,6 @@
 const Register StoreTransitionDescriptor::VectorRegister() { return r3; }
 const Register StoreTransitionDescriptor::MapRegister() { return r5; }
 
-const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r2; }
-const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r0; }
-
-
 const Register StringCompareDescriptor::LeftRegister() { return r1; }
 const Register StringCompareDescriptor::RightRegister() { return r0; }
 
@@ -162,7 +158,7 @@
 
 void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {r1, r3, r2};
+  Register registers[] = {r1, r0, r3, r2};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
@@ -211,13 +207,6 @@
 }
 
 
-void RegExpConstructResultDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {r2, r1, r0};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
 void TransitionElementsKindDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {r0, r1};
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 00f8ab5..c67fad8 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -663,7 +663,7 @@
 
   // Save caller-saved registers, which includes js_function.
   DCHECK((kCallerSaved & js_function.bit()) != 0);
-  DCHECK_EQ(kCallerSaved & code_entry.bit(), 0);
+  DCHECK_EQ(kCallerSaved & code_entry.bit(), 0u);
   stm(db_w, sp, (kCallerSaved | lr.bit()));
 
   int argument_count = 3;
@@ -1051,6 +1051,69 @@
   }
 }
 
+void MacroAssembler::VmovExtended(Register dst, int src_code) {
+  DCHECK_LE(32, src_code);
+  DCHECK_GT(64, src_code);
+  if (src_code & 0x1) {
+    VmovHigh(dst, DwVfpRegister::from_code(src_code / 2));
+  } else {
+    VmovLow(dst, DwVfpRegister::from_code(src_code / 2));
+  }
+}
+
+void MacroAssembler::VmovExtended(int dst_code, Register src) {
+  DCHECK_LE(32, dst_code);
+  DCHECK_GT(64, dst_code);
+  if (dst_code & 0x1) {
+    VmovHigh(DwVfpRegister::from_code(dst_code / 2), src);
+  } else {
+    VmovLow(DwVfpRegister::from_code(dst_code / 2), src);
+  }
+}
+
+void MacroAssembler::VmovExtended(int dst_code, int src_code,
+                                  Register scratch) {
+  if (src_code < 32 && dst_code < 32) {
+    // src and dst are both s-registers.
+    vmov(SwVfpRegister::from_code(dst_code),
+         SwVfpRegister::from_code(src_code));
+  } else if (src_code < 32) {
+    // src is an s-register.
+    vmov(scratch, SwVfpRegister::from_code(src_code));
+    VmovExtended(dst_code, scratch);
+  } else if (dst_code < 32) {
+    // dst is an s-register.
+    VmovExtended(scratch, src_code);
+    vmov(SwVfpRegister::from_code(dst_code), scratch);
+  } else {
+    // Neither src nor dst is an s-register.
+    DCHECK_GT(64, src_code);
+    DCHECK_GT(64, dst_code);
+    VmovExtended(scratch, src_code);
+    VmovExtended(dst_code, scratch);
+  }
+}
+
+void MacroAssembler::VmovExtended(int dst_code, const MemOperand& src,
+                                  Register scratch) {
+  if (dst_code >= 32) {
+    ldr(scratch, src);
+    VmovExtended(dst_code, scratch);
+  } else {
+    vldr(SwVfpRegister::from_code(dst_code), src);
+  }
+}
+
+void MacroAssembler::VmovExtended(const MemOperand& dst, int src_code,
+                                  Register scratch) {
+  if (src_code >= 32) {
+    VmovExtended(scratch, src_code);
+    str(scratch, dst);
+  } else {
+    vstr(SwVfpRegister::from_code(src_code), dst);
+  }
+}
+
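+
The convention these helpers encode: codes 0-31 name the architectural s-registers, while codes 32-63 address the 32-bit halves of d16-d31, which have no s-register aliases on ARM. A model of the decode rule (illustrative, not V8 API):

// Extended VFP register codes: 0..31 -> s0..s31; 32..63 -> halves of d16..d31.
struct VfpHalf {
  int d_reg;   // d-register index
  bool high;   // true for the high 32 bits
};
VfpHalf DecodeExtended(int code) {     // precondition: 32 <= code < 64
  return {code / 2, (code & 1) != 0};  // e.g. 33 -> high half of d16
}
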
 void MacroAssembler::LslPair(Register dst_low, Register dst_high,
                              Register src_low, Register src_high,
                              Register scratch, Register shift) {
@@ -1767,90 +1830,6 @@
 }
 
 
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
-                                            Register scratch,
-                                            Label* miss) {
-  Label same_contexts;
-
-  DCHECK(!holder_reg.is(scratch));
-  DCHECK(!holder_reg.is(ip));
-  DCHECK(!scratch.is(ip));
-
-  // Load current lexical context from the active StandardFrame, which
-  // may require crawling past STUB frames.
-  Label load_context;
-  Label has_context;
-  DCHECK(!ip.is(scratch));
-  mov(ip, fp);
-  bind(&load_context);
-  ldr(scratch, MemOperand(ip, CommonFrameConstants::kContextOrFrameTypeOffset));
-  JumpIfNotSmi(scratch, &has_context);
-  ldr(ip, MemOperand(ip, CommonFrameConstants::kCallerFPOffset));
-  b(&load_context);
-  bind(&has_context);
-
-  // In debug mode, make sure the lexical context is set.
-#ifdef DEBUG
-  cmp(scratch, Operand::Zero());
-  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
-#endif
-
-  // Load the native context of the current context.
-  ldr(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
-
-  // Check the context is a native context.
-  if (emit_debug_code()) {
-    // Cannot use ip as a temporary in this verification code. Due to the fact
-    // that ip is clobbered as part of cmp with an object Operand.
-    push(holder_reg);  // Temporarily save holder on the stack.
-    // Read the first word and compare to the native_context_map.
-    ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
-    LoadRoot(ip, Heap::kNativeContextMapRootIndex);
-    cmp(holder_reg, ip);
-    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
-    pop(holder_reg);  // Restore holder.
-  }
-
-  // Check if both contexts are the same.
-  ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
-  cmp(scratch, Operand(ip));
-  b(eq, &same_contexts);
-
-  // Check the context is a native context.
-  if (emit_debug_code()) {
-    // Cannot use ip as a temporary in this verification code. Due to the fact
-    // that ip is clobbered as part of cmp with an object Operand.
-    push(holder_reg);  // Temporarily save holder on the stack.
-    mov(holder_reg, ip);  // Move ip to its holding place.
-    LoadRoot(ip, Heap::kNullValueRootIndex);
-    cmp(holder_reg, ip);
-    Check(ne, kJSGlobalProxyContextShouldNotBeNull);
-
-    ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
-    LoadRoot(ip, Heap::kNativeContextMapRootIndex);
-    cmp(holder_reg, ip);
-    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
-    // Restore ip is not needed. ip is reloaded below.
-    pop(holder_reg);  // Restore holder.
-    // Restore ip to holder's context.
-    ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
-  }
-
-  // Check that the security token in the calling global object is
-  // compatible with the security token in the receiving global
-  // object.
-  int token_offset = Context::kHeaderSize +
-                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
-
-  ldr(scratch, FieldMemOperand(scratch, token_offset));
-  ldr(ip, FieldMemOperand(ip, token_offset));
-  cmp(scratch, Operand(ip));
-  b(ne, miss);
-
-  bind(&same_contexts);
-}
-
-
 // Compute the hash code from the untagged key.  This must be kept in sync with
 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
 // code-stub-hydrogen.cc
@@ -1883,85 +1862,6 @@
   bic(t0, t0, Operand(0xc0000000u));
 }
 
-
-void MacroAssembler::LoadFromNumberDictionary(Label* miss,
-                                              Register elements,
-                                              Register key,
-                                              Register result,
-                                              Register t0,
-                                              Register t1,
-                                              Register t2) {
-  // Register use:
-  //
-  // elements - holds the slow-case elements of the receiver on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // key      - holds the smi key on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // result   - holds the result on exit if the load succeeded.
-  //            Allowed to be the same as 'key' or 'result'.
-  //            Unchanged on bailout so 'key' or 'result' can be used
-  //            in further computation.
-  //
-  // Scratch registers:
-  //
-  // t0 - holds the untagged key on entry and holds the hash once computed.
-  //
-  // t1 - used to hold the capacity mask of the dictionary
-  //
-  // t2 - used for the index into the dictionary.
-  Label done;
-
-  GetNumberHash(t0, t1);
-
-  // Compute the capacity mask.
-  ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
-  SmiUntag(t1);
-  sub(t1, t1, Operand(1));
-
-  // Generate an unrolled loop that performs a few probes before giving up.
-  for (int i = 0; i < kNumberDictionaryProbes; i++) {
-    // Use t2 for index calculations and keep the hash intact in t0.
-    mov(t2, t0);
-    // Compute the masked index: (hash + i + i * i) & mask.
-    if (i > 0) {
-      add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
-    }
-    and_(t2, t2, Operand(t1));
-
-    // Scale the index by multiplying by the element size.
-    DCHECK(SeededNumberDictionary::kEntrySize == 3);
-    add(t2, t2, Operand(t2, LSL, 1));  // t2 = t2 * 3
-
-    // Check if the key is identical to the name.
-    add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
-    ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
-    cmp(key, Operand(ip));
-    if (i != kNumberDictionaryProbes - 1) {
-      b(eq, &done);
-    } else {
-      b(ne, miss);
-    }
-  }
-
-  bind(&done);
-  // Check that the value is a field property.
-  // t2: elements + (index * kPointerSize)
-  const int kDetailsOffset =
-      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
-  ldr(t1, FieldMemOperand(t2, kDetailsOffset));
-  DCHECK_EQ(DATA, 0);
-  tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
-  b(ne, miss);
-
-  // Get the value at the masked, scaled index and return.
-  const int kValueOffset =
-      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
-  ldr(result, FieldMemOperand(t2, kValueOffset));
-}
-
-
 void MacroAssembler::Allocate(int object_size,
                               Register result,
                               Register scratch1,
@@ -2414,20 +2314,6 @@
   cmp(obj, ip);
 }
 
-
-void MacroAssembler::CheckFastElements(Register map,
-                                       Register scratch,
-                                       Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  STATIC_ASSERT(FAST_ELEMENTS == 2);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
-  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
-  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
-  b(hi, fail);
-}
-
-
 void MacroAssembler::CheckFastObjectElements(Register map,
                                              Register scratch,
                                              Label* fail) {
@@ -2630,18 +2516,6 @@
   return has_frame_ || !stub->SometimesSetsUpAFrame();
 }
 
-
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
-  // If the hash field contains an array index pick it out. The assert checks
-  // that the constants for the maximum number of digits for an array index
-  // cached in the hash field and the number of bits reserved for it does not
-  // conflict.
-  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
-         (1 << String::kArrayIndexValueBits));
-  DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
-}
-
-
 void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
   if (CpuFeatures::IsSupported(VFPv3)) {
     CpuFeatureScope scope(this, VFPv3);
@@ -3346,50 +3220,6 @@
   STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
 }
 
-
-void MacroAssembler::CopyBytes(Register src,
-                               Register dst,
-                               Register length,
-                               Register scratch) {
-  Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
-
-  // Align src before copying in word size chunks.
-  cmp(length, Operand(kPointerSize));
-  b(le, &byte_loop);
-
-  bind(&align_loop_1);
-  tst(src, Operand(kPointerSize - 1));
-  b(eq, &word_loop);
-  ldrb(scratch, MemOperand(src, 1, PostIndex));
-  strb(scratch, MemOperand(dst, 1, PostIndex));
-  sub(length, length, Operand(1), SetCC);
-  b(&align_loop_1);
-  // Copy bytes in word size chunks.
-  bind(&word_loop);
-  if (emit_debug_code()) {
-    tst(src, Operand(kPointerSize - 1));
-    Assert(eq, kExpectingAlignmentForCopyBytes);
-  }
-  cmp(length, Operand(kPointerSize));
-  b(lt, &byte_loop);
-  ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
-  str(scratch, MemOperand(dst, kPointerSize, PostIndex));
-  sub(length, length, Operand(kPointerSize));
-  b(&word_loop);
-
-  // Copy the last bytes if any left.
-  bind(&byte_loop);
-  cmp(length, Operand::Zero());
-  b(eq, &done);
-  bind(&byte_loop_1);
-  ldrb(scratch, MemOperand(src, 1, PostIndex));
-  strb(scratch, MemOperand(dst, 1, PostIndex));
-  sub(length, length, Operand(1), SetCC);
-  b(ne, &byte_loop_1);
-  bind(&done);
-}
-
-
 void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
                                                 Register end_address,
                                                 Register filler) {
@@ -3649,7 +3479,7 @@
   cmp(index, ip);
   Check(lt, kIndexIsTooLarge);
 
-  cmp(index, Operand(Smi::FromInt(0)));
+  cmp(index, Operand(Smi::kZero));
   Check(ge, kIndexIsNegative);
 
   SmiUntag(index, index);
@@ -3939,7 +3769,7 @@
 
   // For all objects but the receiver, check that the cache is empty.
   EnumLength(r3, r1);
-  cmp(r3, Operand(Smi::FromInt(0)));
+  cmp(r3, Operand(Smi::kZero));
   b(ne, call_runtime);
 
   bind(&start);
@@ -3970,13 +3800,14 @@
   ExternalReference new_space_allocation_top_adr =
       ExternalReference::new_space_allocation_top_address(isolate());
   const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
-  const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+  const int kMementoLastWordOffset =
+      kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
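+  // Using the offset of the memento's last word, rather than one past its
+  // end, keeps the checks below from touching memory past the memento.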
 
   // Bail out if the object is not in new space.
   JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
   // If the object is in new space, we need to check whether it is on the same
   // page as the current top.
-  add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+  add(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
   mov(ip, Operand(new_space_allocation_top_adr));
   ldr(ip, MemOperand(ip));
   eor(scratch_reg, scratch_reg, Operand(ip));
@@ -3985,7 +3816,7 @@
   // The object is on a different page than allocation top. Bail out if the
   // object sits on the page boundary as no memento can follow and we cannot
   // touch the memory following it.
-  add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+  add(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
   eor(scratch_reg, scratch_reg, Operand(receiver_reg));
   tst(scratch_reg, Operand(~Page::kPageAlignmentMask));
   b(ne, no_memento_found);
@@ -3994,11 +3825,11 @@
   // If top is on the same page as the current object, we need to check whether
   // we are below top.
   bind(&top_check);
-  add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+  add(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
   mov(ip, Operand(new_space_allocation_top_adr));
   ldr(ip, MemOperand(ip));
   cmp(scratch_reg, ip);
-  b(gt, no_memento_found);
+  b(ge, no_memento_found);
   // Memento map check.
   bind(&map_check);
   ldr(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index d524d84..4f0ee82 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -549,6 +549,14 @@
   void VmovLow(Register dst, DwVfpRegister src);
   void VmovLow(DwVfpRegister dst, Register src);
 
+  // Simulate s-register moves for the imaginary registers s32-s63.
+  void VmovExtended(Register dst, int src_code);
+  void VmovExtended(int dst_code, Register src);
+  // Move between s-registers and imaginary s-registers.
+  void VmovExtended(int dst_code, int src_code, Register scratch);
+  void VmovExtended(int dst_code, const MemOperand& src, Register scratch);
+  void VmovExtended(const MemOperand& dst, int src_code, Register scratch);
+
   void LslPair(Register dst_low, Register dst_high, Register src_low,
                Register src_high, Register scratch, Register shift);
   void LslPair(Register dst_low, Register dst_high, Register src_low,
@@ -720,24 +728,8 @@
   // ---------------------------------------------------------------------------
   // Inline caching support
 
-  // Generate code for checking access rights - used for security checks
-  // on access to global objects across environments. The holder register
-  // is left untouched, whereas both scratch registers are clobbered.
-  void CheckAccessGlobalProxy(Register holder_reg,
-                              Register scratch,
-                              Label* miss);
-
   void GetNumberHash(Register t0, Register scratch);
 
-  void LoadFromNumberDictionary(Label* miss,
-                                Register elements,
-                                Register key,
-                                Register result,
-                                Register t0,
-                                Register t1,
-                                Register t2);
-
-
   inline void MarkCode(NopMarkerTypes type) {
     nop(type);
   }
@@ -850,14 +842,6 @@
                        Register scratch1, Register scratch2,
                        Label* gc_required);
 
-  // Copies a number of bytes from src to dst. All registers are clobbered. On
-  // exit src and dst will point to the place just after where the last byte was
-  // read or written and length will be zero.
-  void CopyBytes(Register src,
-                 Register dst,
-                 Register length,
-                 Register scratch);
-
   // Initialize fields with filler values.  Fields starting at |current_address|
   // not including |end_address| are overwritten with the value in |filler|.  At
   // the end of the loop, |current_address| takes the value of |end_address|.
@@ -900,13 +884,6 @@
                            Register type_reg,
                            InstanceType type);
 
-
-  // Check if a map for a JSObject indicates that the object has fast elements.
-  // Jump to the specified label if it does not.
-  void CheckFastElements(Register map,
-                         Register scratch,
-                         Label* fail);
-
   // Check if a map for a JSObject indicates that the object can have both smi
   // and HeapObject elements.  Jump to the specified label if it does not.
   void CheckFastObjectElements(Register map,
@@ -1016,12 +993,6 @@
   }
 
 
-  // Picks out an array index from the hash field.
-  // Register use:
-  //   hash - holds the index's hash. Clobbered.
-  //   index - holds the overwritten index on exit.
-  void IndexFromHash(Register hash, Register index);
-
   // Get the number of least significant bits from a register
   void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
   void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
diff --git a/src/arm64/assembler-arm64.cc b/src/arm64/assembler-arm64.cc
index 9ee20d4..37fdb26 100644
--- a/src/arm64/assembler-arm64.cc
+++ b/src/arm64/assembler-arm64.cc
@@ -306,8 +306,9 @@
   DCHECK(mode != RelocInfo::COMMENT && mode != RelocInfo::CONST_POOL &&
          mode != RelocInfo::VENEER_POOL &&
          mode != RelocInfo::CODE_AGE_SEQUENCE &&
-         mode != RelocInfo::DEOPT_POSITION && mode != RelocInfo::DEOPT_REASON &&
-         mode != RelocInfo::DEOPT_ID);
+         mode != RelocInfo::DEOPT_SCRIPT_OFFSET &&
+         mode != RelocInfo::DEOPT_INLINING_ID &&
+         mode != RelocInfo::DEOPT_REASON && mode != RelocInfo::DEOPT_ID);
   uint64_t raw_data = static_cast<uint64_t>(data);
   int offset = assm_->pc_offset();
   if (IsEmpty()) {
@@ -2947,7 +2948,8 @@
        (rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL)) ||
       (rmode == RelocInfo::INTERNAL_REFERENCE) ||
       (rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) ||
-      (rmode == RelocInfo::DEOPT_POSITION) ||
+      (rmode == RelocInfo::DEOPT_SCRIPT_OFFSET) ||
+      (rmode == RelocInfo::DEOPT_INLINING_ID) ||
       (rmode == RelocInfo::DEOPT_REASON) || (rmode == RelocInfo::DEOPT_ID) ||
       (rmode == RelocInfo::GENERATOR_CONTINUATION)) {
     // Adjust code for new modes.
diff --git a/src/arm64/assembler-arm64.h b/src/arm64/assembler-arm64.h
index 16b7eae..d5c2936 100644
--- a/src/arm64/assembler-arm64.h
+++ b/src/arm64/assembler-arm64.h
@@ -933,7 +933,8 @@
 
   // Record a deoptimization reason that can be used by a log or cpu profiler.
   // Use --trace-deopt to enable.
-  void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
+  void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
+                         int id);
 
   int buffer_space() const;
 
diff --git a/src/arm64/code-stubs-arm64.cc b/src/arm64/code-stubs-arm64.cc
index ca5ea80..c0d700c 100644
--- a/src/arm64/code-stubs-arm64.cc
+++ b/src/arm64/code-stubs-arm64.cc
@@ -1649,15 +1649,12 @@
   __ Add(x10, x10, x10);
   __ Add(number_of_capture_registers, x10, 2);
 
-  // Check that the fourth object is a JSObject.
+  // Check that the last match info is a FixedArray.
   DCHECK(jssp.Is(__ StackPointer()));
-  __ Peek(x10, kLastMatchInfoOffset);
-  __ JumpIfSmi(x10, &runtime);
-  __ JumpIfNotObjectType(x10, x11, x11, JS_OBJECT_TYPE, &runtime);
+  __ Peek(last_match_info_elements, kLastMatchInfoOffset);
+  __ JumpIfSmi(last_match_info_elements, &runtime);
 
   // Check that the object has fast elements.
-  __ Ldr(last_match_info_elements,
-         FieldMemOperand(x10, JSObject::kElementsOffset));
   __ Ldr(x10,
          FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
   __ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime);
@@ -1670,38 +1667,29 @@
   __ Ldrsw(x10,
            UntagSmiFieldMemOperand(last_match_info_elements,
                                    FixedArray::kLengthOffset));
-  __ Add(x11, number_of_capture_registers, RegExpImpl::kLastMatchOverhead);
+  __ Add(x11, number_of_capture_registers, RegExpMatchInfo::kLastMatchOverhead);
   __ Cmp(x11, x10);
   __ B(gt, &runtime);
 
   // Store the capture count.
   __ SmiTag(x10, number_of_capture_registers);
-  __ Str(x10,
-         FieldMemOperand(last_match_info_elements,
-                         RegExpImpl::kLastCaptureCountOffset));
+  __ Str(x10, FieldMemOperand(last_match_info_elements,
+                              RegExpMatchInfo::kNumberOfCapturesOffset));
   // Store last subject and last input.
-  __ Str(subject,
-         FieldMemOperand(last_match_info_elements,
-                         RegExpImpl::kLastSubjectOffset));
+  __ Str(subject, FieldMemOperand(last_match_info_elements,
+                                  RegExpMatchInfo::kLastSubjectOffset));
   // Use x10 as the subject string in order to only need
   // one RecordWriteStub.
   __ Mov(x10, subject);
   __ RecordWriteField(last_match_info_elements,
-                      RegExpImpl::kLastSubjectOffset,
-                      x10,
-                      x11,
-                      kLRHasNotBeenSaved,
-                      kDontSaveFPRegs);
-  __ Str(subject,
-         FieldMemOperand(last_match_info_elements,
-                         RegExpImpl::kLastInputOffset));
+                      RegExpMatchInfo::kLastSubjectOffset, x10, x11,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ Str(subject, FieldMemOperand(last_match_info_elements,
+                                  RegExpMatchInfo::kLastInputOffset));
   __ Mov(x10, subject);
   __ RecordWriteField(last_match_info_elements,
-                      RegExpImpl::kLastInputOffset,
-                      x10,
-                      x11,
-                      kLRHasNotBeenSaved,
-                      kDontSaveFPRegs);
+                      RegExpMatchInfo::kLastInputOffset, x10, x11,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs);
 
   Register last_match_offsets = x13;
   Register offsets_vector_index = x14;
@@ -1716,9 +1704,8 @@
   Label next_capture, done;
   // Capture register counter starts from number of capture registers and
   // iterates down to zero (inclusive).
-  __ Add(last_match_offsets,
-         last_match_info_elements,
-         RegExpImpl::kFirstCaptureOffset - kHeapObjectTag);
+  __ Add(last_match_offsets, last_match_info_elements,
+         RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag);
   __ Bind(&next_capture);
   __ Subs(number_of_capture_registers, number_of_capture_registers, 2);
   __ B(mi, &done);
@@ -1738,7 +1725,7 @@
   __ Bind(&done);
 
   // Return last match info.
-  __ Peek(x0, kLastMatchInfoOffset);
+  __ Mov(x0, last_match_info_elements);
   // Drop the 4 arguments of the stub from the stack.
   __ Drop(4);
   __ Ret();
@@ -1997,6 +1984,7 @@
 }
 
 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
+  // x0 - number of arguments
   // x1 - function
   // x3 - slot id
   // x2 - vector
@@ -2011,8 +1999,6 @@
   __ Cmp(function, scratch);
   __ B(ne, miss);
 
-  __ Mov(x0, Operand(arg_count()));
-
   // Increment the call count for monomorphic function calls.
   IncrementCallCount(masm, feedback_vector, index);
 
@@ -2021,7 +2007,7 @@
   Register new_target_arg = index;
   __ Mov(allocation_site_arg, allocation_site);
   __ Mov(new_target_arg, function);
-  ArrayConstructorStub stub(masm->isolate(), arg_count());
+  ArrayConstructorStub stub(masm->isolate());
   __ TailCallStub(&stub);
 }
 
@@ -2029,12 +2015,11 @@
 void CallICStub::Generate(MacroAssembler* masm) {
   ASM_LOCATION("CallICStub");
 
+  // x0 - number of arguments
   // x1 - function
   // x3 - slot id (Smi)
   // x2 - vector
   Label extra_checks_or_miss, call, call_function, call_count_incremented;
-  int argc = arg_count();
-  ParameterCount actual(argc);
 
   Register function = x1;
   Register feedback_vector = x2;
@@ -2072,7 +2057,6 @@
   // Increment the call count for monomorphic function calls.
   IncrementCallCount(masm, feedback_vector, index);
 
-  __ Mov(x0, argc);
   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
                                                     tail_call_mode()),
           RelocInfo::CODE_TARGET);
@@ -2113,7 +2097,6 @@
   IncrementCallCount(masm, feedback_vector, index);
 
   __ Bind(&call_count_incremented);
-  __ Mov(x0, argc);
   __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
           RelocInfo::CODE_TARGET);
 
@@ -2142,9 +2125,12 @@
   // x2 - vector
   // x3 - slot
   // x1 - function
+  // x0 - number of arguments
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     CreateWeakCellStub create_stub(masm->isolate());
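+    // Preserve the number of arguments in x0 as a Smi across the stub call.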
+    __ SmiTag(x0);
+    __ Push(x0);
     __ Push(feedback_vector, index);
 
     __ Push(cp, function);
@@ -2152,6 +2138,8 @@
     __ Pop(cp, function);
 
     __ Pop(feedback_vector, index);
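+    // Restore the number of arguments.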
+    __ Pop(x0);
+    __ SmiUntag(x0);
   }
 
   __ B(&call_function);
@@ -2171,14 +2159,21 @@
 
   FrameScope scope(masm, StackFrame::INTERNAL);
 
+  // Preserve the number of arguments as a Smi.
+  __ SmiTag(x0);
+
   // Push the receiver and the function and feedback info.
-  __ Push(x1, x2, x3);
+  __ Push(x0, x1, x2, x3);
 
   // Call the entry.
   __ CallRuntime(Runtime::kCallIC_Miss);
 
   // Move the result to x1 and exit the internal frame.
   __ Mov(x1, x0);
+
+  // Restore the number of arguments.
+  __ Pop(x0);
+  __ SmiUntag(x0);
 }
 
 
@@ -2979,21 +2974,6 @@
   __ Ret();
 }
 
-
-void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  LoadICStub stub(isolate());
-  stub.GenerateForTrampoline(masm);
-}
-
-
-void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  KeyedLoadICStub stub(isolate());
-  stub.GenerateForTrampoline(masm);
-}
-
-
 void CallICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(x2);
   CallICStub stub(isolate(), state());
@@ -3001,14 +2981,6 @@
 }
 
 
-void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-
-void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-
 static void HandleArrayCases(MacroAssembler* masm, Register feedback,
                              Register receiver_map, Register scratch1,
                              Register scratch2, bool is_polymorphic,
@@ -3099,172 +3071,12 @@
   __ Jump(handler);
 }
 
-
-void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // x1
-  Register name = LoadWithVectorDescriptor::NameRegister();          // x2
-  Register vector = LoadWithVectorDescriptor::VectorRegister();      // x3
-  Register slot = LoadWithVectorDescriptor::SlotRegister();          // x0
-  Register feedback = x4;
-  Register receiver_map = x5;
-  Register scratch1 = x6;
-
-  __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
-  __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
-                        scratch1, &compare_map, &load_smi_map, &try_array);
-
-  // Is it a fixed array?
-  __ Bind(&try_array);
-  __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
-  __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, x7, true, &miss);
-
-  __ Bind(&not_array);
-  __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex, &miss);
-  masm->isolate()->load_stub_cache()->GenerateProbe(
-      masm, receiver, name, feedback, receiver_map, scratch1, x7);
-
-  __ Bind(&miss);
-  LoadIC::GenerateMiss(masm);
-
-  __ Bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ jmp(&compare_map);
-}
-
-
-void KeyedLoadICStub::Generate(MacroAssembler* masm) {
-  GenerateImpl(masm, false);
-}
-
-
-void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-
-void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // x1
-  Register key = LoadWithVectorDescriptor::NameRegister();           // x2
-  Register vector = LoadWithVectorDescriptor::VectorRegister();      // x3
-  Register slot = LoadWithVectorDescriptor::SlotRegister();          // x0
-  Register feedback = x4;
-  Register receiver_map = x5;
-  Register scratch1 = x6;
-
-  __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
-  __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
-                        scratch1, &compare_map, &load_smi_map, &try_array);
-
-  __ Bind(&try_array);
-  // Is it a fixed array?
-  __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
-  __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
-
-  // We have a polymorphic element handler.
-  Label polymorphic, try_poly_name;
-  __ Bind(&polymorphic);
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, x7, true, &miss);
-
-  __ Bind(&not_array);
-  // Is it generic?
-  __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex,
-                   &try_poly_name);
-  Handle<Code> megamorphic_stub =
-      KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
-  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
-  __ Bind(&try_poly_name);
-  // We might have a name in feedback, and a fixed array in the next slot.
-  __ Cmp(key, feedback);
-  __ B(ne, &miss);
-  // If the name comparison succeeded, we know we have a fixed array with
-  // at least one map/handler pair.
-  __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
-  __ Ldr(feedback,
-         FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, x7, false, &miss);
-
-  __ Bind(&miss);
-  KeyedLoadIC::GenerateMiss(masm);
-
-  __ Bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ jmp(&compare_map);
-}
-
-void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
-  StoreICStub stub(isolate(), state());
-  stub.GenerateForTrampoline(masm);
-}
-
 void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
   KeyedStoreICStub stub(isolate(), state());
   stub.GenerateForTrampoline(masm);
 }
 
-void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // x1
-  Register key = StoreWithVectorDescriptor::NameRegister();           // x2
-  Register vector = StoreWithVectorDescriptor::VectorRegister();      // x3
-  Register slot = StoreWithVectorDescriptor::SlotRegister();          // x4
-  DCHECK(StoreWithVectorDescriptor::ValueRegister().is(x0));          // x0
-  Register feedback = x5;
-  Register receiver_map = x6;
-  Register scratch1 = x7;
-
-  __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
-  __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
-                        scratch1, &compare_map, &load_smi_map, &try_array);
-
-  // Is it a fixed array?
-  __ Bind(&try_array);
-  __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
-  __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, x8, true, &miss);
-
-  __ Bind(&not_array);
-  __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex, &miss);
-  masm->isolate()->store_stub_cache()->GenerateProbe(
-      masm, receiver, key, feedback, receiver_map, scratch1, x8);
-
-  __ Bind(&miss);
-  StoreIC::GenerateMiss(masm);
-
-  __ Bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ jmp(&compare_map);
-}
-
 void KeyedStoreICStub::Generate(MacroAssembler* masm) {
   GenerateImpl(masm, false);
 }
@@ -3895,33 +3707,22 @@
     MacroAssembler* masm,
     AllocationSiteOverrideMode mode) {
   Register argc = x0;
-  if (argument_count() == ANY) {
-    Label zero_case, n_case;
-    __ Cbz(argc, &zero_case);
-    __ Cmp(argc, 1);
-    __ B(ne, &n_case);
+  Label zero_case, n_case;
+  __ Cbz(argc, &zero_case);
+  __ Cmp(argc, 1);
+  __ B(ne, &n_case);
 
-    // One argument.
-    CreateArrayDispatchOneArgument(masm, mode);
+  // One argument.
+  CreateArrayDispatchOneArgument(masm, mode);
 
-    __ Bind(&zero_case);
-    // No arguments.
-    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+  __ Bind(&zero_case);
+  // No arguments.
+  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
 
-    __ Bind(&n_case);
-    // N arguments.
-    ArrayNArgumentsConstructorStub stub(masm->isolate());
-    __ TailCallStub(&stub);
-  } else if (argument_count() == NONE) {
-    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-  } else if (argument_count() == ONE) {
-    CreateArrayDispatchOneArgument(masm, mode);
-  } else if (argument_count() == MORE_THAN_ONE) {
-    ArrayNArgumentsConstructorStub stub(masm->isolate());
-    __ TailCallStub(&stub);
-  } else {
-    UNREACHABLE();
-  }
+  __ Bind(&n_case);
+  // N arguments.
+  ArrayNArgumentsConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
 }
 
 
@@ -3981,21 +3782,8 @@
 
   // Subclassing support.
   __ Bind(&subclassing);
-  switch (argument_count()) {
-    case ANY:
-    case MORE_THAN_ONE:
-      __ Poke(constructor, Operand(x0, LSL, kPointerSizeLog2));
-      __ Add(x0, x0, Operand(3));
-      break;
-    case NONE:
-      __ Poke(constructor, 0 * kPointerSize);
-      __ Mov(x0, Operand(3));
-      break;
-    case ONE:
-      __ Poke(constructor, 1 * kPointerSize);
-      __ Mov(x0, Operand(4));
-      break;
-  }
+  __ Poke(constructor, Operand(x0, LSL, kPointerSizeLog2));
+  __ Add(x0, x0, Operand(3));
   __ Push(new_target, allocation_site);
   __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
 }
@@ -4271,7 +4059,7 @@
     __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex);
     __ Str(x1, FieldMemOperand(x0, JSArray::kPropertiesOffset));
     __ Str(x1, FieldMemOperand(x0, JSArray::kElementsOffset));
-    __ Mov(x1, Smi::FromInt(0));
+    __ Mov(x1, Smi::kZero);
     __ Str(x1, FieldMemOperand(x0, JSArray::kLengthOffset));
     STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
     __ Ret();
@@ -4783,126 +4571,6 @@
 }
 
 
-void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
-  Register context = cp;
-  Register value = x0;
-  Register slot = x2;
-  Register context_temp = x10;
-  Register cell = x10;
-  Register cell_details = x11;
-  Register cell_value = x12;
-  Register cell_value_map = x13;
-  Register value_map = x14;
-  Label fast_heapobject_case, fast_smi_case, slow_case;
-
-  if (FLAG_debug_code) {
-    __ CompareRoot(value, Heap::kTheHoleValueRootIndex);
-    __ Check(ne, kUnexpectedValue);
-  }
-
-  // Go up the context chain to the script context.
-  for (int i = 0; i < depth(); i++) {
-    __ Ldr(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
-    context = context_temp;
-  }
-
-  // Load the PropertyCell at the specified slot.
-  __ Add(cell, context, Operand(slot, LSL, kPointerSizeLog2));
-  __ Ldr(cell, ContextMemOperand(cell));
-
-  // Load PropertyDetails for the cell (actually only the cell_type and kind).
-  __ Ldr(cell_details,
-         UntagSmiFieldMemOperand(cell, PropertyCell::kDetailsOffset));
-  __ And(cell_details, cell_details,
-         PropertyDetails::PropertyCellTypeField::kMask |
-             PropertyDetails::KindField::kMask |
-             PropertyDetails::kAttributesReadOnlyMask);
-
-  // Check if PropertyCell holds mutable data.
-  Label not_mutable_data;
-  __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
-                           PropertyCellType::kMutable) |
-                           PropertyDetails::KindField::encode(kData));
-  __ B(ne, &not_mutable_data);
-  __ JumpIfSmi(value, &fast_smi_case);
-  __ Bind(&fast_heapobject_case);
-  __ Str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
-  // RecordWriteField clobbers the value register, so we copy it before the
-  // call.
-  __ Mov(x11, value);
-  __ RecordWriteField(cell, PropertyCell::kValueOffset, x11, x12,
-                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ Ret();
-
-  __ Bind(&not_mutable_data);
-  // Check if PropertyCell value matches the new value (relevant for Constant,
-  // ConstantType and Undefined cells).
-  Label not_same_value;
-  __ Ldr(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
-  __ Cmp(cell_value, value);
-  __ B(ne, &not_same_value);
-
-  // Make sure the PropertyCell is not marked READ_ONLY.
-  __ Tst(cell_details, PropertyDetails::kAttributesReadOnlyMask);
-  __ B(ne, &slow_case);
-
-  if (FLAG_debug_code) {
-    Label done;
-    // This can only be true for Constant, ConstantType and Undefined cells,
-    // because we never store the_hole via this stub.
-    __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
-                             PropertyCellType::kConstant) |
-                             PropertyDetails::KindField::encode(kData));
-    __ B(eq, &done);
-    __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
-                             PropertyCellType::kConstantType) |
-                             PropertyDetails::KindField::encode(kData));
-    __ B(eq, &done);
-    __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
-                             PropertyCellType::kUndefined) |
-                             PropertyDetails::KindField::encode(kData));
-    __ Check(eq, kUnexpectedValue);
-    __ Bind(&done);
-  }
-  __ Ret();
-  __ Bind(&not_same_value);
-
-  // Check if PropertyCell contains data with constant type (and is not
-  // READ_ONLY).
-  __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
-                           PropertyCellType::kConstantType) |
-                           PropertyDetails::KindField::encode(kData));
-  __ B(ne, &slow_case);
-
-  // Now either both old and new values must be smis or both must be heap
-  // objects with same map.
-  Label value_is_heap_object;
-  __ JumpIfNotSmi(value, &value_is_heap_object);
-  __ JumpIfNotSmi(cell_value, &slow_case);
-  // Old and new values are smis, no need for a write barrier here.
-  __ Bind(&fast_smi_case);
-  __ Str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
-  __ Ret();
-
-  __ Bind(&value_is_heap_object);
-  __ JumpIfSmi(cell_value, &slow_case);
-
-  __ Ldr(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
-  __ Ldr(value_map, FieldMemOperand(value, HeapObject::kMapOffset));
-  __ Cmp(cell_value_map, value_map);
-  __ B(eq, &fast_heapobject_case);
-
-  // Fall back to the runtime.
-  __ Bind(&slow_case);
-  __ SmiTag(slot);
-  __ Push(slot, value);
-  __ TailCallRuntime(is_strict(language_mode())
-                         ? Runtime::kStoreGlobalViaContext_Strict
-                         : Runtime::kStoreGlobalViaContext_Sloppy);
-}
-
-
 // The number of register that CallApiFunctionAndReturn will need to save on
 // the stack. The space for these registers need to be allocated in the
 // ExitFrame before calling CallApiFunctionAndReturn.
@@ -5202,7 +4870,7 @@
   __ Mov(scratch2, Operand(ExternalReference::isolate_address(isolate())));
   __ Ldr(scratch3, FieldMemOperand(callback, AccessorInfo::kDataOffset));
   __ Push(scratch3, scratch, scratch, scratch2, holder);
-  __ Push(Smi::FromInt(0));  // should_throw_on_error -> false
+  __ Push(Smi::kZero);  // should_throw_on_error -> false
   __ Ldr(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
   __ Push(scratch);
 
diff --git a/src/arm64/interface-descriptors-arm64.cc b/src/arm64/interface-descriptors-arm64.cc
index d7bc3de..13ecc2b 100644
--- a/src/arm64/interface-descriptors-arm64.cc
+++ b/src/arm64/interface-descriptors-arm64.cc
@@ -31,9 +31,9 @@
 const Register LoadDescriptor::NameRegister() { return x2; }
 const Register LoadDescriptor::SlotRegister() { return x0; }
 
-
 const Register LoadWithVectorDescriptor::VectorRegister() { return x3; }
 
+const Register LoadICProtoArrayDescriptor::HandlerRegister() { return x4; }
 
 const Register StoreDescriptor::ReceiverRegister() { return x1; }
 const Register StoreDescriptor::NameRegister() { return x2; }
@@ -46,10 +46,6 @@
 const Register StoreTransitionDescriptor::VectorRegister() { return x3; }
 const Register StoreTransitionDescriptor::MapRegister() { return x5; }
 
-const Register StoreGlobalViaContextDescriptor::SlotRegister() { return x2; }
-const Register StoreGlobalViaContextDescriptor::ValueRegister() { return x0; }
-
-
 const Register StringCompareDescriptor::LeftRegister() { return x1; }
 const Register StringCompareDescriptor::RightRegister() { return x0; }
 
@@ -181,7 +177,7 @@
 
 void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {x1, x3, x2};
+  Register registers[] = {x1, x0, x3, x2};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
@@ -230,16 +226,6 @@
 }
 
 
-void RegExpConstructResultDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  // x2: length
-  // x1: index (of last match)
-  // x0: string
-  Register registers[] = {x2, x1, x0};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
 void TransitionElementsKindDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // x0: value (js_array)
diff --git a/src/arm64/macro-assembler-arm64.cc b/src/arm64/macro-assembler-arm64.cc
index 87ea1eb..bc7a281 100644
--- a/src/arm64/macro-assembler-arm64.cc
+++ b/src/arm64/macro-assembler-arm64.cc
@@ -1532,11 +1532,12 @@
   ExternalReference new_space_allocation_top_adr =
       ExternalReference::new_space_allocation_top_address(isolate());
   const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
-  const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+  const int kMementoLastWordOffset =
+      kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
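+  // Using the offset of the memento's last word, rather than one past its
+  // end, keeps the checks below from touching memory past the memento.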
 
   // Bail out if the object is not in new space.
   JumpIfNotInNewSpace(receiver, no_memento_found);
-  Add(scratch1, receiver, kMementoEndOffset);
+  Add(scratch1, receiver, kMementoLastWordOffset);
   // If the object is in new space, we need to check whether it is on the same
   // page as the current top.
   Mov(scratch2, new_space_allocation_top_adr);
@@ -1558,7 +1559,7 @@
   Mov(scratch2, new_space_allocation_top_adr);
   Ldr(scratch2, MemOperand(scratch2));
   Cmp(scratch1, scratch2);
-  B(gt, no_memento_found);
+  B(ge, no_memento_found);
   // Memento map check.
   bind(&map_check);
   Ldr(scratch1, MemOperand(receiver, kMementoMapOffset));
@@ -2169,62 +2170,6 @@
   Fcvtnu(output, dbl_scratch);
 }
 
-
-void MacroAssembler::CopyBytes(Register dst,
-                               Register src,
-                               Register length,
-                               Register scratch,
-                               CopyHint hint) {
-  UseScratchRegisterScope temps(this);
-  Register tmp1 = temps.AcquireX();
-  Register tmp2 = temps.AcquireX();
-  DCHECK(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
-  DCHECK(!AreAliased(src, dst, csp));
-
-  if (emit_debug_code()) {
-    // Check copy length.
-    Cmp(length, 0);
-    Assert(ge, kUnexpectedNegativeValue);
-
-    // Check src and dst buffers don't overlap.
-    Add(scratch, src, length);  // Calculate end of src buffer.
-    Cmp(scratch, dst);
-    Add(scratch, dst, length);  // Calculate end of dst buffer.
-    Ccmp(scratch, src, ZFlag, gt);
-    Assert(le, kCopyBuffersOverlap);
-  }
-
-  Label short_copy, short_loop, bulk_loop, done;
-
-  if ((hint == kCopyLong || hint == kCopyUnknown) && !FLAG_optimize_for_size) {
-    Register bulk_length = scratch;
-    int pair_size = 2 * kXRegSize;
-    int pair_mask = pair_size - 1;
-
-    Bic(bulk_length, length, pair_mask);
-    Cbz(bulk_length, &short_copy);
-    Bind(&bulk_loop);
-    Sub(bulk_length, bulk_length, pair_size);
-    Ldp(tmp1, tmp2, MemOperand(src, pair_size, PostIndex));
-    Stp(tmp1, tmp2, MemOperand(dst, pair_size, PostIndex));
-    Cbnz(bulk_length, &bulk_loop);
-
-    And(length, length, pair_mask);
-  }
-
-  Bind(&short_copy);
-  Cbz(length, &done);
-  Bind(&short_loop);
-  Sub(length, length, 1);
-  Ldrb(tmp1, MemOperand(src, 1, PostIndex));
-  Strb(tmp1, MemOperand(dst, 1, PostIndex));
-  Cbnz(length, &short_loop);
-
-
-  Bind(&done);
-}
-
-
 void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
                                                 Register end_address,
                                                 Register filler) {
@@ -3719,20 +3664,6 @@
   }
 }
 
-
-void MacroAssembler::CheckFastElements(Register map,
-                                       Register scratch,
-                                       Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  STATIC_ASSERT(FAST_ELEMENTS == 2);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
-  Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
-  Cmp(scratch, Map::kMaximumBitField2FastHoleyElementValue);
-  B(hi, fail);
-}
-
-
 void MacroAssembler::CheckFastObjectElements(Register map,
                                              Register scratch,
                                              Label* fail) {
@@ -3790,19 +3721,6 @@
   return has_frame_ || !stub->SometimesSetsUpAFrame();
 }
 
-
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
-  // If the hash field contains an array index pick it out. The assert checks
-  // that the constants for the maximum number of digits for an array index
-  // cached in the hash field and the number of bits reserved for it does not
-  // conflict.
-  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
-         (1 << String::kArrayIndexValueBits));
-  DecodeField<String::ArrayIndexValueBits>(index, hash);
-  SmiTag(index, index);
-}
-
-
 void MacroAssembler::EmitSeqStringSetCharCheck(
     Register string,
     Register index,
@@ -3830,85 +3748,12 @@
   Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
   Check(lt, kIndexIsTooLarge);
 
-  DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+  DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
   Cmp(index, 0);
   Check(ge, kIndexIsNegative);
 }
 
 
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
-                                            Register scratch1,
-                                            Register scratch2,
-                                            Label* miss) {
-  DCHECK(!AreAliased(holder_reg, scratch1, scratch2));
-  Label same_contexts;
-
-  // Load current lexical context from the active StandardFrame, which
-  // may require crawling past STUB frames.
-  Label load_context;
-  Label has_context;
-  Mov(scratch2, fp);
-  bind(&load_context);
-  Ldr(scratch1,
-      MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
-  JumpIfNotSmi(scratch1, &has_context);
-  Ldr(scratch2, MemOperand(scratch2, CommonFrameConstants::kCallerFPOffset));
-  B(&load_context);
-  bind(&has_context);
-
-  // In debug mode, make sure the lexical context is set.
-#ifdef DEBUG
-  Cmp(scratch1, 0);
-  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
-#endif
-
-  // Load the native context of the current context.
-  Ldr(scratch1, ContextMemOperand(scratch1, Context::NATIVE_CONTEXT_INDEX));
-
-  // Check the context is a native context.
-  if (emit_debug_code()) {
-    // Read the first word and compare to the native_context_map.
-    Ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
-    CompareRoot(scratch2, Heap::kNativeContextMapRootIndex);
-    Check(eq, kExpectedNativeContext);
-  }
-
-  // Check if both contexts are the same.
-  Ldr(scratch2, FieldMemOperand(holder_reg,
-                                JSGlobalProxy::kNativeContextOffset));
-  Cmp(scratch1, scratch2);
-  B(&same_contexts, eq);
-
-  // Check the context is a native context.
-  if (emit_debug_code()) {
-    // We're short on scratch registers here, so use holder_reg as a scratch.
-    Push(holder_reg);
-    Register scratch3 = holder_reg;
-
-    CompareRoot(scratch2, Heap::kNullValueRootIndex);
-    Check(ne, kExpectedNonNullContext);
-
-    Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
-    CompareRoot(scratch3, Heap::kNativeContextMapRootIndex);
-    Check(eq, kExpectedNativeContext);
-    Pop(holder_reg);
-  }
-
-  // Check that the security token in the calling global object is
-  // compatible with the security token in the receiving global
-  // object.
-  int token_offset = Context::kHeaderSize +
-                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
-
-  Ldr(scratch1, FieldMemOperand(scratch1, token_offset));
-  Ldr(scratch2, FieldMemOperand(scratch2, token_offset));
-  Cmp(scratch1, scratch2);
-  B(miss, ne);
-
-  Bind(&same_contexts);
-}
-
-
 // Compute the hash code from the untagged key. This must be kept in sync with
 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
 // code-stub-hydrogen.cc
@@ -3944,69 +3789,6 @@
   Bic(key, key, Operand(0xc0000000u));
 }
 
-
-void MacroAssembler::LoadFromNumberDictionary(Label* miss,
-                                              Register elements,
-                                              Register key,
-                                              Register result,
-                                              Register scratch0,
-                                              Register scratch1,
-                                              Register scratch2,
-                                              Register scratch3) {
-  DCHECK(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));
-
-  Label done;
-
-  SmiUntag(scratch0, key);
-  GetNumberHash(scratch0, scratch1);
-
-  // Compute the capacity mask.
-  Ldrsw(scratch1,
-        UntagSmiFieldMemOperand(elements,
-                                SeededNumberDictionary::kCapacityOffset));
-  Sub(scratch1, scratch1, 1);
-
-  // Generate an unrolled loop that performs a few probes before giving up.
-  for (int i = 0; i < kNumberDictionaryProbes; i++) {
-    // Compute the masked index: (hash + i + i * i) & mask.
-    if (i > 0) {
-      Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i));
-    } else {
-      Mov(scratch2, scratch0);
-    }
-    And(scratch2, scratch2, scratch1);
-
-    // Scale the index by multiplying by the element size.
-    DCHECK(SeededNumberDictionary::kEntrySize == 3);
-    Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
-
-    // Check if the key is identical to the name.
-    Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
-    Ldr(scratch3,
-        FieldMemOperand(scratch2,
-                        SeededNumberDictionary::kElementsStartOffset));
-    Cmp(key, scratch3);
-    if (i != (kNumberDictionaryProbes - 1)) {
-      B(eq, &done);
-    } else {
-      B(ne, miss);
-    }
-  }
-
-  Bind(&done);
-  // Check that the value is a field property.
-  const int kDetailsOffset =
-      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
-  Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
-  DCHECK_EQ(DATA, 0);
-  TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss);
-
-  // Get the value at the masked, scaled index and return.
-  const int kValueOffset =
-      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
-  Ldr(result, FieldMemOperand(scratch2, kValueOffset));
-}
-
 void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
                                                Register code_entry,
                                                Register scratch) {
diff --git a/src/arm64/macro-assembler-arm64.h b/src/arm64/macro-assembler-arm64.h
index 37e9926..a89c106 100644
--- a/src/arm64/macro-assembler-arm64.h
+++ b/src/arm64/macro-assembler-arm64.h
@@ -1099,16 +1099,6 @@
   void InitializeFieldsWithFiller(Register current_address,
                                   Register end_address, Register filler);
 
-  // Copies a number of bytes from src to dst. All passed registers are
-  // clobbered. On exit src and dst will point to the place just after where the
-  // last byte was read or written and length will be zero. Hint may be used to
-  // determine which is the most efficient algorithm to use for copying.
-  void CopyBytes(Register dst,
-                 Register src,
-                 Register length,
-                 Register scratch,
-                 CopyHint hint = kCopyUnknown);
-
   // ---- String Utilities ----
 
 
@@ -1576,10 +1566,6 @@
                     Label* if_any_set,
                     Label* fall_through);
 
-  // Check if a map for a JSObject indicates that the object has fast elements.
-  // Jump to the specified label if it does not.
-  void CheckFastElements(Register map, Register scratch, Label* fail);
-
   // Check if a map for a JSObject indicates that the object can have both smi
   // and HeapObject elements.  Jump to the specified label if it does not.
   void CheckFastObjectElements(Register map, Register scratch, Label* fail);
@@ -1595,12 +1581,6 @@
                                    Label* fail,
                                    int elements_offset = 0);
 
-  // Picks out an array index from the hash field.
-  // Register use:
-  //   hash - holds the index's hash. Clobbered.
-  //   index - holds the overwritten index on exit.
-  void IndexFromHash(Register hash, Register index);
-
   // ---------------------------------------------------------------------------
   // Inline caching support.
 
@@ -1610,39 +1590,10 @@
                                  Register scratch,
                                  uint32_t encoding_mask);
 
-  // Generate code for checking access rights - used for security checks
-  // on access to global objects across environments. The holder register
-  // is left untouched, whereas both scratch registers are clobbered.
-  void CheckAccessGlobalProxy(Register holder_reg,
-                              Register scratch1,
-                              Register scratch2,
-                              Label* miss);
-
   // Hash the integer value in the 'key' register.
   // It uses the same algorithm as ComputeIntegerHash in utils.h.
   void GetNumberHash(Register key, Register scratch);
 
-  // Load value from the dictionary.
-  //
-  // elements - holds the slow-case elements of the receiver on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // key      - holds the smi key on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // result   - holds the result on exit if the load succeeded.
-  //            Allowed to be the same as 'key' or 'result'.
-  //            Unchanged on bailout so 'key' or 'result' can be used
-  //            in further computation.
-  void LoadFromNumberDictionary(Label* miss,
-                                Register elements,
-                                Register key,
-                                Register result,
-                                Register scratch0,
-                                Register scratch1,
-                                Register scratch2,
-                                Register scratch3);
-
   // ---------------------------------------------------------------------------
   // Frames.
 
diff --git a/src/asmjs/asm-js.cc b/src/asmjs/asm-js.cc
index a1af1af..13f936d 100644
--- a/src/asmjs/asm-js.cc
+++ b/src/asmjs/asm-js.cc
@@ -20,6 +20,7 @@
 #include "src/wasm/wasm-js.h"
 #include "src/wasm/wasm-module-builder.h"
 #include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
 #include "src/wasm/wasm-result.h"
 
 typedef uint8_t byte;
@@ -162,11 +163,14 @@
   v8::internal::wasm::AsmWasmBuilder builder(info->isolate(), info->zone(),
                                              info->literal(), &typer);
   i::Handle<i::FixedArray> foreign_globals;
-  auto module = builder.Run(&foreign_globals);
+  auto asm_wasm_result = builder.Run(&foreign_globals);
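+  // The builder result now carries both the wasm module bytes and an offset
+  // table mapping wasm positions back to the original asm.js source.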
+  wasm::ZoneBuffer* module = asm_wasm_result.module_bytes;
+  wasm::ZoneBuffer* asm_offsets = asm_wasm_result.asm_offset_table;
 
   i::MaybeHandle<i::JSObject> compiled = wasm::CreateModuleObjectFromBytes(
       info->isolate(), module->begin(), module->end(), &thrower,
-      internal::wasm::kAsmJsOrigin);
+      internal::wasm::kAsmJsOrigin, info->script(), asm_offsets->begin(),
+      asm_offsets->end());
   DCHECK(!compiled.is_null());
 
   wasm::AsmTyper::StdlibSet uses = typer.StdlibUses();
diff --git a/src/asmjs/asm-typer.cc b/src/asmjs/asm-typer.cc
index 94cc4db..55b5fc7 100644
--- a/src/asmjs/asm-typer.cc
+++ b/src/asmjs/asm-typer.cc
@@ -567,7 +567,7 @@
   module_name_ = fun->name();
 
   // Allowed parameters: Stdlib, FFI, Mem
-  static const uint32_t MaxModuleParameters = 3;
+  static const int MaxModuleParameters = 3;
   if (scope->num_parameters() > MaxModuleParameters) {
     FAIL(fun, "asm.js modules may not have more than three parameters.");
   }
@@ -647,11 +647,8 @@
     FAIL(current, "Invalid top-level statement in asm.js module.");
   }
 
-  ZoneList<Declaration*>* decls = scope->declarations();
-
-  for (int ii = 0; ii < decls->length(); ++ii) {
-    Declaration* decl = decls->at(ii);
-
+  Declaration::List* decls = scope->declarations();
+  for (Declaration* decl : *decls) {
     if (FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration()) {
       RECURSE(ValidateFunction(fun_decl));
       source_layout.AddFunction(*fun_decl);
@@ -664,9 +661,7 @@
     source_layout.AddTable(*function_table);
   }
 
-  for (int ii = 0; ii < decls->length(); ++ii) {
-    Declaration* decl = decls->at(ii);
-
+  for (Declaration* decl : *decls) {
     if (decl->IsFunctionDeclaration()) {
       continue;
     }
@@ -762,9 +757,15 @@
   bool global_variable = false;
   if (value->IsLiteral() || value->IsCall()) {
     AsmType* type = nullptr;
-    RECURSE(type = VariableTypeAnnotations(value, true));
+    VariableInfo::Mutability mutability;
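+    // const-declared globals become kConstGlobal so they may later be used
+    // to initialize other globals.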
+    if (target_variable->mode() == CONST) {
+      mutability = VariableInfo::kConstGlobal;
+    } else {
+      mutability = VariableInfo::kMutableGlobal;
+    }
+    RECURSE(type = VariableTypeAnnotations(value, mutability));
     target_info = new (zone_) VariableInfo(type);
-    target_info->set_mutability(VariableInfo::kMutableGlobal);
+    target_info->set_mutability(mutability);
     global_variable = true;
   } else if (value->IsProperty()) {
     target_info = ImportLookup(value->AsProperty());
@@ -828,6 +829,23 @@
     RECURSE(type = NewHeapView(value->AsCallNew()));
     target_info = new (zone_) VariableInfo(type);
     target_info->set_mutability(VariableInfo::kImmutableGlobal);
+  } else if (auto* proxy = value->AsVariableProxy()) {
+    auto* var_info = Lookup(proxy->var());
+
+    if (var_info == nullptr) {
+      FAIL(value, "Undeclared identifier in global initializer");
+    }
+
+    if (var_info->mutability() != VariableInfo::kConstGlobal) {
+      FAIL(value, "Identifier used to initialize a global must be a const");
+    }
+
+    target_info = new (zone_) VariableInfo(var_info->type());
+    if (target_variable->mode() == CONST) {
+      target_info->set_mutability(VariableInfo::kConstGlobal);
+    } else {
+      target_info->set_mutability(VariableInfo::kMutableGlobal);
+    }
   }
 
   if (target_info == nullptr) {
@@ -999,7 +1017,7 @@
     FAIL(assign, "Identifier redefined (function table name).");
   }
 
-  if (target_info_table->length() != pointers->length()) {
+  if (static_cast<int>(target_info_table->length()) != pointers->length()) {
     FAIL(assign, "Function table size mismatch.");
   }
 
@@ -1053,7 +1071,7 @@
     }
     auto* param = proxy->var();
     if (param->location() != VariableLocation::PARAMETER ||
-        param->index() != annotated_parameters) {
+        param->index() != static_cast<int>(annotated_parameters)) {
       // Done with parameters.
       break;
     }
@@ -1075,7 +1093,7 @@
     SetTypeOf(expr, type);
   }
 
-  if (annotated_parameters != fun->parameter_count()) {
+  if (static_cast<int>(annotated_parameters) != fun->parameter_count()) {
     FAIL(fun_decl, "Incorrect parameter type annotations.");
   }
 
@@ -1138,7 +1156,7 @@
 
   DCHECK(return_type_->IsReturnType());
 
-  for (auto* decl : *fun->scope()->declarations()) {
+  for (Declaration* decl : *fun->scope()->declarations()) {
     auto* var_decl = decl->AsVariableDeclaration();
     if (var_decl == nullptr) {
       FAIL(decl, "Functions may only define inner variables.");
@@ -1642,7 +1660,15 @@
   auto* right = comma->right();
   AsmType* right_type = nullptr;
   if (auto* right_as_call = right->AsCall()) {
-    RECURSE(right_type = ValidateCall(AsmType::Void(), right_as_call));
+    RECURSE(right_type = ValidateFloatCoercion(right_as_call));
+    if (right_type != AsmType::Float()) {
+      // right_type == nullptr <-> right_as_call is not a call to fround.
+      DCHECK(right_type == nullptr);
+      RECURSE(right_type = ValidateCall(AsmType::Void(), right_as_call));
+      // An unannotated function call to something that's not fround must be
+      // a call to a void function.
+      DCHECK_EQ(right_type, AsmType::Void());
+    }
   } else {
     RECURSE(right_type = ValidateExpression(right));
   }
@@ -1674,7 +1700,7 @@
     if (!literal->value()->ToInt32(&value)) {
       FAIL(literal, "Integer literal is out of range.");
     }
-    // *VIOLATION* Not really a violation, but rather a different in the
+    // *VIOLATION* Not really a violation, but rather a difference in
     // validation. The spec handles -NumericLiteral in ValidateUnaryExpression,
     // but V8's AST represents the negative literals as Literals.
     return AsmType::Signed();
@@ -2676,13 +2702,31 @@
     FAIL(statement, "Invalid literal in return statement.");
   }
 
+  if (auto* proxy = ret_expr->AsVariableProxy()) {
+    auto* var_info = Lookup(proxy->var());
+
+    if (var_info == nullptr) {
+      FAIL(statement, "Undeclared identifier in return statement.");
+    }
+
+    if (var_info->mutability() != VariableInfo::kConstGlobal) {
+      FAIL(statement, "Identifier in return statement is not const.");
+    }
+
+    if (!var_info->type()->IsReturnType()) {
+      FAIL(statement, "Constant in return must be signed, float, or double.");
+    }
+
+    return var_info->type();
+  }
+
   FAIL(statement, "Invalid return type expression.");
 }
 
 // 5.4 VariableTypeAnnotations
 // Also used for 5.5 GlobalVariableTypeAnnotations
-AsmType* AsmTyper::VariableTypeAnnotations(Expression* initializer,
-                                           bool global) {
+AsmType* AsmTyper::VariableTypeAnnotations(
+    Expression* initializer, VariableInfo::Mutability mutability_type) {
   if (auto* literal = initializer->AsLiteral()) {
     if (literal->raw_value()->ContainsDot()) {
       SetTypeOf(initializer, AsmType::Double());
@@ -2690,24 +2734,50 @@
     }
     int32_t i32;
     uint32_t u32;
+
+    AsmType* initializer_type = nullptr;
     if (literal->value()->ToUint32(&u32)) {
       if (u32 > LargestFixNum) {
-        SetTypeOf(initializer, AsmType::Unsigned());
+        initializer_type = AsmType::Unsigned();
+        SetTypeOf(initializer, initializer_type);
       } else {
-        SetTypeOf(initializer, AsmType::FixNum());
+        initializer_type = AsmType::FixNum();
+        SetTypeOf(initializer, initializer_type);
+        initializer_type = AsmType::Signed();
       }
     } else if (literal->value()->ToInt32(&i32)) {
-      SetTypeOf(initializer, AsmType::Signed());
+      initializer_type = AsmType::Signed();
+      SetTypeOf(initializer, initializer_type);
     } else {
       FAIL(initializer, "Invalid type annotation - forbidden literal.");
     }
-    return AsmType::Int();
+    if (mutability_type != VariableInfo::kConstGlobal) {
+      return AsmType::Int();
+    }
+    return initializer_type;
+  }
+
+  if (auto* proxy = initializer->AsVariableProxy()) {
+    auto* var_info = Lookup(proxy->var());
+
+    if (var_info == nullptr) {
+      FAIL(initializer,
+           "Undeclared identifier in variable declaration initializer.");
+    }
+
+    if (var_info->mutability() != VariableInfo::kConstGlobal) {
+      FAIL(initializer,
+           "Identifier in variable declaration initializer must be const.");
+    }
+
+    SetTypeOf(initializer, var_info->type());
+    return var_info->type();
   }
 
   auto* call = initializer->AsCall();
   if (call == nullptr) {
     FAIL(initializer,
-         "Invalid variable initialization - it should be a literal, or "
+         "Invalid variable initialization - it should be a literal, const, or "
          "fround(literal).");
   }
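
The new mutability parameter also changes what the literal case above returns:
a mutable global initialized from an int literal is annotated Int (it may
later hold any int), while a const global keeps the literal's precise type
(Unsigned, Signed, or FixNum promoted to Signed), which is what lets
`const K = 37; var x = K;` or `return K;` validate as if the literal itself
appeared there. A sketch of the rule, assuming the types used above:

    // Sketch only: how the returned annotation depends on mutability.
    AsmType* AnnotationForIntLiteral(AsmType* literal_type,
                                     AsmTyper::VariableInfo::Mutability mut) {
      if (mut != AsmTyper::VariableInfo::kConstGlobal) {
        return AsmType::Int();  // a mutable global may be reassigned any int
      }
      return literal_type;  // a const global keeps the literal's exact type
    }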
 
@@ -2724,7 +2794,7 @@
   }
 
   // Float constants must contain dots in locals, but not in globals.
-  if (!global) {
+  if (mutability_type == VariableInfo::kLocal) {
     if (!src_expr->raw_value()->ContainsDot()) {
       FAIL(initializer,
            "Invalid float type annotation - expected literal argument to be a "
diff --git a/src/asmjs/asm-typer.h b/src/asmjs/asm-typer.h
index 942ca21..2c66948 100644
--- a/src/asmjs/asm-typer.h
+++ b/src/asmjs/asm-typer.h
@@ -102,6 +102,13 @@
       kInvalidMutability,
       kLocal,
       kMutableGlobal,
+      // *VIOLATION* We support const variables in asm.js, as per the
+      //
+      // https://discourse.wicg.io/t/allow-const-global-variables/684
+      //
+      // Global const variables are treated as if they were numeric literals,
+      // and can be used anywhere a literal can be used.
+      kConstGlobal,
       kImmutableGlobal,
     };
 
@@ -114,7 +121,8 @@
     }
 
     bool IsGlobal() const {
-      return mutability_ == kImmutableGlobal || mutability_ == kMutableGlobal;
+      return mutability_ == kImmutableGlobal || mutability_ == kConstGlobal ||
+             mutability_ == kMutableGlobal;
     }
 
     bool IsStdlib() const { return standard_member_ == kStdlib; }
@@ -307,8 +315,9 @@
   AsmType* ReturnTypeAnnotations(ReturnStatement* statement);
   // 5.4 VariableTypeAnnotations
   // 5.5 GlobalVariableTypeAnnotations
-  AsmType* VariableTypeAnnotations(Expression* initializer,
-                                   bool global = false);
+  AsmType* VariableTypeAnnotations(
+      Expression* initializer,
+      VariableInfo::Mutability global = VariableInfo::kLocal);
   AsmType* ImportExpression(Property* import);
   AsmType* NewHeapView(CallNew* new_heap_view);
 
diff --git a/src/asmjs/asm-types.h b/src/asmjs/asm-types.h
index 6fe4201..882e328 100644
--- a/src/asmjs/asm-types.h
+++ b/src/asmjs/asm-types.h
@@ -7,7 +7,9 @@
 
 #include <string>
 
+#include "src/base/compiler-specific.h"
 #include "src/base/macros.h"
+#include "src/globals.h"
 #include "src/zone/zone-containers.h"
 #include "src/zone/zone.h"
 
@@ -92,7 +94,7 @@
   }
 
   static AsmType* New(bitset_t bits) {
-    DCHECK_EQ((bits & kAsmValueTypeTag), 0);
+    DCHECK_EQ((bits & kAsmValueTypeTag), 0u);
     return reinterpret_cast<AsmType*>(
         static_cast<uintptr_t>(bits | kAsmValueTypeTag));
   }
@@ -101,7 +103,7 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(AsmValueType);
 };
 
-class AsmCallableType : public ZoneObject {
+class V8_EXPORT_PRIVATE AsmCallableType : public NON_EXPORTED_BASE(ZoneObject) {
  public:
   virtual std::string Name() = 0;
 
@@ -124,7 +126,7 @@
   DISALLOW_COPY_AND_ASSIGN(AsmCallableType);
 };
 
-class AsmFunctionType final : public AsmCallableType {
+class V8_EXPORT_PRIVATE AsmFunctionType final : public AsmCallableType {
  public:
   AsmFunctionType* AsFunctionType() final { return this; }
 
@@ -151,7 +153,8 @@
   DISALLOW_COPY_AND_ASSIGN(AsmFunctionType);
 };
 
-class AsmOverloadedFunctionType final : public AsmCallableType {
+class V8_EXPORT_PRIVATE AsmOverloadedFunctionType final
+    : public AsmCallableType {
  public:
   AsmOverloadedFunctionType* AsOverloadedFunctionType() override {
     return this;
@@ -173,7 +176,7 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(AsmOverloadedFunctionType);
 };
 
-class AsmFFIType final : public AsmCallableType {
+class V8_EXPORT_PRIVATE AsmFFIType final : public AsmCallableType {
  public:
   AsmFFIType* AsFFIType() override { return this; }
 
@@ -189,7 +192,7 @@
   DISALLOW_COPY_AND_ASSIGN(AsmFFIType);
 };
 
-class AsmFunctionTableType : public AsmCallableType {
+class V8_EXPORT_PRIVATE AsmFunctionTableType : public AsmCallableType {
  public:
   AsmFunctionTableType* AsFunctionTableType() override { return this; }
 
@@ -212,7 +215,7 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(AsmFunctionTableType);
 };
 
-class AsmType {
+class V8_EXPORT_PRIVATE AsmType {
  public:
 #define DEFINE_CONSTRUCTOR(CamelName, string_name, number, parent_types) \
   static AsmType* CamelName() {                                          \
diff --git a/src/asmjs/asm-wasm-builder.cc b/src/asmjs/asm-wasm-builder.cc
index 091f793..cac6fbd 100644
--- a/src/asmjs/asm-wasm-builder.cc
+++ b/src/asmjs/asm-wasm-builder.cc
@@ -12,13 +12,13 @@
 
 #include "src/asmjs/asm-types.h"
 #include "src/asmjs/asm-wasm-builder.h"
-#include "src/wasm/switch-logic.h"
+#include "src/asmjs/switch-logic.h"
+
 #include "src/wasm/wasm-macro-gen.h"
 #include "src/wasm/wasm-opcodes.h"
 
 #include "src/ast/ast.h"
 #include "src/ast/scopes.h"
-#include "src/codegen.h"
 
 namespace v8 {
 namespace internal {
@@ -81,14 +81,8 @@
          ++i) {
       b.AddParam(i->type);
     }
-    foreign_init_function_->SetExported();
-    std::string raw_name = "__foreign_init__";
-    foreign_init_function_->SetName(
-        AsmWasmBuilder::foreign_init_name,
-        static_cast<int>(strlen(AsmWasmBuilder::foreign_init_name)));
-
-    foreign_init_function_->SetName(raw_name.data(),
-                                    static_cast<int>(raw_name.size()));
+    foreign_init_function_->ExportAs(
+        CStrVector(AsmWasmBuilder::foreign_init_name));
     foreign_init_function_->SetSignature(b.Build());
     for (size_t pos = 0; pos < foreign_variables_.size(); ++pos) {
       foreign_init_function_->EmitGetLocal(static_cast<uint32_t>(pos));
@@ -563,10 +557,7 @@
       Variable* var = expr->var();
       DCHECK(var->is_function());
       WasmFunctionBuilder* function = LookupOrInsertFunction(var);
-      function->SetExported();
-      function->SetName(
-          AsmWasmBuilder::single_function_name,
-          static_cast<int>(strlen(AsmWasmBuilder::single_function_name)));
+      function->ExportAs(CStrVector(AsmWasmBuilder::single_function_name));
     }
   }
 
@@ -650,9 +641,9 @@
       const AstRawString* raw_name = name->AsRawPropertyName();
       if (var->is_function()) {
         WasmFunctionBuilder* function = LookupOrInsertFunction(var);
-        function->SetExported();
-        function->SetName(reinterpret_cast<const char*>(raw_name->raw_data()),
-                          raw_name->length());
+        function->Export();
+        function->SetName({reinterpret_cast<const char*>(raw_name->raw_data()),
+                           raw_name->length()});
       }
     }
   }
@@ -763,7 +754,7 @@
     }
   };
 
-  void EmitAssignmentLhs(Expression* target, MachineType* mtype) {
+  void EmitAssignmentLhs(Expression* target, AsmType** atype) {
     // Match the left hand side of the assignment.
     VariableProxy* target_var = target->AsVariableProxy();
     if (target_var != nullptr) {
@@ -774,7 +765,7 @@
     Property* target_prop = target->AsProperty();
     if (target_prop != nullptr) {
       // Left hand side is a property access, i.e. the asm.js heap.
-      VisitPropertyAndEmitIndex(target_prop, mtype);
+      VisitPropertyAndEmitIndex(target_prop, atype);
       return;
     }
 
@@ -822,7 +813,7 @@
     RECURSE(Visit(value));
   }
 
-  void EmitAssignment(Assignment* expr, MachineType type, ValueFate fate) {
+  void EmitAssignment(Assignment* expr, AsmType* type, ValueFate fate) {
     // Match the left hand side of the assignment.
     VariableProxy* target_var = expr->target()->AsVariableProxy();
     if (target_var != nullptr) {
@@ -857,21 +848,21 @@
       }
       // Note that unlike StoreMem, AsmjsStoreMem ignores out-of-bounds writes.
       WasmOpcode opcode;
-      if (type == MachineType::Int8()) {
+      if (type == AsmType::Int8Array()) {
         opcode = kExprI32AsmjsStoreMem8;
-      } else if (type == MachineType::Uint8()) {
+      } else if (type == AsmType::Uint8Array()) {
         opcode = kExprI32AsmjsStoreMem8;
-      } else if (type == MachineType::Int16()) {
+      } else if (type == AsmType::Int16Array()) {
         opcode = kExprI32AsmjsStoreMem16;
-      } else if (type == MachineType::Uint16()) {
+      } else if (type == AsmType::Uint16Array()) {
         opcode = kExprI32AsmjsStoreMem16;
-      } else if (type == MachineType::Int32()) {
+      } else if (type == AsmType::Int32Array()) {
         opcode = kExprI32AsmjsStoreMem;
-      } else if (type == MachineType::Uint32()) {
+      } else if (type == AsmType::Uint32Array()) {
         opcode = kExprI32AsmjsStoreMem;
-      } else if (type == MachineType::Float32()) {
+      } else if (type == AsmType::Float32Array()) {
         opcode = kExprF32AsmjsStoreMem;
-      } else if (type == MachineType::Float64()) {
+      } else if (type == AsmType::Float64Array()) {
         opcode = kExprF64AsmjsStoreMem;
       } else {
         UNREACHABLE();
@@ -938,12 +929,12 @@
     }
 
     if (as_init) LoadInitFunction();
-    MachineType mtype = MachineType::None();
+    AsmType* atype = AsmType::None();
     bool is_nop = false;
-    EmitAssignmentLhs(expr->target(), &mtype);
+    EmitAssignmentLhs(expr->target(), &atype);
     EmitAssignmentRhs(expr->target(), expr->value(), &is_nop);
     if (!is_nop) {
-      EmitAssignment(expr, mtype, fate);
+      EmitAssignment(expr, atype, fate);
     }
     if (as_init) UnLoadInitFunction();
   }
@@ -967,40 +958,10 @@
     }
   }
 
-  void VisitPropertyAndEmitIndex(Property* expr, MachineType* mtype) {
+  void VisitPropertyAndEmitIndex(Property* expr, AsmType** atype) {
     Expression* obj = expr->obj();
-    AsmType* type = typer_->TypeOf(obj);
-    int size;
-    if (type->IsA(AsmType::Uint8Array())) {
-      *mtype = MachineType::Uint8();
-      size = 1;
-    } else if (type->IsA(AsmType::Int8Array())) {
-      *mtype = MachineType::Int8();
-      size = 1;
-    } else if (type->IsA(AsmType::Uint16Array())) {
-      *mtype = MachineType::Uint16();
-      size = 2;
-    } else if (type->IsA(AsmType::Int16Array())) {
-      *mtype = MachineType::Int16();
-      size = 2;
-    } else if (type->IsA(AsmType::Uint32Array())) {
-      *mtype = MachineType::Uint32();
-      size = 4;
-    } else if (type->IsA(AsmType::Int32Array())) {
-      *mtype = MachineType::Int32();
-      size = 4;
-    } else if (type->IsA(AsmType::Uint32Array())) {
-      *mtype = MachineType::Uint32();
-      size = 4;
-    } else if (type->IsA(AsmType::Float32Array())) {
-      *mtype = MachineType::Float32();
-      size = 4;
-    } else if (type->IsA(AsmType::Float64Array())) {
-      *mtype = MachineType::Float64();
-      size = 8;
-    } else {
-      UNREACHABLE();
-    }
+    *atype = typer_->TypeOf(obj);
+    int size = (*atype)->ElementSizeInBytes();
     if (size == 1) {
      // Allow more general expressions in byte arrays than the spec
       // strictly permits.
@@ -1038,24 +999,24 @@
   }
 
   void VisitProperty(Property* expr) {
-    MachineType type;
+    AsmType* type = AsmType::None();
     VisitPropertyAndEmitIndex(expr, &type);
     WasmOpcode opcode;
-    if (type == MachineType::Int8()) {
+    if (type == AsmType::Int8Array()) {
       opcode = kExprI32AsmjsLoadMem8S;
-    } else if (type == MachineType::Uint8()) {
+    } else if (type == AsmType::Uint8Array()) {
       opcode = kExprI32AsmjsLoadMem8U;
-    } else if (type == MachineType::Int16()) {
+    } else if (type == AsmType::Int16Array()) {
       opcode = kExprI32AsmjsLoadMem16S;
-    } else if (type == MachineType::Uint16()) {
+    } else if (type == AsmType::Uint16Array()) {
       opcode = kExprI32AsmjsLoadMem16U;
-    } else if (type == MachineType::Int32()) {
+    } else if (type == AsmType::Int32Array()) {
       opcode = kExprI32AsmjsLoadMem;
-    } else if (type == MachineType::Uint32()) {
+    } else if (type == AsmType::Uint32Array()) {
       opcode = kExprI32AsmjsLoadMem;
-    } else if (type == MachineType::Float32()) {
+    } else if (type == AsmType::Float32Array()) {
       opcode = kExprF32AsmjsLoadMem;
-    } else if (type == MachineType::Float64()) {
+    } else if (type == AsmType::Float64Array()) {
       opcode = kExprF64AsmjsLoadMem;
     } else {
       UNREACHABLE();
@@ -1367,11 +1328,13 @@
           uint32_t index = imported_function_table_.LookupOrInsertImport(
               vp->var(), sig.Build());
           VisitCallArgs(expr);
+          current_function_builder_->AddAsmWasmOffset(expr->position());
           current_function_builder_->Emit(kExprCallFunction);
           current_function_builder_->EmitVarInt(index);
         } else {
           WasmFunctionBuilder* function = LookupOrInsertFunction(vp->var());
           VisitCallArgs(expr);
+          current_function_builder_->AddAsmWasmOffset(expr->position());
           current_function_builder_->Emit(kExprCallFunction);
           current_function_builder_->EmitDirectCallIndex(
               function->func_index());
@@ -1397,8 +1360,10 @@
         VisitCallArgs(expr);
 
         current_function_builder_->EmitGetLocal(tmp.index());
+        current_function_builder_->AddAsmWasmOffset(expr->position());
         current_function_builder_->Emit(kExprCallIndirect);
         current_function_builder_->EmitVarInt(indices->signature_index);
+        current_function_builder_->EmitVarInt(0);  // table index
         returns_value =
             builder_->GetSignature(indices->signature_index)->return_count() >
             0;
@@ -1726,9 +1691,8 @@
 
   void VisitThisFunction(ThisFunction* expr) { UNREACHABLE(); }
 
-  void VisitDeclarations(ZoneList<Declaration*>* decls) {
-    for (int i = 0; i < decls->length(); ++i) {
-      Declaration* decl = decls->at(i);
+  void VisitDeclarations(Declaration::List* decls) {
+    for (Declaration* decl : *decls) {
       RECURSE(Visit(decl));
     }
   }
@@ -1821,8 +1785,8 @@
       entry = functions_.LookupOrInsert(v, ComputePointerHash(v),
                                         ZoneAllocationPolicy(zone()));
       function->SetName(
-          reinterpret_cast<const char*>(v->raw_name()->raw_data()),
-          v->raw_name()->length());
+          {reinterpret_cast<const char*>(v->raw_name()->raw_data()),
+           v->raw_name()->length()});
       entry->value = function;
     }
     return (reinterpret_cast<WasmFunctionBuilder*>(entry->value));
@@ -1878,13 +1842,16 @@
 
 // TODO(aseemgarg): probably should take the zone (to write wasm into) as input
 // so the zone in the constructor may be thrown away once the module is written.
-ZoneBuffer* AsmWasmBuilder::Run(i::Handle<i::FixedArray>* foreign_args) {
+AsmWasmBuilder::Result AsmWasmBuilder::Run(
+    i::Handle<i::FixedArray>* foreign_args) {
   AsmWasmBuilderImpl impl(isolate_, zone_, literal_, typer_);
   impl.Build();
   *foreign_args = impl.GetForeignArgs();
-  ZoneBuffer* buffer = new (zone_) ZoneBuffer(zone_);
-  impl.builder_->WriteTo(*buffer);
-  return buffer;
+  ZoneBuffer* module_buffer = new (zone_) ZoneBuffer(zone_);
+  impl.builder_->WriteTo(*module_buffer);
+  ZoneBuffer* asm_offsets_buffer = new (zone_) ZoneBuffer(zone_);
+  impl.builder_->WriteAsmJsOffsetTable(*asm_offsets_buffer);
+  return {module_buffer, asm_offsets_buffer};
 }
 
 const char* AsmWasmBuilder::foreign_init_name = "__foreign_init__";
diff --git a/src/asmjs/asm-wasm-builder.h b/src/asmjs/asm-wasm-builder.h
index 9f85dfa..f234abd 100644
--- a/src/asmjs/asm-wasm-builder.h
+++ b/src/asmjs/asm-wasm-builder.h
@@ -20,9 +20,14 @@
 
 class AsmWasmBuilder {
  public:
+  struct Result {
+    ZoneBuffer* module_bytes;
+    ZoneBuffer* asm_offset_table;
+  };
+
   explicit AsmWasmBuilder(Isolate* isolate, Zone* zone, FunctionLiteral* root,
                           AsmTyper* typer);
-  ZoneBuffer* Run(Handle<FixedArray>* foreign_args);
+  Result Run(Handle<FixedArray>* foreign_args);
 
   static const char* foreign_init_name;
   static const char* single_function_name;
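
A minimal consumption sketch for the new Result, mirroring the asm-js.cc call
site earlier in this patch (assumes a configured AsmWasmBuilder named
`builder` and a handle for the foreign globals):

    i::Handle<i::FixedArray> foreign_globals;
    AsmWasmBuilder::Result result = builder.Run(&foreign_globals);
    wasm::ZoneBuffer* module_bytes = result.module_bytes;  // encoded module
    wasm::ZoneBuffer* offsets = result.asm_offset_table;   // wasm -> asm.js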
diff --git a/src/wasm/switch-logic.cc b/src/asmjs/switch-logic.cc
similarity index 97%
rename from src/wasm/switch-logic.cc
rename to src/asmjs/switch-logic.cc
index 9ebc0b3..93544da 100644
--- a/src/wasm/switch-logic.cc
+++ b/src/asmjs/switch-logic.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/wasm/switch-logic.h"
+#include "src/asmjs/switch-logic.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/wasm/switch-logic.h b/src/asmjs/switch-logic.h
similarity index 86%
rename from src/wasm/switch-logic.h
rename to src/asmjs/switch-logic.h
index 160e0d6..4e967ae 100644
--- a/src/wasm/switch-logic.h
+++ b/src/asmjs/switch-logic.h
@@ -5,6 +5,7 @@
 #ifndef V8_WASM_SWITCH_LOGIC_H
 #define V8_WASM_SWITCH_LOGIC_H
 
+#include "src/globals.h"
 #include "src/zone/zone-containers.h"
 #include "src/zone/zone.h"
 
@@ -23,7 +24,7 @@
   }
 };
 
-CaseNode* OrderCases(ZoneVector<int>* cases, Zone* zone);
+V8_EXPORT_PRIVATE CaseNode* OrderCases(ZoneVector<int>* cases, Zone* zone);
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
diff --git a/src/assembler.cc b/src/assembler.cc
index b44bc06..a2c0ebe 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -351,10 +351,8 @@
                                            icache_flush_mode);
   } else if (IsWasmMemorySizeReference(rmode_)) {
     uint32_t current_size_reference = wasm_memory_size_reference();
-    DCHECK(old_size == 0 || current_size_reference <= old_size);
-    uint32_t offset = old_size - current_size_reference;
-    DCHECK_GE(new_size, offset);
-    uint32_t updated_size_reference = new_size - offset;
+    uint32_t updated_size_reference =
+        new_size + (current_size_reference - old_size);
     unchecked_update_wasm_memory_size(updated_size_reference,
                                       icache_flush_mode);
   } else {
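
The rewritten update is algebraically identical to the old three-step form but
leans on well-defined uint32_t wraparound instead of two DCHECK-guarded
subtractions (and no longer requires the reference to lie within old_size).
A worked example with illustrative values:

    // old: offset = old_size - ref; updated = new_size - offset
    // new: updated = new_size + (ref - old_size)        (mod 2^32)
    uint32_t old_size = 0x100, new_size = 0x200, ref = 0xFC;  // old_size - 4
    uint32_t updated = new_size + (ref - old_size);  // 0xFFFFFFFC wraps: 0x1FC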
@@ -762,8 +760,10 @@
       return "internal reference";
     case INTERNAL_REFERENCE_ENCODED:
       return "encoded internal reference";
-    case DEOPT_POSITION:
-      return "deopt position";
+    case DEOPT_SCRIPT_OFFSET:
+      return "deopt script offset";
+    case DEOPT_INLINING_ID:
+      return "deopt inlining id";
     case DEOPT_REASON:
       return "deopt reason";
     case DEOPT_ID:
@@ -803,7 +803,7 @@
   os << static_cast<const void*>(pc_) << "  " << RelocModeName(rmode_);
   if (IsComment(rmode_)) {
     os << "  (" << reinterpret_cast<char*>(data_) << ")";
-  } else if (rmode_ == DEOPT_POSITION) {
+  } else if (rmode_ == DEOPT_SCRIPT_OFFSET || rmode_ == DEOPT_INLINING_ID) {
     os << "  (" << data() << ")";
   } else if (rmode_ == DEOPT_REASON) {
     os << "  ("
@@ -874,7 +874,8 @@
     case RUNTIME_ENTRY:
     case COMMENT:
     case EXTERNAL_REFERENCE:
-    case DEOPT_POSITION:
+    case DEOPT_SCRIPT_OFFSET:
+    case DEOPT_INLINING_ID:
     case DEOPT_REASON:
     case DEOPT_ID:
     case CONST_POOL:
@@ -1216,19 +1217,6 @@
       Redirect(isolate, FUNCTION_ADDR(Logger::LeaveExternal)));
 }
 
-
-ExternalReference ExternalReference::keyed_lookup_cache_keys(Isolate* isolate) {
-  return ExternalReference(isolate->keyed_lookup_cache()->keys_address());
-}
-
-
-ExternalReference ExternalReference::keyed_lookup_cache_field_offsets(
-    Isolate* isolate) {
-  return ExternalReference(
-      isolate->keyed_lookup_cache()->field_offsets_address());
-}
-
-
 ExternalReference ExternalReference::roots_array_start(Isolate* isolate) {
   return ExternalReference(isolate->heap()->roots_array_start());
 }
@@ -1906,11 +1894,12 @@
 
 // Platform specific but identical code for all the platforms.
 
-void Assembler::RecordDeoptReason(DeoptimizeReason reason, int raw_position,
-                                  int id) {
+void Assembler::RecordDeoptReason(DeoptimizeReason reason,
+                                  SourcePosition position, int id) {
   if (FLAG_trace_deopt || isolate()->is_profiling()) {
     EnsureSpace ensure_space(this);
-    RecordRelocInfo(RelocInfo::DEOPT_POSITION, raw_position);
+    RecordRelocInfo(RelocInfo::DEOPT_SCRIPT_OFFSET, position.ScriptOffset());
+    RecordRelocInfo(RelocInfo::DEOPT_INLINING_ID, position.InliningId());
     RecordRelocInfo(RelocInfo::DEOPT_REASON, static_cast<int>(reason));
     RecordRelocInfo(RelocInfo::DEOPT_ID, id);
   }
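
Since a SourcePosition carries both a script offset and an inlining id, the
old single DEOPT_POSITION entry becomes two consecutive reloc entries. The
pair being recorded, as implied by the accessors used above (sketch only):

    // Sketch: the two ints written per deopt point.
    struct RecordedDeoptPosition {
      int script_offset;  // SourcePosition::ScriptOffset()
      int inlining_id;    // SourcePosition::InliningId()
    };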
diff --git a/src/assembler.h b/src/assembler.h
index a925032..2169b15 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -38,6 +38,7 @@
 #include "src/allocation.h"
 #include "src/builtins/builtins.h"
 #include "src/deoptimize-reason.h"
+#include "src/globals.h"
 #include "src/isolate.h"
 #include "src/log.h"
 #include "src/register-configuration.h"
@@ -51,6 +52,7 @@
 namespace internal {
 
 // Forward declarations.
+class SourcePosition;
 class StatsCounter;
 
 // -----------------------------------------------------------------------------
@@ -419,9 +421,10 @@
     CONST_POOL,
     VENEER_POOL,
 
-    DEOPT_POSITION,  // Deoptimization source position.
-    DEOPT_REASON,    // Deoptimization reason index.
-    DEOPT_ID,        // Deoptimization inlining id.
+    DEOPT_SCRIPT_OFFSET,  // Deoptimization source position: script offset.
+    DEOPT_INLINING_ID,    // Deoptimization source position: inlining id.
+    DEOPT_REASON,         // Deoptimization reason index.
+    DEOPT_ID,             // Deoptimization id.
 
     // This is not an actual reloc mode, but used to encode a long pc jump that
     // cannot be encoded as part of another record.
@@ -479,7 +482,7 @@
     return mode == VENEER_POOL;
   }
   static inline bool IsDeoptPosition(Mode mode) {
-    return mode == DEOPT_POSITION;
+    return mode == DEOPT_SCRIPT_OFFSET || mode == DEOPT_INLINING_ID;
   }
   static inline bool IsDeoptReason(Mode mode) {
     return mode == DEOPT_REASON;
@@ -950,10 +953,6 @@
   static ExternalReference log_enter_external_function(Isolate* isolate);
   static ExternalReference log_leave_external_function(Isolate* isolate);
 
-  // Static data in the keyed lookup cache.
-  static ExternalReference keyed_lookup_cache_keys(Isolate* isolate);
-  static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate);
-
   // Static variable Heap::roots_array_start()
   static ExternalReference roots_array_start(Isolate* isolate);
 
@@ -961,7 +960,8 @@
   static ExternalReference allocation_sites_list_address(Isolate* isolate);
 
   // Static variable StackGuard::address_of_jslimit()
-  static ExternalReference address_of_stack_limit(Isolate* isolate);
+  V8_EXPORT_PRIVATE static ExternalReference address_of_stack_limit(
+      Isolate* isolate);
 
   // Static variable StackGuard::address_of_real_jslimit()
   static ExternalReference address_of_real_stack_limit(Isolate* isolate);
@@ -1047,7 +1047,8 @@
   static ExternalReference invoke_function_callback(Isolate* isolate);
   static ExternalReference invoke_accessor_getter_callback(Isolate* isolate);
 
-  static ExternalReference runtime_function_table_address(Isolate* isolate);
+  V8_EXPORT_PRIVATE static ExternalReference runtime_function_table_address(
+      Isolate* isolate);
 
   Address address() const { return reinterpret_cast<Address>(address_); }
 
@@ -1107,12 +1108,12 @@
   void* address_;
 };
 
-bool operator==(ExternalReference, ExternalReference);
+V8_EXPORT_PRIVATE bool operator==(ExternalReference, ExternalReference);
 bool operator!=(ExternalReference, ExternalReference);
 
 size_t hash_value(ExternalReference);
 
-std::ostream& operator<<(std::ostream&, ExternalReference);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, ExternalReference);
 
 // -----------------------------------------------------------------------------
 // Utility functions
diff --git a/src/ast/ast-expression-rewriter.cc b/src/ast/ast-expression-rewriter.cc
index c4fa71b..d0db9ea 100644
--- a/src/ast/ast-expression-rewriter.cc
+++ b/src/ast/ast-expression-rewriter.cc
@@ -19,11 +19,10 @@
   } while (false)
 #define NOTHING() DCHECK_NULL(replacement_)
 
-
-void AstExpressionRewriter::VisitDeclarations(
-    ZoneList<Declaration*>* declarations) {
-  for (int i = 0; i < declarations->length(); i++) {
-    AST_REWRITE_LIST_ELEMENT(Declaration, declarations, i);
+void AstExpressionRewriter::VisitDeclarations(Declaration::List* declarations) {
+  for (Declaration::List::Iterator it = declarations->begin();
+       it != declarations->end(); ++it) {
+    AST_REWRITE(Declaration, *it, it = replacement);
   }
 }
 
diff --git a/src/ast/ast-expression-rewriter.h b/src/ast/ast-expression-rewriter.h
index dfed3e1..26eef24 100644
--- a/src/ast/ast-expression-rewriter.h
+++ b/src/ast/ast-expression-rewriter.h
@@ -29,7 +29,7 @@
   }
   virtual ~AstExpressionRewriter() {}
 
-  virtual void VisitDeclarations(ZoneList<Declaration*>* declarations);
+  virtual void VisitDeclarations(Declaration::List* declarations);
   virtual void VisitStatements(ZoneList<Statement*>* statements);
   virtual void VisitExpressions(ZoneList<Expression*>* expressions);
 
diff --git a/src/ast/ast-numbering.cc b/src/ast/ast-numbering.cc
index e1b11f6..82f9767 100644
--- a/src/ast/ast-numbering.cc
+++ b/src/ast/ast-numbering.cc
@@ -37,7 +37,7 @@
   void VisitReference(Expression* expr);
 
   void VisitStatements(ZoneList<Statement*>* statements);
-  void VisitDeclarations(ZoneList<Declaration*>* declarations);
+  void VisitDeclarations(Declaration::List* declarations);
   void VisitArguments(ZoneList<Expression*>* arguments);
   void VisitLiteralProperty(LiteralProperty* property);
 
@@ -147,8 +147,15 @@
 
 void AstNumberingVisitor::VisitVariableProxyReference(VariableProxy* node) {
   IncrementNodeCount();
-  if (node->var()->IsLookupSlot()) {
-    DisableCrankshaft(kReferenceToAVariableWhichRequiresDynamicLookup);
+  switch (node->var()->location()) {
+    case VariableLocation::LOOKUP:
+      DisableCrankshaft(kReferenceToAVariableWhichRequiresDynamicLookup);
+      break;
+    case VariableLocation::MODULE:
+      DisableCrankshaft(kReferenceToModuleVariable);
+      break;
+    default:
+      break;
   }
   node->set_base_id(ReserveIdRange(VariableProxy::num_ids()));
 }
@@ -547,12 +554,8 @@
   }
 }
 
-
-void AstNumberingVisitor::VisitDeclarations(
-    ZoneList<Declaration*>* declarations) {
-  for (int i = 0; i < declarations->length(); i++) {
-    Visit(declarations->at(i));
-  }
+void AstNumberingVisitor::VisitDeclarations(Declaration::List* decls) {
+  for (Declaration* decl : *decls) Visit(decl);
 }
 
 
@@ -592,12 +595,11 @@
   }
 
   if (IsGeneratorFunction(node->kind()) || IsAsyncFunction(node->kind())) {
-    // Generators can be optimized if --turbo-from-bytecode is set.
-    if (FLAG_turbo_from_bytecode) {
-      DisableCrankshaft(kGenerator);
-    } else {
-      DisableOptimization(kGenerator);
-    }
+    DisableCrankshaft(kGenerator);
+  }
+
+  if (IsClassConstructor(node->kind())) {
+    DisableCrankshaft(kClassConstructorFunction);
   }
 
   VisitDeclarations(scope->declarations());
diff --git a/src/ast/ast-traversal-visitor.h b/src/ast/ast-traversal-visitor.h
index e0f88e1..d93e02f 100644
--- a/src/ast/ast-traversal-visitor.h
+++ b/src/ast/ast-traversal-visitor.h
@@ -40,7 +40,7 @@
   bool VisitExpression(Expression* node) { return true; }
 
   // Iteration left-to-right.
-  void VisitDeclarations(ZoneList<Declaration*>* declarations);
+  void VisitDeclarations(Declaration::List* declarations);
   void VisitStatements(ZoneList<Statement*>* statements);
 
 // Individual nodes
@@ -104,9 +104,8 @@
 
 template <class Subclass>
 void AstTraversalVisitor<Subclass>::VisitDeclarations(
-    ZoneList<Declaration*>* decls) {
-  for (int i = 0; i < decls->length(); ++i) {
-    Declaration* decl = decls->at(i);
+    Declaration::List* decls) {
+  for (Declaration* decl : *decls) {
     RECURSE(Visit(decl));
   }
 }
@@ -288,6 +287,8 @@
   PROCESS_EXPRESSION(expr);
   DeclarationScope* scope = expr->scope();
   RECURSE_EXPRESSION(VisitDeclarations(scope->declarations()));
+  // A lazily parsed function literal won't have a body.
+  if (expr->scope()->is_lazily_parsed()) return;
   RECURSE_EXPRESSION(VisitStatements(expr->body()));
 }
 
diff --git a/src/ast/ast-types.cc b/src/ast/ast-types.cc
index a075e8e..49551dd 100644
--- a/src/ast/ast-types.cc
+++ b/src/ast/ast-types.cc
@@ -208,6 +208,8 @@
     case JS_DATE_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_GENERATOR_OBJECT_TYPE:
+    case JS_MODULE_NAMESPACE_TYPE:
+    case JS_FIXED_ARRAY_ITERATOR_TYPE:
     case JS_ARRAY_BUFFER_TYPE:
     case JS_ARRAY_TYPE:
     case JS_REGEXP_TYPE:  // TODO(rossberg): there should be a RegExp type.
@@ -218,6 +220,43 @@
     case JS_SET_ITERATOR_TYPE:
     case JS_MAP_ITERATOR_TYPE:
     case JS_STRING_ITERATOR_TYPE:
+
+    case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
+    case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
+    case JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE:
+    case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE:
+
     case JS_WEAK_MAP_TYPE:
     case JS_WEAK_SET_TYPE:
     case JS_PROMISE_TYPE:
@@ -245,6 +284,7 @@
     case CODE_TYPE:
     case PROPERTY_CELL_TYPE:
     case MODULE_TYPE:
+    case MODULE_INFO_ENTRY_TYPE:
       return kOtherInternal & kTaggedPointer;
 
     // Remaining instance types are unsupported for now. If any of them do
@@ -260,7 +300,8 @@
     case ACCESS_CHECK_INFO_TYPE:
     case INTERCEPTOR_INFO_TYPE:
     case CALL_HANDLER_INFO_TYPE:
-    case PROMISE_CONTAINER_TYPE:
+    case PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE:
+    case PROMISE_REACTION_JOB_INFO_TYPE:
     case FUNCTION_TEMPLATE_INFO_TYPE:
     case OBJECT_TEMPLATE_INFO_TYPE:
     case SIGNATURE_INFO_TYPE:
@@ -274,6 +315,7 @@
     case CELL_TYPE:
     case WEAK_CELL_TYPE:
     case PROTOTYPE_INFO_TYPE:
+    case TUPLE3_TYPE:
     case CONTEXT_EXTENSION_TYPE:
       UNREACHABLE();
       return kNone;
diff --git a/src/ast/ast-value-factory.cc b/src/ast/ast-value-factory.cc
index 33ccec7..ed2976f 100644
--- a/src/ast/ast-value-factory.cc
+++ b/src/ast/ast-value-factory.cc
@@ -98,10 +98,10 @@
 
 void AstRawString::Internalize(Isolate* isolate) {
   if (literal_bytes_.length() == 0) {
-    string_ = isolate->factory()->empty_string();
+    set_string(isolate->factory()->empty_string());
   } else {
     AstRawStringInternalizationKey key(this);
-    string_ = StringTable::LookupKey(isolate, &key);
+    set_string(StringTable::LookupKey(isolate, &key));
   }
 }
 
@@ -131,9 +131,9 @@
 void AstConsString::Internalize(Isolate* isolate) {
   // AstRawStrings are internalized before AstConsStrings so left and right are
   // already internalized.
-  string_ = isolate->factory()
-                ->NewConsString(left_->string(), right_->string())
-                .ToHandleChecked();
+  set_string(isolate->factory()
+                 ->NewConsString(left_->string(), right_->string())
+                 .ToHandleChecked());
 }
 
 bool AstValue::IsPropertyName() const {
@@ -177,44 +177,44 @@
 void AstValue::Internalize(Isolate* isolate) {
   switch (type_) {
     case STRING:
-      DCHECK(string_ != NULL);
+      DCHECK_NOT_NULL(string_);
       // Strings are already internalized.
       DCHECK(!string_->string().is_null());
       break;
     case SYMBOL:
       if (symbol_name_[0] == 'i') {
         DCHECK_EQ(0, strcmp(symbol_name_, "iterator_symbol"));
-        value_ = isolate->factory()->iterator_symbol();
+        set_value(isolate->factory()->iterator_symbol());
       } else if (strcmp(symbol_name_, "hasInstance_symbol") == 0) {
-        value_ = isolate->factory()->has_instance_symbol();
+        set_value(isolate->factory()->has_instance_symbol());
       } else {
         DCHECK_EQ(0, strcmp(symbol_name_, "home_object_symbol"));
-        value_ = isolate->factory()->home_object_symbol();
+        set_value(isolate->factory()->home_object_symbol());
       }
       break;
     case NUMBER_WITH_DOT:
     case NUMBER:
-      value_ = isolate->factory()->NewNumber(number_, TENURED);
+      set_value(isolate->factory()->NewNumber(number_, TENURED));
       break;
     case SMI_WITH_DOT:
     case SMI:
-      value_ = handle(Smi::FromInt(smi_), isolate);
+      set_value(handle(Smi::FromInt(smi_), isolate));
       break;
     case BOOLEAN:
       if (bool_) {
-        value_ = isolate->factory()->true_value();
+        set_value(isolate->factory()->true_value());
       } else {
-        value_ = isolate->factory()->false_value();
+        set_value(isolate->factory()->false_value());
       }
       break;
     case NULL_TYPE:
-      value_ = isolate->factory()->null_value();
+      set_value(isolate->factory()->null_value());
       break;
     case THE_HOLE:
-      value_ = isolate->factory()->the_hole_value();
+      set_value(isolate->factory()->the_hole_value());
       break;
     case UNDEFINED:
-      value_ = isolate->factory()->undefined_value();
+      set_value(isolate->factory()->undefined_value());
       break;
   }
 }
@@ -301,6 +301,7 @@
     current->Internalize(isolate);
     current = next;
   }
+
   for (AstValue* current = values_; current != nullptr;) {
     AstValue* next = current->next();
     current->Internalize(isolate);
@@ -313,7 +314,7 @@
 
 const AstValue* AstValueFactory::NewString(const AstRawString* string) {
   AstValue* value = new (zone_) AstValue(string);
-  CHECK(string != nullptr);
+  CHECK_NOT_NULL(string);
   return AddValue(value);
 }
 
@@ -329,10 +330,12 @@
   return AddValue(value);
 }
 
+const AstValue* AstValueFactory::NewSmi(uint32_t number) {
+  bool cacheable_smi = number <= kMaxCachedSmi;
+  if (cacheable_smi && smis_[number] != nullptr) return smis_[number];
 
-const AstValue* AstValueFactory::NewSmi(int number) {
-  AstValue* value =
-      new (zone_) AstValue(AstValue::SMI, number);
+  AstValue* value = new (zone_) AstValue(AstValue::SMI, number);
+  if (cacheable_smi) smis_[number] = value;
   return AddValue(value);
 }
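
With kMaxCachedSmi = 1 << 10 (declared in the header change below), the
factory now memoizes the AstValues for the literals 0 through 1024, so
repeated small integer literals share a single node. Usage sketch (assumes an
AstValueFactory* named factory):

    const AstValue* a = factory->NewSmi(7);
    const AstValue* b = factory->NewSmi(7);
    DCHECK_EQ(a, b);  // cache hit: one AstValue per n <= kMaxCachedSmi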
 
@@ -383,9 +386,9 @@
     memcpy(new_literal_bytes, literal_bytes.start(), length);
     AstRawString* new_string = new (zone_) AstRawString(
         is_one_byte, Vector<const byte>(new_literal_bytes, length), hash);
-    CHECK(new_string != nullptr);
-    entry->key = new_string;
+    CHECK_NOT_NULL(new_string);
     AddString(new_string);
+    entry->key = new_string;
     entry->value = reinterpret_cast<void*>(1);
   }
   return reinterpret_cast<AstRawString*>(entry->key);
diff --git a/src/ast/ast-value-factory.h b/src/ast/ast-value-factory.h
index bc3eca2..4ce480f 100644
--- a/src/ast/ast-value-factory.h
+++ b/src/ast/ast-value-factory.h
@@ -30,6 +30,7 @@
 
 #include "src/api.h"
 #include "src/base/hashmap.h"
+#include "src/globals.h"
 #include "src/utils.h"
 
 // AstString, AstValue and AstValueFactory are for storing strings and values
@@ -53,17 +54,21 @@
 
   // This function can be called after internalizing.
   V8_INLINE Handle<String> string() const {
-    DCHECK(!string_.is_null());
-    return string_;
+    DCHECK_NOT_NULL(string_);
+    return Handle<String>(string_);
   }
 
+  AstString* next() { return next_; }
   AstString** next_location() { return &next_; }
-  AstString* next() const { return next_; }
 
  protected:
-  // Handle<String>::null() until internalized.
-  Handle<String> string_;
-  AstString* next_;
+  void set_string(Handle<String> string) { string_ = string.location(); }
+  // {string_} is stored as String** instead of a Handle<String> so it can be
+  // stored in a union with {next_}.
+  union {
+    AstString* next_;
+    String** string_;
+  };
   // Poor-man's virtual dispatch to AstRawString / AstConsString. Takes less
   // memory.
   class IsRawStringBits : public BitField<bool, 0, 1> {};
@@ -203,13 +208,14 @@
     if (type_ == STRING) {
       return string_->string();
     }
-    DCHECK(!value_.is_null());
-    return value_;
+    DCHECK_NOT_NULL(value_);
+    return Handle<Object>(value_);
   }
   AstValue* next() const { return next_; }
   void set_next(AstValue* next) { next_ = next; }
 
  private:
+  void set_value(Handle<Object> object) { value_ = object.location(); }
   friend class AstValueFactory;
 
   enum Type {
@@ -257,19 +263,21 @@
 
   Type type_;
 
+  // {value_} is stored as Object** instead of a Handle<Object> so it can be
+  // stored in a union with {next_}.
+  union {
+    Object** value_;  // if internalized
+    AstValue* next_;  // if !internalized
+  };
+
   // Uninternalized value.
   union {
     const AstRawString* string_;
     double number_;
     int smi_;
     bool bool_;
-    const AstRawString* strings_;
     const char* symbol_name_;
   };
-
-  // Handle<String>::null() until internalized.
-  Handle<Object> value_;
-  AstValue* next_;
 };
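
The same overlay trick is applied to AstString above: the intrusive next_ link
is only needed until Internalize() runs, after which the slot is repurposed to
hold the handle's backing location (Handle<T>::location() yields a T**), so
one pointer-sized field serves both phases:

    // Sketch: one slot, two non-overlapping lifetimes.
    union InternalizableSlotSketch {
      AstValue* next;   // before internalization: the factory's linked list
      Object** value;   // after: handle.location(), rewrapped on read
    };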
 
 
@@ -324,16 +332,18 @@
   AstValueFactory(Zone* zone, uint32_t hash_seed)
       : string_table_(AstRawStringCompare),
         values_(nullptr),
+        smis_(),
+        strings_(nullptr),
         strings_end_(&strings_),
         zone_(zone),
         hash_seed_(hash_seed) {
-    ResetStrings();
 #define F(name, str) name##_string_ = NULL;
     STRING_CONSTANTS(F)
 #undef F
 #define F(name) name##_ = NULL;
     OTHER_CONSTANTS(F)
 #undef F
+    std::fill(smis_, smis_ + arraysize(smis_), nullptr);
   }
 
   Zone* zone() const { return zone_; }
@@ -373,7 +383,7 @@
   // A JavaScript symbol (ECMA-262 edition 6).
   const AstValue* NewSymbol(const char* name);
   const AstValue* NewNumber(double number, bool with_dot = false);
-  const AstValue* NewSmi(int number);
+  const AstValue* NewSmi(uint32_t number);
   const AstValue* NewBoolean(bool b);
   const AstValue* NewStringList(ZoneList<const AstRawString*>* strings);
   const AstValue* NewNull();
@@ -381,6 +391,10 @@
   const AstValue* NewTheHole();
 
  private:
+  static const uint32_t kMaxCachedSmi = 1 << 10;
+
+  STATIC_ASSERT(kMaxCachedSmi <= Smi::kMaxValue);
+
   AstValue* AddValue(AstValue* value) {
     value->set_next(values_);
     values_ = value;
@@ -395,7 +409,8 @@
     strings_ = nullptr;
     strings_end_ = &strings_;
   }
-  AstRawString* GetOneByteStringInternal(Vector<const uint8_t> literal);
+  V8_EXPORT_PRIVATE AstRawString* GetOneByteStringInternal(
+      Vector<const uint8_t> literal);
   AstRawString* GetTwoByteStringInternal(Vector<const uint16_t> literal);
   AstRawString* GetString(uint32_t hash, bool is_one_byte,
                           Vector<const byte> literal_bytes);
@@ -407,8 +422,10 @@
   // For keeping track of all AstValues and AstRawStrings we've created (so that
   // they can be internalized later).
   AstValue* values_;
-  // We need to keep track of strings_ in order, since cons strings require
-  // their members to be internalized first.
+
+  AstValue* smis_[kMaxCachedSmi + 1];
+  // We need to keep track of strings_ in order since cons strings require their
+  // members to be internalized first.
   AstString* strings_;
   AstString** strings_end_;
   Zone* zone_;
diff --git a/src/ast/ast.cc b/src/ast/ast.cc
index 97d1f9d..fc8bd8a 100644
--- a/src/ast/ast.cc
+++ b/src/ast/ast.cc
@@ -14,6 +14,7 @@
 #include "src/code-stubs.h"
 #include "src/contexts.h"
 #include "src/conversions.h"
+#include "src/elements.h"
 #include "src/property-details.h"
 #include "src/property.h"
 #include "src/string-stream.h"
@@ -159,31 +160,30 @@
   }
 }
 
-VariableProxy::VariableProxy(Variable* var, int start_position,
-                             int end_position)
+VariableProxy::VariableProxy(Variable* var, int start_position)
     : Expression(start_position, kVariableProxy),
-      end_position_(end_position),
       raw_name_(var->raw_name()),
       next_unresolved_(nullptr) {
   bit_field_ |= IsThisField::encode(var->is_this()) |
-                IsAssignedField::encode(false) | IsResolvedField::encode(false);
+                IsAssignedField::encode(false) |
+                IsResolvedField::encode(false) |
+                HoleCheckModeField::encode(HoleCheckMode::kElided);
   BindTo(var);
 }
 
 VariableProxy::VariableProxy(const AstRawString* name,
-                             VariableKind variable_kind, int start_position,
-                             int end_position)
+                             VariableKind variable_kind, int start_position)
     : Expression(start_position, kVariableProxy),
-      end_position_(end_position),
       raw_name_(name),
       next_unresolved_(nullptr) {
   bit_field_ |= IsThisField::encode(variable_kind == THIS_VARIABLE) |
-                IsAssignedField::encode(false) | IsResolvedField::encode(false);
+                IsAssignedField::encode(false) |
+                IsResolvedField::encode(false) |
+                HoleCheckModeField::encode(HoleCheckMode::kElided);
 }
 
 VariableProxy::VariableProxy(const VariableProxy* copy_from)
     : Expression(copy_from->position(), kVariableProxy),
-      end_position_(copy_from->end_position_),
       next_unresolved_(nullptr) {
   bit_field_ = copy_from->bit_field_;
   DCHECK(!copy_from->is_resolved());
@@ -288,17 +288,19 @@
   return Token::ILLEGAL;
 }
 
+bool FunctionLiteral::ShouldEagerCompile() const {
+  return scope()->ShouldEagerCompile();
+}
+
+void FunctionLiteral::SetShouldEagerCompile() {
+  scope()->set_should_eager_compile();
+}
 
 bool FunctionLiteral::AllowsLazyCompilation() {
   return scope()->AllowsLazyCompilation();
 }
 
 
-bool FunctionLiteral::AllowsLazyCompilationWithoutContext() {
-  return scope()->AllowsLazyCompilationWithoutContext();
-}
-
-
 int FunctionLiteral::start_position() const {
   return scope()->start_position();
 }
@@ -510,7 +512,7 @@
       continue;
     }
 
-    if (position == boilerplate_properties_ * 2) {
+    if (static_cast<uint32_t>(position) == boilerplate_properties_ * 2) {
       DCHECK(property->is_computed_name());
       is_simple = false;
       break;
@@ -579,11 +581,9 @@
   if (!constant_elements_.is_null()) return;
 
   int constants_length = values()->length();
-
-  // Allocate a fixed array to hold all the object literals.
-  Handle<JSArray> array = isolate->factory()->NewJSArray(
-      FAST_HOLEY_SMI_ELEMENTS, constants_length, constants_length,
-      INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+  ElementsKind kind = FIRST_FAST_ELEMENTS_KIND;
+  Handle<FixedArray> fixed_array =
+      isolate->factory()->NewFixedArrayWithHoles(constants_length);
 
   // Fill in the literals.
   bool is_simple = true;
@@ -610,33 +610,38 @@
     }
 
     if (boilerplate_value->IsUninitialized(isolate)) {
-      boilerplate_value = handle(Smi::FromInt(0), isolate);
+      boilerplate_value = handle(Smi::kZero, isolate);
       is_simple = false;
     }
 
-    JSObject::AddDataElement(array, array_index, boilerplate_value, NONE)
-        .Assert();
+    kind = GetMoreGeneralElementsKind(kind,
+                                      boilerplate_value->OptimalElementsKind());
+    fixed_array->set(array_index, *boilerplate_value);
   }
 
-  JSObject::ValidateElements(array);
-  Handle<FixedArrayBase> element_values(array->elements());
+  if (is_holey) kind = GetHoleyElementsKind(kind);
 
   // Simple and shallow arrays can be lazily copied, so we transform the
   // elements array to a copy-on-write array.
   if (is_simple && depth_acc == 1 && array_index > 0 &&
-      array->HasFastSmiOrObjectElements()) {
-    element_values->set_map(isolate->heap()->fixed_cow_array_map());
+      IsFastSmiOrObjectElementsKind(kind)) {
+    fixed_array->set_map(isolate->heap()->fixed_cow_array_map());
+  }
+
+  Handle<FixedArrayBase> elements = fixed_array;
+  if (IsFastDoubleElementsKind(kind)) {
+    ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
+    elements = isolate->factory()->NewFixedDoubleArray(constants_length);
+    // We are copying from non-fast-double to fast-double.
+    ElementsKind from_kind = TERMINAL_FAST_ELEMENTS_KIND;
+    accessor->CopyElements(fixed_array, from_kind, elements, constants_length);
   }
 
   // Remember both the literal's constant values and the ElementsKind
   // in a 2-element FixedArray.
   Handle<FixedArray> literals = isolate->factory()->NewFixedArray(2, TENURED);
-
-  ElementsKind kind = array->GetElementsKind();
-  kind = is_holey ? GetHoleyElementsKind(kind) : GetPackedElementsKind(kind);
-
   literals->set(0, Smi::FromInt(kind));
-  literals->set(1, *element_values);
+  literals->set(1, *elements);
 
   constant_elements_ = literals;
   set_is_simple(is_simple);
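
For example, a literal like [0, 1.5] starts at the first fast kind, is
generalized to a double kind when the heap-number boilerplate value is seen,
and is therefore copied from the staging FixedArray into a FixedDoubleArray by
the elements accessor before being stored in the 2-element result.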
@@ -887,36 +892,20 @@
   }
 }
 
-bool Call::IsUsingCallFeedbackICSlot() const {
-  return GetCallType() != POSSIBLY_EVAL_CALL;
-}
-
-bool Call::IsUsingCallFeedbackSlot() const {
-  // SuperConstructorCall uses a CallConstructStub, which wants
-  // a Slot, in addition to any IC slots requested elsewhere.
-  return GetCallType() == SUPER_CALL;
-}
-
-
 void Call::AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
                                      FeedbackVectorSlotCache* cache) {
-  if (IsUsingCallFeedbackICSlot()) {
-    ic_slot_ = spec->AddCallICSlot();
-  }
-  if (IsUsingCallFeedbackSlot()) {
-    stub_slot_ = spec->AddGeneralSlot();
-  }
+  ic_slot_ = spec->AddCallICSlot();
 }
 
 Call::CallType Call::GetCallType() const {
   VariableProxy* proxy = expression()->AsVariableProxy();
   if (proxy != NULL) {
-    if (is_possibly_eval()) {
-      return POSSIBLY_EVAL_CALL;
-    } else if (proxy->var()->IsUnallocated()) {
+    if (proxy->var()->IsUnallocated()) {
       return GLOBAL_CALL;
     } else if (proxy->var()->IsLookupSlot()) {
-      return LOOKUP_SLOT_CALL;
+      // Calls going through 'with' always use DYNAMIC rather than DYNAMIC_LOCAL
+      // or DYNAMIC_GLOBAL.
+      return proxy->var()->mode() == DYNAMIC ? WITH_CALL : OTHER_CALL;
     }
   }
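
Note that POSSIBLY_EVAL_CALL disappears as a category, not as a concept:
eval-ness is still tracked by the IsPossiblyEvalField bit (retained in ast.h
below) and queried via is_possibly_eval() where it matters, while lookup-slot
calls are now split by variable mode into WITH_CALL and OTHER_CALL.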
 
diff --git a/src/ast/ast.h b/src/ast/ast.h
index a6661be..99e0672 100644
--- a/src/ast/ast.h
+++ b/src/ast/ast.h
@@ -509,20 +509,25 @@
 
 class Declaration : public AstNode {
  public:
+  typedef ThreadedList<Declaration> List;
+
   VariableProxy* proxy() const { return proxy_; }
   Scope* scope() const { return scope_; }
 
  protected:
   Declaration(VariableProxy* proxy, Scope* scope, int pos, NodeType type)
-      : AstNode(pos, type), proxy_(proxy), scope_(scope) {}
+      : AstNode(pos, type), proxy_(proxy), scope_(scope), next_(nullptr) {}
 
   static const uint8_t kNextBitFieldIndex = AstNode::kNextBitFieldIndex;
 
  private:
   VariableProxy* proxy_;
-
   // Nested scope from which the declaration originated.
   Scope* scope_;
+  // The declarations list is threaded through the declaration nodes.
+  Declaration** next() { return &next_; }
+  Declaration* next_;
+  friend List;
 };
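
Declaration::List is an intrusive (threaded) list: the link lives inside each
Declaration via next_, so collecting declarations allocates nothing beyond the
nodes themselves, and the range-for loops introduced throughout this patch
iterate it directly (the rewriter above additionally assigns a replacement
node through the iterator, which this simplified version omits). A minimal
sketch of the pattern, assuming nodes expose `T** next()` as above:

    // Sketch: an intrusive singly linked list over nodes exposing next().
    template <typename T>
    class ThreadedListSketch {
     public:
      void Add(T* node) {
        *tail_ = node;         // link the previous tail to the new node
        tail_ = node->next();  // the new node's next_ slot becomes the tail
      }
      class Iterator {
       public:
        explicit Iterator(T* node) : node_(node) {}
        T* operator*() const { return node_; }
        Iterator& operator++() { node_ = *node_->next(); return *this; }
        bool operator!=(const Iterator& o) const { return node_ != o.node_; }
       private:
        T* node_;
      };
      Iterator begin() { return Iterator(head_); }
      Iterator end() { return Iterator(nullptr); }
     private:
      T* head_ = nullptr;
      T** tail_ = &head_;
    };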
 
 
@@ -751,7 +756,6 @@
   BailoutId FilterId() const { return BailoutId(local_id(4)); }
   BailoutId AssignmentId() const { return BailoutId(local_id(5)); }
   BailoutId IncrementId() const { return BailoutId(local_id(6)); }
-  BailoutId ContinueId() const { return EntryId(); }
   BailoutId StackCheckId() const { return BodyId(); }
 
  private:
@@ -1671,7 +1675,13 @@
     bit_field_ = IsNewTargetField::update(bit_field_, true);
   }
 
-  int end_position() const { return end_position_; }
+  HoleCheckMode hole_check_mode() const {
+    return HoleCheckModeField::decode(bit_field_);
+  }
+  void set_needs_hole_check() {
+    bit_field_ =
+        HoleCheckModeField::update(bit_field_, HoleCheckMode::kRequired);
+  }
 
   // Bind this proxy to the variable var.
   void BindTo(Variable* var);
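
HoleCheckMode records whether reading this variable may observe the
uninitialized hole of a lexically bound variable: for example, the right-hand
`x` in `let x = x;` needs a runtime check (kRequired), while a use that
provably follows initialization keeps the default kElided set in the
constructors above.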
@@ -1693,9 +1703,9 @@
  private:
   friend class AstNodeFactory;
 
-  VariableProxy(Variable* var, int start_position, int end_position);
+  VariableProxy(Variable* var, int start_position);
   VariableProxy(const AstRawString* name, VariableKind variable_kind,
-                int start_position, int end_position);
+                int start_position);
   explicit VariableProxy(const VariableProxy* copy_from);
 
   static int parent_num_ids() { return Expression::num_ids(); }
@@ -1706,11 +1716,9 @@
   class IsAssignedField : public BitField<bool, IsThisField::kNext, 1> {};
   class IsResolvedField : public BitField<bool, IsAssignedField::kNext, 1> {};
   class IsNewTargetField : public BitField<bool, IsResolvedField::kNext, 1> {};
+  class HoleCheckModeField
+      : public BitField<HoleCheckMode, IsNewTargetField::kNext, 1> {};
 
-  // Position is stored in the AstNode superclass, but VariableProxy needs to
-  // know its end position too (for error messages). It cannot be inferred from
-  // the variable name length because it can contain escapes.
-  int end_position_;
   FeedbackVectorSlot variable_feedback_slot_;
   union {
     const AstRawString* raw_name_;  // if !is_resolved_
@@ -1839,8 +1847,6 @@
   void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
                                  FeedbackVectorSlotCache* cache);
 
-  FeedbackVectorSlot CallFeedbackSlot() const { return stub_slot_; }
-
   FeedbackVectorSlot CallFeedbackICSlot() const { return ic_slot_; }
 
   SmallMapList* GetReceiverTypes() {
@@ -1894,9 +1900,8 @@
   void MarkTail() { bit_field_ = IsTailField::update(bit_field_, true); }
 
   enum CallType {
-    POSSIBLY_EVAL_CALL,
     GLOBAL_CALL,
-    LOOKUP_SLOT_CALL,
+    WITH_CALL,
     NAMED_PROPERTY_CALL,
     KEYED_PROPERTY_CALL,
     NAMED_SUPER_PROPERTY_CALL,
@@ -1912,8 +1917,6 @@
 
   // Helpers to determine how to handle the call.
   CallType GetCallType() const;
-  bool IsUsingCallFeedbackSlot() const;
-  bool IsUsingCallFeedbackICSlot() const;
 
 #ifdef DEBUG
   // Used to assert that the FullCodeGenerator records the return site.
@@ -1946,7 +1949,6 @@
   class IsPossiblyEvalField : public BitField<bool, IsTailField::kNext, 1> {};
 
   FeedbackVectorSlot ic_slot_;
-  FeedbackVectorSlot stub_slot_;
   Expression* expression_;
   ZoneList<Expression*>* arguments_;
   Handle<JSFunction> target_;
@@ -2597,9 +2599,9 @@
   int materialized_literal_count() { return materialized_literal_count_; }
   int expected_property_count() { return expected_property_count_; }
   int parameter_count() { return parameter_count_; }
+  int function_length() { return function_length_; }
 
   bool AllowsLazyCompilation();
-  bool AllowsLazyCompilationWithoutContext();
 
   Handle<String> debug_name() const {
     if (raw_name_ != NULL && !raw_name_->IsEmpty()) {
@@ -2649,12 +2651,8 @@
   // function will be called immediately:
   // - (function() { ... })();
   // - var x = function() { ... }();
-  bool should_eager_compile() const {
-    return ShouldEagerCompile::decode(bit_field_);
-  }
-  void set_should_eager_compile() {
-    bit_field_ = ShouldEagerCompile::update(bit_field_, true);
-  }
+  bool ShouldEagerCompile() const;
+  void SetShouldEagerCompile();
 
   // A hint that we expect this function to be called (exactly) once,
   // i.e. we suspect it's an initialization function.
@@ -2708,6 +2706,10 @@
         IsClassFieldInitializer::update(bit_field_, is_class_field_initializer);
   }
 
+  int return_position() {
+    return std::max(start_position(), end_position() - (has_braces_ ? 1 : 0));
+  }
+
  private:
   friend class AstNodeFactory;
 
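
return_position() pins down where an implicit return is attributed now that function bodies may lack braces. A worked reading of the expression, assuming the usual convention that end_position() points one past the last character of the body:

    //   function g() { return 1; }    has_braces_ == true:
    //     return_position() == end_position() - 1, the closing '}', where an
    //     implicit `return undefined;` conceptually sits.
    //
    //   var f = x => x + 1;           has_braces_ == false:
    //     return_position() == end_position(), the end of the expression
    //     body, since there is no '}' to step back over.
    //
    // std::max(start_position(), ...) clamps the result so a degenerate body
    // can never yield a position before the function itself.
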
@@ -2715,16 +2717,18 @@
                   AstValueFactory* ast_value_factory, DeclarationScope* scope,
                   ZoneList<Statement*>* body, int materialized_literal_count,
                   int expected_property_count, int parameter_count,
-                  FunctionType function_type,
+                  int function_length, FunctionType function_type,
                   ParameterFlag has_duplicate_parameters,
                   EagerCompileHint eager_compile_hint, int position,
-                  bool is_function)
+                  bool is_function, bool has_braces)
       : Expression(position, kFunctionLiteral),
         materialized_literal_count_(materialized_literal_count),
         expected_property_count_(expected_property_count),
         parameter_count_(parameter_count),
+        function_length_(function_length),
         function_token_position_(kNoSourcePosition),
         yield_count_(0),
+        has_braces_(has_braces),
         raw_name_(name),
         scope_(scope),
         body_(body),
@@ -2735,11 +2739,11 @@
         HasDuplicateParameters::encode(has_duplicate_parameters ==
                                        kHasDuplicateParameters) |
         IsFunction::encode(is_function) |
-        ShouldEagerCompile::encode(eager_compile_hint == kShouldEagerCompile) |
         RequiresClassFieldInit::encode(false) |
         ShouldNotBeUsedOnceHintField::encode(false) |
         DontOptimizeReasonField::encode(kNoReason) |
         IsClassFieldInitializer::encode(false);
+    if (eager_compile_hint == kShouldEagerCompile) SetShouldEagerCompile();
   }
 
   class FunctionTypeBits
@@ -2747,9 +2751,8 @@
   class Pretenure : public BitField<bool, FunctionTypeBits::kNext, 1> {};
   class HasDuplicateParameters : public BitField<bool, Pretenure::kNext, 1> {};
   class IsFunction : public BitField<bool, HasDuplicateParameters::kNext, 1> {};
-  class ShouldEagerCompile : public BitField<bool, IsFunction::kNext, 1> {};
   class ShouldNotBeUsedOnceHintField
-      : public BitField<bool, ShouldEagerCompile::kNext, 1> {};
+      : public BitField<bool, IsFunction::kNext, 1> {};
   class RequiresClassFieldInit
       : public BitField<bool, ShouldNotBeUsedOnceHintField::kNext, 1> {};
   class IsClassFieldInitializer
@@ -2760,8 +2763,10 @@
   int materialized_literal_count_;
   int expected_property_count_;
   int parameter_count_;
+  int function_length_;
   int function_token_position_;
   int yield_count_;
+  bool has_braces_;
 
   const AstString* raw_name_;
   DeclarationScope* scope_;
@@ -2962,10 +2967,8 @@
  public:
   void Visit(AstNode* node) { impl()->Visit(node); }
 
-  void VisitDeclarations(ZoneList<Declaration*>* declarations) {
-    for (int i = 0; i < declarations->length(); i++) {
-      Visit(declarations->at(i));
-    }
+  void VisitDeclarations(Declaration::List* declarations) {
+    for (Declaration* decl : *declarations) Visit(decl);
   }
 
   void VisitStatements(ZoneList<Statement*>* statements) {
@@ -3279,7 +3282,7 @@
         Literal(ast_value_factory_->NewNumber(number, with_dot), pos);
   }
 
-  Literal* NewSmiLiteral(int number, int pos) {
+  Literal* NewSmiLiteral(uint32_t number, int pos) {
     return new (zone_) Literal(ast_value_factory_->NewSmi(number), pos);
   }
 
@@ -3339,18 +3342,15 @@
   }
 
   VariableProxy* NewVariableProxy(Variable* var,
-                                  int start_position = kNoSourcePosition,
-                                  int end_position = kNoSourcePosition) {
-    return new (zone_) VariableProxy(var, start_position, end_position);
+                                  int start_position = kNoSourcePosition) {
+    return new (zone_) VariableProxy(var, start_position);
   }
 
   VariableProxy* NewVariableProxy(const AstRawString* name,
                                   VariableKind variable_kind,
-                                  int start_position = kNoSourcePosition,
-                                  int end_position = kNoSourcePosition) {
+                                  int start_position = kNoSourcePosition) {
     DCHECK_NOT_NULL(name);
-    return new (zone_)
-        VariableProxy(name, variable_kind, start_position, end_position);
+    return new (zone_) VariableProxy(name, variable_kind, start_position);
   }
 
   // Recreates the VariableProxy in this Zone.
@@ -3459,15 +3459,16 @@
   FunctionLiteral* NewFunctionLiteral(
       const AstRawString* name, DeclarationScope* scope,
       ZoneList<Statement*>* body, int materialized_literal_count,
-      int expected_property_count, int parameter_count,
+      int expected_property_count, int parameter_count, int function_length,
       FunctionLiteral::ParameterFlag has_duplicate_parameters,
       FunctionLiteral::FunctionType function_type,
-      FunctionLiteral::EagerCompileHint eager_compile_hint, int position) {
-    return new (zone_) FunctionLiteral(zone_, name, ast_value_factory_, scope,
-                                       body, materialized_literal_count,
-                                       expected_property_count, parameter_count,
-                                       function_type, has_duplicate_parameters,
-                                       eager_compile_hint, position, true);
+      FunctionLiteral::EagerCompileHint eager_compile_hint, int position,
+      bool has_braces) {
+    return new (zone_) FunctionLiteral(
+        zone_, name, ast_value_factory_, scope, body,
+        materialized_literal_count, expected_property_count, parameter_count,
+        function_length, function_type, has_duplicate_parameters,
+        eager_compile_hint, position, true, has_braces);
   }
 
   // Creates a FunctionLiteral representing a top-level script, the
@@ -3480,9 +3481,9 @@
     return new (zone_) FunctionLiteral(
         zone_, ast_value_factory_->empty_string(), ast_value_factory_, scope,
         body, materialized_literal_count, expected_property_count,
-        parameter_count, FunctionLiteral::kAnonymousExpression,
+        parameter_count, parameter_count, FunctionLiteral::kAnonymousExpression,
         FunctionLiteral::kNoDuplicateParameters,
-        FunctionLiteral::kShouldLazyCompile, 0, false);
+        FunctionLiteral::kShouldLazyCompile, 0, false, true);
   }
 
   ClassLiteral::Property* NewClassLiteralProperty(
diff --git a/src/ast/modules.cc b/src/ast/modules.cc
index 2d28d55..339d64c 100644
--- a/src/ast/modules.cc
+++ b/src/ast/modules.cc
@@ -87,8 +87,8 @@
   return ModuleInfoEntry::New(
       isolate, ToStringOrUndefined(isolate, export_name),
       ToStringOrUndefined(isolate, local_name),
-      ToStringOrUndefined(isolate, import_name),
-      Handle<Object>(Smi::FromInt(module_request), isolate));
+      ToStringOrUndefined(isolate, import_name), module_request, cell_index,
+      location.beg_pos, location.end_pos);
 }
 
 ModuleDescriptor::Entry* ModuleDescriptor::Entry::Deserialize(
@@ -101,7 +101,8 @@
       isolate, avfactory, handle(entry->local_name(), isolate));
   result->import_name = FromStringOrUndefined(
       isolate, avfactory, handle(entry->import_name(), isolate));
-  result->module_request = Smi::cast(entry->module_request())->value();
+  result->module_request = entry->module_request();
+  result->cell_index = entry->cell_index();
   return result;
 }
 
@@ -111,57 +112,68 @@
   // local names and for each local name immediately access all its export
   // names.  (Regular exports have neither import name nor module request.)
 
-  ZoneVector<Handle<Object>> data(zone);
-  data.reserve(2 * regular_exports_.size());
+  ZoneVector<Handle<Object>> data(
+      ModuleInfo::kRegularExportLength * regular_exports_.size(), zone);
+  int index = 0;
 
   for (auto it = regular_exports_.begin(); it != regular_exports_.end();) {
     // Find out how many export names this local name has.
     auto next = it;
-    int size = 0;
+    int count = 0;
     do {
+      DCHECK_EQ(it->second->local_name, next->second->local_name);
+      DCHECK_EQ(it->second->cell_index, next->second->cell_index);
       ++next;
-      ++size;
+      ++count;
     } while (next != regular_exports_.end() && next->first == it->first);
 
-    Handle<FixedArray> export_names = isolate->factory()->NewFixedArray(size);
-    data.push_back(it->second->local_name->string());
-    data.push_back(export_names);
+    Handle<FixedArray> export_names = isolate->factory()->NewFixedArray(count);
+    data[index + ModuleInfo::kRegularExportLocalNameOffset] =
+        it->second->local_name->string();
+    data[index + ModuleInfo::kRegularExportCellIndexOffset] =
+        handle(Smi::FromInt(it->second->cell_index), isolate);
+    data[index + ModuleInfo::kRegularExportExportNamesOffset] = export_names;
+    index += ModuleInfo::kRegularExportLength;
 
     // Collect the export names.
     int i = 0;
     for (; it != next; ++it) {
       export_names->set(i++, *it->second->export_name->string());
     }
-    DCHECK_EQ(i, size);
+    DCHECK_EQ(i, count);
 
     // Continue with the next distinct key.
     DCHECK(it == next);
   }
+  DCHECK_LE(index, static_cast<int>(data.size()));
+  data.resize(index);
 
   // We cannot create the FixedArray earlier because we only now know the
-  // precise size (the number of unique keys in regular_exports).
-  int size = static_cast<int>(data.size());
-  Handle<FixedArray> result = isolate->factory()->NewFixedArray(size);
-  for (int i = 0; i < size; ++i) {
+  // precise size.
+  Handle<FixedArray> result = isolate->factory()->NewFixedArray(index);
+  for (int i = 0; i < index; ++i) {
     result->set(i, *data[i]);
   }
   return result;
 }
 
-void ModuleDescriptor::DeserializeRegularExports(Isolate* isolate,
-                                                 AstValueFactory* avfactory,
-                                                 Handle<FixedArray> data) {
-  for (int i = 0, length_i = data->length(); i < length_i;) {
-    Handle<String> local_name(String::cast(data->get(i++)), isolate);
-    Handle<FixedArray> export_names(FixedArray::cast(data->get(i++)), isolate);
+void ModuleDescriptor::DeserializeRegularExports(
+    Isolate* isolate, AstValueFactory* avfactory,
+    Handle<ModuleInfo> module_info) {
+  for (int i = 0, count = module_info->RegularExportCount(); i < count; ++i) {
+    Handle<String> local_name(module_info->RegularExportLocalName(i), isolate);
+    int cell_index = module_info->RegularExportCellIndex(i);
+    Handle<FixedArray> export_names(module_info->RegularExportExportNames(i),
+                                    isolate);
 
-    for (int j = 0, length_j = export_names->length(); j < length_j; ++j) {
+    for (int j = 0, length = export_names->length(); j < length; ++j) {
       Handle<String> export_name(String::cast(export_names->get(j)), isolate);
 
       Entry* entry =
           new (avfactory->zone()) Entry(Scanner::Location::invalid());
       entry->local_name = avfactory->GetString(local_name);
       entry->export_name = avfactory->GetString(export_name);
+      entry->cell_index = cell_index;
 
       AddRegularExport(entry);
     }
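
With this change the regular-export metadata is a flat array of fixed-width records, one per distinct local name, each ModuleInfo::kRegularExportLength slots wide (presumably 3, one slot per *Offset constant used above). A layout sketch for a hypothetical module:

    //   Source:  let a; export {a, a as b}; export let c;
    //
    //   slot 0: "a"                     kRegularExportLocalNameOffset
    //   slot 1: Smi(1)                  kRegularExportCellIndexOffset
    //   slot 2: FixedArray ["a", "b"]   kRegularExportExportNamesOffset
    //   slot 3: "c"
    //   slot 4: Smi(2)
    //   slot 5: FixedArray ["c"]

The reader side of the same layout is added to ModuleInfo in src/ast/scopeinfo.cc below: RegularExportCount() divides the array length by kRegularExportLength, and each accessor indexes i * kRegularExportLength + offset.
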
@@ -184,6 +196,13 @@
                 static_cast<int>(module_requests_.size()));
       entry->import_name = import->second->import_name;
       entry->module_request = import->second->module_request;
+      // Hack: When the indirect export cannot be resolved, we want the error
+      // message to point at the import statement, not at the export statement.
+      // Therefore we overwrite [entry]'s location here.  Note that Validate()
+      // has already checked for duplicate exports, so it's guaranteed that we
+      // won't need to report any error pointing at the (now lost) export
+      // location.
+      entry->location = import->second->location;
       entry->local_name = nullptr;
       AddSpecialExport(entry, zone);
       it = regular_exports_.erase(it);
@@ -193,6 +212,43 @@
   }
 }
 
+ModuleDescriptor::CellIndexKind ModuleDescriptor::GetCellIndexKind(
+    int cell_index) {
+  if (cell_index > 0) return kExport;
+  if (cell_index < 0) return kImport;
+  return kInvalid;
+}
+
+void ModuleDescriptor::AssignCellIndices() {
+  int export_index = 1;
+  for (auto it = regular_exports_.begin(); it != regular_exports_.end();) {
+    auto current_key = it->first;
+    // This local name may be exported under multiple export names.  Assign the
+    // same index to each such entry.
+    do {
+      Entry* entry = it->second;
+      DCHECK_NOT_NULL(entry->local_name);
+      DCHECK_NULL(entry->import_name);
+      DCHECK_LT(entry->module_request, 0);
+      DCHECK_EQ(entry->cell_index, 0);
+      entry->cell_index = export_index;
+      it++;
+    } while (it != regular_exports_.end() && it->first == current_key);
+    export_index++;
+  }
+
+  int import_index = -1;
+  for (const auto& elem : regular_imports_) {
+    Entry* entry = elem.second;
+    DCHECK_NOT_NULL(entry->local_name);
+    DCHECK_NOT_NULL(entry->import_name);
+    DCHECK_LE(0, entry->module_request);
+    DCHECK_EQ(entry->cell_index, 0);
+    entry->cell_index = import_index;
+    import_index--;
+  }
+}
+
 namespace {
 
 const ModuleDescriptor::Entry* BetterDuplicate(
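
The signed cell_index encoding makes GetCellIndexKind a pure sign test: exports count up from +1 (one cell per distinct local name, shared by all of its export aliases), imports count down from -1, and 0 marks entries without a MODULE cell. For example (import numbering actually follows the regular_imports_ map's iteration order; source order is used here for illustration):

    //   import {x} from "m";        // cell_index -1  -> kImport
    //   import {y as w} from "m";   // cell_index -2  -> kImport
    //   let a; export {a, a as b};  // cell_index +1  -> kExport
    //                               //   (one cell, two export names)
    //   export let c;               // cell_index +2  -> kExport
    //   export {x};                 // indirect export: rewritten by
    //                               // MakeIndirectExportsExplicit above,
    //                               // keeps cell_index 0 -> kInvalid

ModuleScope::AllocateModuleVariables in src/ast/scopes.cc below then copies these indices verbatim into the variables' index fields, replacing the old placeholder values.
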
@@ -259,6 +315,7 @@
   }
 
   MakeIndirectExportsExplicit(zone);
+  AssignCellIndices();
   return true;
 }
 
diff --git a/src/ast/modules.h b/src/ast/modules.h
index 4d36735..94550fb 100644
--- a/src/ast/modules.h
+++ b/src/ast/modules.h
@@ -14,6 +14,7 @@
 
 
 class AstRawString;
+class ModuleInfo;
 class ModuleInfoEntry;
 
 class ModuleDescriptor : public ZoneObject {
@@ -73,23 +74,34 @@
                 PendingCompilationErrorHandler* error_handler, Zone* zone);
 
   struct Entry : public ZoneObject {
-    const Scanner::Location location;
+    Scanner::Location location;
     const AstRawString* export_name;
     const AstRawString* local_name;
     const AstRawString* import_name;
+
     // The module_request value records the order in which modules are
     // requested. It also functions as an index into the ModuleInfo's array of
     // module specifiers and into the Module's array of requested modules.  A
     // negative value means no module request.
     int module_request;
 
+    // Import/export entries that are associated with a MODULE-allocated
+    // variable (i.e. regular_imports and regular_exports after Validate) use
+    // the cell_index value to encode the location of their cell.  During
+    // variable allocation, this will be copied into the variable's index
+    // field.
+    // Entries that are not associated with a MODULE-allocated variable have
+    // GetCellIndexKind(cell_index) == kInvalid.
+    int cell_index;
+
     // TODO(neis): Remove local_name component?
     explicit Entry(Scanner::Location loc)
         : location(loc),
           export_name(nullptr),
           local_name(nullptr),
           import_name(nullptr),
-          module_request(-1) {}
+          module_request(-1),
+          cell_index(0) {}
 
     // (De-)serialization support.
     // Note that the location value is not preserved as it's only needed by the
@@ -99,6 +111,9 @@
                               Handle<ModuleInfoEntry> entry);
   };
 
+  enum CellIndexKind { kInvalid, kExport, kImport };
+  static CellIndexKind GetCellIndexKind(int cell_index);
+
   // Module requests.
   const ZoneMap<const AstRawString*, int>& module_requests() const {
     return module_requests_;
@@ -110,7 +125,7 @@
   }
 
   // All the remaining imports, indexed by local name.
-  const ZoneMap<const AstRawString*, const Entry*>& regular_imports() const {
+  const ZoneMap<const AstRawString*, Entry*>& regular_imports() const {
     return regular_imports_;
   }
 
@@ -139,7 +154,7 @@
     special_exports_.Add(entry, zone);
   }
 
-  void AddRegularImport(const Entry* entry) {
+  void AddRegularImport(Entry* entry) {
     DCHECK_NOT_NULL(entry->import_name);
     DCHECK_NOT_NULL(entry->local_name);
     DCHECK_NULL(entry->export_name);
@@ -160,7 +175,7 @@
   Handle<FixedArray> SerializeRegularExports(Isolate* isolate,
                                              Zone* zone) const;
   void DeserializeRegularExports(Isolate* isolate, AstValueFactory* avfactory,
-                                 Handle<FixedArray> data);
+                                 Handle<ModuleInfo> module_info);
 
  private:
   // TODO(neis): Use STL datastructure instead of ZoneList?
@@ -168,7 +183,7 @@
   ZoneList<const Entry*> special_exports_;
   ZoneList<const Entry*> namespace_imports_;
   ZoneMultimap<const AstRawString*, Entry*> regular_exports_;
-  ZoneMap<const AstRawString*, const Entry*> regular_imports_;
+  ZoneMap<const AstRawString*, Entry*> regular_imports_;
 
   // If there are multiple export entries with the same export name, return the
   // last of them (in source order).  Otherwise return nullptr.
@@ -192,6 +207,11 @@
   // (The import entry is never deleted.)
   void MakeIndirectExportsExplicit(Zone* zone);
 
+  // Assign a cell_index of -1,-2,... to regular imports.
+  // Assign a cell_index of +1,+2,... to regular (local) exports.
+  // Assign a cell_index of 0 to anything else.
+  void AssignCellIndices();
+
   int AddModuleRequest(const AstRawString* specifier) {
     DCHECK_NOT_NULL(specifier);
     auto it = module_requests_
diff --git a/src/ast/prettyprinter.cc b/src/ast/prettyprinter.cc
index 874c159..a3fc50a 100644
--- a/src/ast/prettyprinter.cc
+++ b/src/ast/prettyprinter.cc
@@ -529,6 +529,18 @@
     }
   } else if (object->IsFixedArray()) {
     Print("FixedArray");
+  } else if (object->IsSymbol()) {
+    // Symbols can only occur as literals if they were inserted by the parser.
+    Symbol* symbol = Symbol::cast(object);
+    if (symbol->name()->IsString()) {
+      int length = 0;
+      String* string = String::cast(symbol->name());
+      std::unique_ptr<char[]> desc = string->ToCString(
+          ALLOW_NULLS, FAST_STRING_TRAVERSAL, 0, string->length(), &length);
+      Print("Symbol(%*s)", length, desc.get());
+    } else {
+      Print("Symbol()");
+    }
   } else {
     Print("<unknown literal %p>", static_cast<void*>(object));
   }
@@ -650,13 +662,10 @@
   PrintF("%s", printer.output_);
 }
 
-
-void AstPrinter::PrintDeclarations(ZoneList<Declaration*>* declarations) {
-  if (declarations->length() > 0) {
+void AstPrinter::PrintDeclarations(Declaration::List* declarations) {
+  if (!declarations->is_empty()) {
     IndentedScope indent(this, "DECLS");
-    for (int i = 0; i < declarations->length(); i++) {
-      Visit(declarations->at(i));
-    }
+    for (Declaration* decl : *declarations) Visit(decl);
   }
 }
 
diff --git a/src/ast/prettyprinter.h b/src/ast/prettyprinter.h
index 2d553ba..b56c834 100644
--- a/src/ast/prettyprinter.h
+++ b/src/ast/prettyprinter.h
@@ -84,7 +84,7 @@
   void PrintIndentedVisit(const char* s, AstNode* node);
 
   void PrintStatements(ZoneList<Statement*>* statements);
-  void PrintDeclarations(ZoneList<Declaration*>* declarations);
+  void PrintDeclarations(Declaration::List* declarations);
   void PrintParameters(DeclarationScope* scope);
   void PrintArguments(ZoneList<Expression*>* arguments);
   void PrintCaseClause(CaseClause* clause);
diff --git a/src/ast/scopeinfo.cc b/src/ast/scopeinfo.cc
index 5354b8d..3a3ea03 100644
--- a/src/ast/scopeinfo.cc
+++ b/src/ast/scopeinfo.cc
@@ -58,7 +58,6 @@
 Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
                                     MaybeHandle<ScopeInfo> outer_scope) {
   // Collect variables.
-  ZoneList<Variable*>* locals = scope->locals();
   int stack_local_count = 0;
   int context_local_count = 0;
   int module_vars_count = 0;
@@ -67,8 +66,7 @@
   // slot index indicates at which offset a particular scope starts in the
   // parent declaration scope.
   int first_slot_index = 0;
-  for (int i = 0; i < locals->length(); i++) {
-    Variable* var = locals->at(i);
+  for (Variable* var : *scope->locals()) {
     switch (var->location()) {
       case VariableLocation::LOCAL:
         if (stack_local_count == 0) first_slot_index = var->index();
@@ -198,8 +196,7 @@
   int context_local_info_base = context_local_base + context_local_count;
   int module_var_entry = scope_info->ModuleVariablesIndex();
 
-  for (int i = 0; i < locals->length(); ++i) {
-    Variable* var = locals->at(i);
+  for (Variable* var : *scope->locals()) {
     switch (var->location()) {
       case VariableLocation::LOCAL: {
         int local_index = var->index() - first_slot_index;
@@ -315,7 +312,7 @@
   int index = kVariablePartIndex;
   DCHECK_EQ(index, scope_info->ParameterNamesIndex());
   DCHECK_EQ(index, scope_info->StackLocalFirstSlotIndex());
-  scope_info->set(index++, Smi::FromInt(0));
+  scope_info->set(index++, Smi::kZero);
   DCHECK_EQ(index, scope_info->StackLocalNamesIndex());
   DCHECK_EQ(index, scope_info->ReceiverInfoIndex());
   DCHECK_EQ(index, scope_info->FunctionNameInfoIndex());
@@ -373,7 +370,7 @@
 
   // Here we add info for context-allocated "this".
   DCHECK_EQ(index, scope_info->ContextLocalNamesIndex());
-  scope_info->set(index++, *isolate->factory()->this_string());
+  scope_info->set(index++, isolate->heap()->this_string());
   DCHECK_EQ(index, scope_info->ContextLocalInfosIndex());
   const uint32_t value = VariableModeField::encode(CONST) |
                          InitFlagField::encode(kCreatedInitialized) |
@@ -647,18 +644,14 @@
   int entry = ModuleVariablesIndex();
   for (int i = 0; i < module_vars_count; ++i) {
     if (*name == get(entry + kModuleVariableNameOffset)) {
-      int index = Smi::cast(get(entry + kModuleVariableIndexOffset))->value();
-      int properties =
-          Smi::cast(get(entry + kModuleVariablePropertiesOffset))->value();
-      *mode = VariableModeField::decode(properties);
-      *init_flag = InitFlagField::decode(properties);
-      *maybe_assigned_flag = MaybeAssignedFlagField::decode(properties);
+      int index;
+      ModuleVariable(i, nullptr, &index, mode, init_flag, maybe_assigned_flag);
       return index;
     }
     entry += kModuleVariableEntryLength;
   }
 
-  return -1;
+  return 0;
 }
 
 int ScopeInfo::ContextSlotIndex(Handle<ScopeInfo> scope_info,
@@ -794,6 +787,35 @@
 
 int ScopeInfo::ModuleVariablesIndex() { return ModuleVariableCountIndex() + 1; }
 
+void ScopeInfo::ModuleVariable(int i, String** name, int* index,
+                               VariableMode* mode,
+                               InitializationFlag* init_flag,
+                               MaybeAssignedFlag* maybe_assigned_flag) {
+  DCHECK_LE(0, i);
+  DCHECK_LT(i, Smi::cast(get(ModuleVariableCountIndex()))->value());
+
+  int entry = ModuleVariablesIndex() + i * kModuleVariableEntryLength;
+  int properties =
+      Smi::cast(get(entry + kModuleVariablePropertiesOffset))->value();
+
+  if (name != nullptr) {
+    *name = String::cast(get(entry + kModuleVariableNameOffset));
+  }
+  if (index != nullptr) {
+    *index = Smi::cast(get(entry + kModuleVariableIndexOffset))->value();
+    DCHECK_NE(*index, 0);
+  }
+  if (mode != nullptr) {
+    *mode = VariableModeField::decode(properties);
+  }
+  if (init_flag != nullptr) {
+    *init_flag = InitFlagField::decode(properties);
+  }
+  if (maybe_assigned_flag != nullptr) {
+    *maybe_assigned_flag = MaybeAssignedFlagField::decode(properties);
+  }
+}
+
 #ifdef DEBUG
 
 static void PrintList(const char* list_name,
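
ModuleVariable becomes the single decoder for one record of the packed module-variable table; every out-parameter is nullable, which is how ModuleIndex above fetches only the index. Note the paired change to ModuleIndex's not-found result: 0 instead of -1, because with signed cell indices both positive (export) and negative (import) values are in use and 0 is the only free sentinel, matching GetCellIndexKind's kInvalid. A hypothetical call site:

    // Hypothetical: classify module variable i of a ScopeInfo, fetching only
    // the cell index and leaving name and flag out-parameters unqueried.
    void ClassifyModuleVariable(ScopeInfo* scope_info, int i) {
      int index;
      scope_info->ModuleVariable(i, nullptr, &index, nullptr, nullptr,
                                 nullptr);
      if (ModuleDescriptor::GetCellIndexKind(index) ==
          ModuleDescriptor::kImport) {
        // index is -1, -2, ...: the binding lives in another module's cell.
      }
    }
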
@@ -843,12 +865,17 @@
                                              Handle<Object> export_name,
                                              Handle<Object> local_name,
                                              Handle<Object> import_name,
-                                             Handle<Object> module_request) {
-  Handle<ModuleInfoEntry> result = isolate->factory()->NewModuleInfoEntry();
-  result->set(kExportNameIndex, *export_name);
-  result->set(kLocalNameIndex, *local_name);
-  result->set(kImportNameIndex, *import_name);
-  result->set(kModuleRequestIndex, *module_request);
+                                             int module_request, int cell_index,
+                                             int beg_pos, int end_pos) {
+  Handle<ModuleInfoEntry> result = Handle<ModuleInfoEntry>::cast(
+      isolate->factory()->NewStruct(MODULE_INFO_ENTRY_TYPE));
+  result->set_export_name(*export_name);
+  result->set_local_name(*local_name);
+  result->set_import_name(*import_name);
+  result->set_module_request(module_request);
+  result->set_cell_index(cell_index);
+  result->set_beg_pos(beg_pos);
+  result->set_end_pos(end_pos);
   return result;
 }
 
@@ -867,7 +894,8 @@
   {
     int i = 0;
     for (auto entry : descr->special_exports()) {
-      special_exports->set(i++, *entry->Serialize(isolate));
+      Handle<ModuleInfoEntry> serialized_entry = entry->Serialize(isolate);
+      special_exports->set(i++, *serialized_entry);
     }
   }
 
@@ -877,7 +905,8 @@
   {
     int i = 0;
     for (auto entry : descr->namespace_imports()) {
-      namespace_imports->set(i++, *entry->Serialize(isolate));
+      Handle<ModuleInfoEntry> serialized_entry = entry->Serialize(isolate);
+      namespace_imports->set(i++, *serialized_entry);
     }
   }
 
@@ -891,7 +920,9 @@
   {
     int i = 0;
     for (const auto& elem : descr->regular_imports()) {
-      regular_imports->set(i++, *elem.second->Serialize(isolate));
+      Handle<ModuleInfoEntry> serialized_entry =
+          elem.second->Serialize(isolate);
+      regular_imports->set(i++, *serialized_entry);
     }
   }
 
@@ -904,5 +935,41 @@
   return result;
 }
 
+int ModuleInfo::RegularExportCount() const {
+  DCHECK_EQ(regular_exports()->length() % kRegularExportLength, 0);
+  return regular_exports()->length() / kRegularExportLength;
+}
+
+String* ModuleInfo::RegularExportLocalName(int i) const {
+  return String::cast(regular_exports()->get(i * kRegularExportLength +
+                                             kRegularExportLocalNameOffset));
+}
+
+int ModuleInfo::RegularExportCellIndex(int i) const {
+  return Smi::cast(regular_exports()->get(i * kRegularExportLength +
+                                          kRegularExportCellIndexOffset))
+      ->value();
+}
+
+FixedArray* ModuleInfo::RegularExportExportNames(int i) const {
+  return FixedArray::cast(regular_exports()->get(
+      i * kRegularExportLength + kRegularExportExportNamesOffset));
+}
+
+Handle<ModuleInfoEntry> ModuleInfo::LookupRegularImport(
+    Handle<ModuleInfo> info, Handle<String> local_name) {
+  Isolate* isolate = info->GetIsolate();
+  Handle<FixedArray> regular_imports(info->regular_imports(), isolate);
+  for (int i = 0, n = regular_imports->length(); i < n; ++i) {
+    Handle<ModuleInfoEntry> entry(
+        ModuleInfoEntry::cast(regular_imports->get(i)), isolate);
+    if (String::cast(entry->local_name())->Equals(*local_name)) {
+      return entry;
+    }
+  }
+  UNREACHABLE();
+  return Handle<ModuleInfoEntry>();
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/ast/scopes.cc b/src/ast/scopes.cc
index c531ef5..c1679a4 100644
--- a/src/ast/scopes.cc
+++ b/src/ast/scopes.cc
@@ -96,8 +96,6 @@
     : zone_(zone),
       outer_scope_(nullptr),
       variables_(zone),
-      locals_(4, zone),
-      decls_(4, zone),
       scope_type_(SCRIPT_SCOPE) {
   SetDefaults();
 }
@@ -106,8 +104,6 @@
     : zone_(zone),
       outer_scope_(outer_scope),
       variables_(zone),
-      locals_(4, zone),
-      decls_(4, zone),
       scope_type_(scope_type) {
   DCHECK_NE(SCRIPT_SCOPE, scope_type);
   SetDefaults();
@@ -121,8 +117,8 @@
     : outer_scope_(scope),
       top_inner_scope_(scope->inner_scope_),
       top_unresolved_(scope->unresolved_),
-      top_local_(scope->GetClosureScope()->locals_.length()),
-      top_decl_(scope->GetClosureScope()->decls_.length()) {}
+      top_local_(scope->GetClosureScope()->locals_.end()),
+      top_decl_(scope->GetClosureScope()->decls_.end()) {}
 
 DeclarationScope::DeclarationScope(Zone* zone,
                                    AstValueFactory* ast_value_factory)
@@ -164,7 +160,7 @@
                          AstValueFactory* avfactory)
     : DeclarationScope(avfactory->zone(), MODULE_SCOPE, scope_info) {
   Zone* zone = avfactory->zone();
-  ModuleInfo* module_info = scope_info->ModuleDescriptorInfo();
+  Handle<ModuleInfo> module_info(scope_info->ModuleDescriptorInfo(), isolate);
 
   set_language_mode(STRICT);
   module_descriptor_ = new (zone) ModuleDescriptor(zone);
@@ -181,9 +177,8 @@
   }
 
   // Deserialize regular exports.
-  Handle<FixedArray> regular_exports(module_info->regular_exports(), isolate);
   module_descriptor_->DeserializeRegularExports(isolate, avfactory,
-                                                regular_exports);
+                                                module_info);
 
   // Deserialize namespace imports.
   Handle<FixedArray> namespace_imports(module_info->namespace_imports(),
@@ -211,8 +206,6 @@
     : zone_(zone),
       outer_scope_(nullptr),
       variables_(zone),
-      locals_(0, zone),
-      decls_(0, zone),
       scope_info_(scope_info),
       scope_type_(scope_type) {
   DCHECK(!scope_info.is_null());
@@ -241,8 +234,6 @@
     : zone_(zone),
       outer_scope_(nullptr),
       variables_(zone),
-      locals_(0, zone),
-      decls_(0, zone),
       scope_info_(scope_info),
       scope_type_(CATCH_SCOPE) {
   SetDefaults();
@@ -271,7 +262,8 @@
   function_ = nullptr;
   arguments_ = nullptr;
   this_function_ = nullptr;
-  arity_ = 0;
+  should_eager_compile_ = false;
+  is_lazily_parsed_ = false;
 }
 
 void Scope::SetDefaults() {
@@ -301,8 +293,6 @@
   force_context_allocation_ = false;
 
   is_declaration_scope_ = false;
-
-  is_lazily_parsed_ = false;
 }
 
 bool Scope::HasSimpleParameters() {
@@ -310,6 +300,14 @@
   return !scope->is_function_scope() || scope->has_simple_parameters();
 }
 
+bool DeclarationScope::ShouldEagerCompile() const {
+  return force_eager_compilation_ || should_eager_compile_;
+}
+
+void DeclarationScope::set_should_eager_compile() {
+  should_eager_compile_ = !is_lazily_parsed_;
+}
+
 void DeclarationScope::set_asm_module() {
   asm_module_ = true;
   // Mark any existing inner function scopes as asm function scopes.
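
Eager-compile intent moves off the FunctionLiteral bit and onto the scope, where it can interact with lazy parsing: set_should_eager_compile() deliberately stays false for a preparsed (skipped) body, since there is no AST to compile eagerly, while force_eager_compilation_ always wins. Summarized as a sketch:

    //   is_lazily_parsed_ | set_should_eager_compile() | ShouldEagerCompile()
    //   ------------------+----------------------------+---------------------
    //   false             | should_eager_compile_=true | true
    //   true              | stays false                | force_eager_compilation_
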
@@ -552,6 +550,9 @@
          scope->outer_scope()->scope_type() == SCRIPT_SCOPE ||
          scope->outer_scope()->already_resolved_);
 
+  // The outer scope is never lazy.
+  scope->set_should_eager_compile();
+
   scope->AllocateVariables(info, mode);
 
   // Ensuring that the outer script scope has a scope info avoids having
@@ -707,13 +708,32 @@
   return NULL;
 }
 
+void DeclarationScope::AddLocal(Variable* var) {
+  DCHECK(!already_resolved_);
+  // Temporaries are only placed in ClosureScopes.
+  DCHECK_EQ(GetClosureScope(), this);
+  locals_.Add(var);
+}
+
+Variable* Scope::Declare(Zone* zone, Scope* scope, const AstRawString* name,
+                         VariableMode mode, VariableKind kind,
+                         InitializationFlag initialization_flag,
+                         MaybeAssignedFlag maybe_assigned_flag) {
+  bool added;
+  Variable* var =
+      variables_.Declare(zone, scope, name, mode, kind, initialization_flag,
+                         maybe_assigned_flag, &added);
+  if (added) locals_.Add(var);
+  return var;
+}
+
 void Scope::Snapshot::Reparent(DeclarationScope* new_parent) const {
   DCHECK_EQ(new_parent, outer_scope_->inner_scope_);
   DCHECK_EQ(new_parent->outer_scope_, outer_scope_);
   DCHECK_EQ(new_parent, new_parent->GetClosureScope());
   DCHECK_NULL(new_parent->inner_scope_);
   DCHECK_NULL(new_parent->unresolved_);
-  DCHECK_EQ(0, new_parent->locals_.length());
+  DCHECK(new_parent->locals_.is_empty());
   Scope* inner_scope = new_parent->sibling_;
   if (inner_scope != top_inner_scope_) {
     for (; inner_scope->sibling() != top_inner_scope_;
@@ -745,13 +765,13 @@
   // name in the closure-scope. See
   // test/mjsunit/harmony/default-parameter-do-expression.js.
   DeclarationScope* outer_closure = outer_scope_->GetClosureScope();
-  for (int i = top_local_; i < outer_closure->locals_.length(); i++) {
-    Variable* local = outer_closure->locals_.at(i);
+
+  new_parent->locals_.MoveTail(outer_closure->locals(), top_local_);
+  for (Variable* local : new_parent->locals_) {
     DCHECK(local->mode() == TEMPORARY || local->mode() == VAR);
     DCHECK_EQ(local->scope(), local->scope()->GetClosureScope());
     DCHECK_NE(local->scope(), new_parent);
     local->set_scope(new_parent);
-    new_parent->AddLocal(local);
     if (local->mode() == VAR) {
       outer_closure->variables_.Remove(local);
       new_parent->variables_.Add(new_parent->zone(), local);
@@ -787,20 +807,29 @@
   // There should be no local slot with the given name.
   DCHECK_LT(scope_info_->StackSlotIndex(*name_handle), 0);
 
+  bool found = false;
+
+  VariableLocation location;
+  int index;
   VariableMode mode;
   InitializationFlag init_flag;
   MaybeAssignedFlag maybe_assigned_flag;
 
-  VariableLocation location = VariableLocation::CONTEXT;
-  int index = ScopeInfo::ContextSlotIndex(scope_info_, name_handle, &mode,
-                                          &init_flag, &maybe_assigned_flag);
-  if (index < 0 && scope_type() == MODULE_SCOPE) {
+  {
+    location = VariableLocation::CONTEXT;
+    index = ScopeInfo::ContextSlotIndex(scope_info_, name_handle, &mode,
+                                        &init_flag, &maybe_assigned_flag);
+    found = index >= 0;
+  }
+
+  if (!found && scope_type() == MODULE_SCOPE) {
     location = VariableLocation::MODULE;
     index = scope_info_->ModuleIndex(name_handle, &mode, &init_flag,
                                      &maybe_assigned_flag);
+    found = index != 0;
   }
 
-  if (index < 0) {
+  if (!found) {
     index = scope_info_->FunctionContextSlotIndex(*name_handle);
     if (index < 0) return nullptr;  // Nowhere found.
     Variable* var = AsDeclarationScope()->DeclareFunctionVar(name);
@@ -849,9 +878,6 @@
     // TODO(wingo): Avoid O(n^2) check.
     *is_duplicate = IsDeclaredParameter(name);
   }
-  if (!is_optional && !is_rest && arity_ == params_.length()) {
-    ++arity_;
-  }
   has_rest_ = is_rest;
   params_.Add(var, zone());
   if (name == ast_value_factory->arguments_string()) {
@@ -971,22 +997,20 @@
   // same variable if it is declared several times. This is not a
   // semantic issue, but it may be a performance issue since it may
   // lead to repeated DeclareEvalVar or DeclareEvalFunction calls.
-  decls_.Add(declaration, zone());
+  decls_.Add(declaration);
   proxy->BindTo(var);
   return var;
 }
 
 VariableProxy* Scope::NewUnresolved(AstNodeFactory* factory,
                                     const AstRawString* name,
-                                    int start_position, int end_position,
-                                    VariableKind kind) {
+                                    int start_position, VariableKind kind) {
   // Note that we must not share the unresolved variables with
   // the same name because they may be removed selectively via
   // RemoveUnresolved().
   DCHECK(!already_resolved_);
   DCHECK_EQ(!needs_migration_, factory->zone() == zone());
-  VariableProxy* proxy =
-      factory->NewVariableProxy(name, kind, start_position, end_position);
+  VariableProxy* proxy = factory->NewVariableProxy(name, kind, start_position);
   proxy->set_next_unresolved(unresolved_);
   unresolved_ = proxy;
   return proxy;
@@ -1027,7 +1051,7 @@
 }
 
 bool Scope::RemoveUnresolved(const AstRawString* name) {
-  if (unresolved_->raw_name() == name) {
+  if (unresolved_ != nullptr && unresolved_->raw_name() == name) {
     VariableProxy* removed = unresolved_;
     unresolved_ = unresolved_->next_unresolved();
     removed->set_next_unresolved(nullptr);
@@ -1036,7 +1060,7 @@
   VariableProxy* current = unresolved_;
   while (current != nullptr) {
     VariableProxy* next = current->next_unresolved();
-    if (next->raw_name() == name) {
+    if (next != nullptr && next->raw_name() == name) {
       current->set_next_unresolved(next->next_unresolved());
       next->set_next_unresolved(nullptr);
       return true;
@@ -1055,9 +1079,7 @@
 }
 
 Declaration* Scope::CheckConflictingVarDeclarations() {
-  int length = decls_.length();
-  for (int i = 0; i < length; i++) {
-    Declaration* decl = decls_[i];
+  for (Declaration* decl : decls_) {
     VariableMode mode = decl->proxy()->var()->mode();
     if (IsLexicalVariableMode(mode) && !is_block_scope()) continue;
 
@@ -1092,10 +1114,8 @@
       // Conflict; find and return its declaration.
       DCHECK(IsLexicalVariableMode(var->mode()));
       const AstRawString* name = names.at(i);
-      for (int j = 0; j < decls_.length(); ++j) {
-        if (decls_[j]->proxy()->raw_name() == name) {
-          return decls_[j];
-        }
+      for (Declaration* decl : decls_) {
+        if (decl->proxy()->raw_name() == name) return decl;
       }
       DCHECK(false);
     }
@@ -1104,16 +1124,20 @@
 }
 
 void DeclarationScope::AllocateVariables(ParseInfo* info, AnalyzeMode mode) {
+  // Module variables must be allocated before variable resolution
+  // to ensure that AccessNeedsHoleCheck() can detect import variables.
+  if (is_module_scope()) AsModuleScope()->AllocateModuleVariables();
+
   ResolveVariablesRecursively(info);
   AllocateVariablesRecursively();
 
   MaybeHandle<ScopeInfo> outer_scope;
-  for (const Scope* s = outer_scope_; s != nullptr; s = s->outer_scope_) {
-    if (s->scope_info_.is_null()) continue;
-    outer_scope = s->scope_info_;
-    break;
+  if (outer_scope_ != nullptr) outer_scope = outer_scope_->scope_info_;
+
+  AllocateScopeInfosRecursively(info->isolate(), outer_scope);
+  if (mode == AnalyzeMode::kDebugger) {
+    AllocateDebuggerScopeInfos(info->isolate(), outer_scope);
   }
-  AllocateScopeInfosRecursively(info->isolate(), mode, outer_scope);
   // The debugger expects all shared function infos to contain a scope info.
   // Since the top-most scope will end up in a shared function info, make sure
   // it has one, even if it doesn't need a scope info.
@@ -1123,14 +1147,29 @@
   }
 }
 
-bool Scope::AllowsLazyParsingWithoutUnresolvedVariables() const {
-  // If we are inside a block scope, we must find unresolved variables in the
-  // inner scopes to find out how to allocate variables on the block scope. At
-  // this point, declarations may not have yet been parsed.
-  for (const Scope* s = this; s != nullptr; s = s->outer_scope_) {
-    if (s->is_block_scope()) return false;
-    // TODO(marja): Refactor parsing modes: also add s->is_function_scope()
-    // here.
+bool Scope::AllowsLazyParsingWithoutUnresolvedVariables(
+    const Scope* outer) const {
+  // If none of the outer scopes need to decide whether to context allocate
+  // specific variables, we can preparse inner functions without unresolved
+  // variables. Otherwise we need to find unresolved variables to force context
+  // allocation of the matching declarations. The walk can stop at `outer`,
+  // the scope enclosing this parse, since context allocation of variables
+  // from `outer` outward is already guaranteed to be correct.
+  for (const Scope* s = this; s != outer; s = s->outer_scope_) {
+    // Eval forces context allocation on all outer scopes, so we don't need to
+    // look at those scopes. Sloppy eval makes all top-level variables dynamic,
+    // whereas strict-mode requires context allocation.
+    if (s->is_eval_scope()) return !is_strict(s->language_mode());
+    // Catch scopes force context allocation of all variables.
+    if (s->is_catch_scope()) continue;
+    // With scopes do not introduce variables that need allocation.
+    if (s->is_with_scope()) continue;
+    // If everything is guaranteed to be context allocated we can ignore the
+    // scope.
+    if (s->has_forced_context_allocation()) continue;
+    // Only block scopes and function scopes should disallow preparsing.
+    DCHECK(s->is_block_scope() || s->is_function_scope());
+    return false;
   }
   return true;
 }
@@ -1139,17 +1178,6 @@
   return !force_eager_compilation_;
 }
 
-bool DeclarationScope::AllowsLazyCompilationWithoutContext() const {
-  if (force_eager_compilation_) return false;
-  // Disallow lazy compilation without context if any outer scope needs a
-  // context.
-  for (const Scope* scope = outer_scope_; scope != nullptr;
-       scope = scope->outer_scope_) {
-    if (scope->NeedsContext()) return false;
-  }
-  return true;
-}
-
 int Scope::ContextChainLength(Scope* scope) const {
   int n = 0;
   for (const Scope* s = this; s != scope; s = s->outer_scope_) {
@@ -1175,6 +1203,7 @@
 int Scope::MaxNestedContextChainLength() {
   int max_context_chain_length = 0;
   for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
+    if (scope->is_function_scope()) continue;
     max_context_chain_length = std::max(scope->MaxNestedContextChainLength(),
                                         max_context_chain_length);
   }
@@ -1192,6 +1221,14 @@
   return scope->AsDeclarationScope();
 }
 
+const DeclarationScope* Scope::GetClosureScope() const {
+  const Scope* scope = this;
+  while (!scope->is_declaration_scope() || scope->is_block_scope()) {
+    scope = scope->outer_scope();
+  }
+  return scope->AsDeclarationScope();
+}
+
 DeclarationScope* Scope::GetClosureScope() {
   Scope* scope = this;
   while (!scope->is_declaration_scope() || scope->is_block_scope()) {
@@ -1200,6 +1237,15 @@
   return scope->AsDeclarationScope();
 }
 
+bool Scope::NeedsScopeInfo() const {
+  DCHECK(!already_resolved_);
+  DCHECK(GetClosureScope()->ShouldEagerCompile());
+  // The debugger expects all functions to have scope infos.
+  // TODO(jochen|yangguo): Remove this requirement.
+  if (is_function_scope()) return true;
+  return NeedsContext();
+}
+
 ModuleScope* Scope::GetModuleScope() {
   Scope* scope = this;
   DCHECK(!scope->is_script_scope());
@@ -1243,32 +1289,17 @@
   DCHECK(is_function_scope());
 
   // Reset all non-trivial members.
-  decls_.Rewind(0);
-  locals_.Rewind(0);
+  params_.Clear();
+  decls_.Clear();
+  locals_.Clear();
   sloppy_block_function_map_.Clear();
   variables_.Clear();
   // Make sure we won't walk the scope tree from here on.
   inner_scope_ = nullptr;
   unresolved_ = nullptr;
 
-  // TODO(verwaest): We should properly preparse the parameters (no declarations
-  // should be created), and reparse on abort.
-  if (aborted) {
-    if (!IsArrowFunction(function_kind_)) {
-      DeclareDefaultFunctionVariables(ast_value_factory);
-    }
-    // Recreate declarations for parameters.
-    for (int i = 0; i < params_.length(); i++) {
-      Variable* var = params_[i];
-      if (var->mode() == TEMPORARY) {
-        locals_.Add(var, zone());
-      } else if (variables_.Lookup(var->raw_name()) == nullptr) {
-        variables_.Add(zone(), var);
-        locals_.Add(var, zone());
-      }
-    }
-  } else {
-    params_.Rewind(0);
+  if (aborted && !IsArrowFunction(function_kind_)) {
+    DeclareDefaultFunctionVariables(ast_value_factory);
   }
 
 #ifdef DEBUG
@@ -1378,9 +1409,9 @@
       PrintF("forced context allocation");
       comma = true;
     }
-    if (var->maybe_assigned() == kMaybeAssigned) {
+    if (var->maybe_assigned() == kNotAssigned) {
       if (comma) PrintF(", ");
-      PrintF("maybe assigned");
+      PrintF("never assigned");
     }
     PrintF("\n");
   }
@@ -1454,7 +1485,11 @@
     Indent(n1, "// scope uses 'super' property\n");
   }
   if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n");
-  if (is_lazily_parsed_) Indent(n1, "// lazily parsed\n");
+  if (is_declaration_scope()) {
+    DeclarationScope* scope = AsDeclarationScope();
+    if (scope->is_lazily_parsed()) Indent(n1, "// lazily parsed\n");
+    if (scope->ShouldEagerCompile()) Indent(n1, "// will be compiled\n");
+  }
   if (num_stack_slots_ > 0) {
     Indent(n1, "// ");
     PrintF("%d stack slots\n", num_stack_slots_);
@@ -1491,8 +1526,7 @@
 }
 
 void Scope::CheckScopePositions() {
-  // A scope is allowed to have invalid positions if it is hidden and has no
-  // inner scopes
+  // Visible leaf scopes must have real positions.
   if (!is_hidden() && inner_scope_ == nullptr) {
     CHECK_NE(kNoSourcePosition, start_position());
     CHECK_NE(kNoSourcePosition, end_position());
@@ -1632,6 +1666,59 @@
   }
 }
 
+namespace {
+
+bool AccessNeedsHoleCheck(Variable* var, VariableProxy* proxy, Scope* scope) {
+  if (!var->binding_needs_init()) {
+    return false;
+  }
+
+  // It's impossible to eliminate module import hole checks here, because it's
+  // unknown at compilation time whether the binding referred to in the
+  // exporting module itself requires hole checks.
+  if (var->location() == VariableLocation::MODULE && !var->IsExport()) {
+    return true;
+  }
+
+  // Check if the binding really needs an initialization check. The check
+  // can be skipped in the following situation: we have a LET or CONST
+  // binding, both the Variable and the VariableProxy have the same
+  // declaration scope (i.e. they are both in global code, in the
+  // same function or in the same eval code), the VariableProxy is
+  // physically located in the source after the variable's initializer,
+  // and the initializer cannot be skipped due to a nonlinear scope.
+  //
+  // The condition on the declaration scopes is a conservative check for
+  // nested functions that access a binding and are called before the
+  // binding is initialized:
+  //   function() { f(); let x = 1; function f() { x = 2; } }
+  //
+  // The check cannot be skipped on non-linear scopes, namely switch
+  // scopes, to ensure tests are done in cases like the following:
+  //   switch (1) { case 0: let x = 2; case 1: f(x); }
+  // The scope of the variable needs to be checked, in case the use is
+  // in a sub-block which may be linear.
+  if (var->scope()->GetDeclarationScope() != scope->GetDeclarationScope()) {
+    return true;
+  }
+
+  if (var->is_this()) {
+    DCHECK(
+        IsSubclassConstructor(scope->GetDeclarationScope()->function_kind()));
+    // TODO(littledan): implement 'this' hole check elimination.
+    return true;
+  }
+
+  // We should always have valid source positions.
+  DCHECK(var->initializer_position() != kNoSourcePosition);
+  DCHECK(proxy->position() != kNoSourcePosition);
+
+  return var->scope()->is_nonlinear() ||
+         var->initializer_position() >= proxy->position();
+}
+
+}  // anonymous namespace
+
 void Scope::ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var) {
 #ifdef DEBUG
   if (info->script_is_native()) {
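
AccessNeedsHoleCheck centralizes the TDZ analysis deciding whether a use of a lexical binding must test for the_hole; the comments above enumerate the cases that keep the check. For contrast, a case it elides (sketch, JS shown in comment form since this is parser-side C++):

    //   let x = 1;
    //   f(x);   // Same declaration scope, linear control flow, and the
    //           // proxy's position is past the initializer's position, so
    //           // AccessNeedsHoleCheck returns false and the proxy keeps
    //           // HoleCheckMode::kElided; downstream code generators can
    //           // skip the hole check entirely.
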
@@ -1656,6 +1743,7 @@
 
   DCHECK_NOT_NULL(var);
   if (proxy->is_assigned()) var->set_maybe_assigned();
+  if (AccessNeedsHoleCheck(var, proxy, this)) proxy->set_needs_hole_check();
   proxy->BindTo(var);
 }
 
@@ -1833,8 +1921,8 @@
 }
 
 void Scope::AllocateNonParameterLocalsAndDeclaredGlobals() {
-  for (int i = 0; i < locals_.length(); i++) {
-    AllocateNonParameterLocal(locals_[i]);
+  for (Variable* local : locals_) {
+    AllocateNonParameterLocal(local);
   }
 
   if (is_declaration_scope()) {
@@ -1866,13 +1954,14 @@
 void ModuleScope::AllocateModuleVariables() {
   for (const auto& it : module()->regular_imports()) {
     Variable* var = LookupLocal(it.first);
-    // TODO(neis): Use a meaningful index.
-    var->AllocateTo(VariableLocation::MODULE, 42);
+    var->AllocateTo(VariableLocation::MODULE, it.second->cell_index);
+    DCHECK(!var->IsExport());
   }
 
   for (const auto& it : module()->regular_exports()) {
     Variable* var = LookupLocal(it.first);
-    var->AllocateTo(VariableLocation::MODULE, 0);
+    var->AllocateTo(VariableLocation::MODULE, it.second->cell_index);
+    DCHECK(var->IsExport());
   }
 }
 
@@ -1880,7 +1969,9 @@
   DCHECK(!already_resolved_);
   DCHECK_EQ(0, num_stack_slots_);
   // Don't allocate variables of preparsed scopes.
-  if (is_lazily_parsed_) return;
+  if (is_declaration_scope() && AsDeclarationScope()->is_lazily_parsed()) {
+    return;
+  }
 
   // Allocate variables for inner scopes.
   for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
@@ -1893,9 +1984,7 @@
   // Allocate variables for this scope.
   // Parameters must be allocated first, if any.
   if (is_declaration_scope()) {
-    if (is_module_scope()) {
-      AsModuleScope()->AllocateModuleVariables();
-    } else if (is_function_scope()) {
+    if (is_function_scope()) {
       AsDeclarationScope()->AllocateParameterLocals();
     }
     AsDeclarationScope()->AllocateReceiver();
@@ -1921,21 +2010,36 @@
   DCHECK(num_heap_slots_ == 0 || num_heap_slots_ >= Context::MIN_CONTEXT_SLOTS);
 }
 
-void Scope::AllocateScopeInfosRecursively(Isolate* isolate, AnalyzeMode mode,
+void Scope::AllocateScopeInfosRecursively(Isolate* isolate,
                                           MaybeHandle<ScopeInfo> outer_scope) {
   DCHECK(scope_info_.is_null());
-  if (mode == AnalyzeMode::kDebugger || NeedsScopeInfo()) {
-    scope_info_ = ScopeInfo::Create(isolate, zone(), this, outer_scope);
-  }
-
-  // The ScopeInfo chain should mirror the context chain, so we only link to
-  // the next outer scope that needs a context.
   MaybeHandle<ScopeInfo> next_outer_scope = outer_scope;
-  if (NeedsContext()) next_outer_scope = scope_info_;
+
+  if (NeedsScopeInfo()) {
+    scope_info_ = ScopeInfo::Create(isolate, zone(), this, outer_scope);
+    // The ScopeInfo chain should mirror the context chain, so we only link to
+    // the next outer scope that needs a context.
+    if (NeedsContext()) next_outer_scope = scope_info_;
+  }
 
   // Allocate ScopeInfos for inner scopes.
   for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
-    scope->AllocateScopeInfosRecursively(isolate, mode, next_outer_scope);
+    if (!scope->is_function_scope() ||
+        scope->AsDeclarationScope()->ShouldEagerCompile()) {
+      scope->AllocateScopeInfosRecursively(isolate, next_outer_scope);
+    }
+  }
+}
+
+void Scope::AllocateDebuggerScopeInfos(Isolate* isolate,
+                                       MaybeHandle<ScopeInfo> outer_scope) {
+  if (scope_info_.is_null()) {
+    scope_info_ = ScopeInfo::Create(isolate, zone(), this, outer_scope);
+  }
+  MaybeHandle<ScopeInfo> outer = NeedsContext() ? scope_info_ : outer_scope;
+  for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
+    if (scope->is_function_scope()) continue;
+    scope->AllocateDebuggerScopeInfos(isolate, outer);
   }
 }
 
diff --git a/src/ast/scopes.h b/src/ast/scopes.h
index 0acff8a..c7d88ac 100644
--- a/src/ast/scopes.h
+++ b/src/ast/scopes.h
@@ -5,6 +5,7 @@
 #ifndef V8_AST_SCOPES_H_
 #define V8_AST_SCOPES_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/base/hashmap.h"
 #include "src/globals.h"
 #include "src/objects.h"
@@ -62,7 +63,7 @@
 // and ModuleScope. DeclarationScope is used for any scope that hosts 'var'
 // declarations. This includes script, module, eval, varblock, and function
 // scope. ModuleScope further specializes DeclarationScope.
-class Scope: public ZoneObject {
+class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
  public:
   // ---------------------------------------------------------------------------
   // Construction
@@ -95,8 +96,8 @@
     Scope* outer_scope_;
     Scope* top_inner_scope_;
     VariableProxy* top_unresolved_;
-    int top_local_;
-    int top_decl_;
+    ThreadedList<Variable>::Iterator top_local_;
+    ThreadedList<Declaration>::Iterator top_decl_;
   };
 
   enum class DeserializationMode { kIncludingVariables, kScopesOnly };
@@ -157,15 +158,14 @@
                             bool* ok);
 
   // Declarations list.
-  ZoneList<Declaration*>* declarations() { return &decls_; }
+  ThreadedList<Declaration>* declarations() { return &decls_; }
 
-  ZoneList<Variable*>* locals() { return &locals_; }
+  ThreadedList<Variable>* locals() { return &locals_; }
 
   // Create a new unresolved variable.
   VariableProxy* NewUnresolved(AstNodeFactory* factory,
                                const AstRawString* name,
                                int start_position = kNoSourcePosition,
-                               int end_position = kNoSourcePosition,
                                VariableKind kind = NORMAL_VARIABLE);
 
   void AddUnresolved(VariableProxy* proxy);
@@ -351,7 +351,7 @@
 
   // Determine if we can parse a function literal in this scope lazily without
   // caring about the unresolved variables within.
-  bool AllowsLazyParsingWithoutUnresolvedVariables() const;
+  bool AllowsLazyParsingWithoutUnresolvedVariables(const Scope* outer) const;
 
   // The number of contexts between this and scope; zero if this == scope.
   int ContextChainLength(Scope* scope) const;
@@ -374,6 +374,7 @@
   // the scope for which a function prologue allocates a context) or declaring
   // temporaries.
   DeclarationScope* GetClosureScope();
+  const DeclarationScope* GetClosureScope() const;
 
   // Find the first (non-arrow) function or script scope.  This is where
   // 'this' is bound, and what determines the function kind.
@@ -422,8 +423,6 @@
   void set_is_debug_evaluate_scope() { is_debug_evaluate_scope_ = true; }
   bool is_debug_evaluate_scope() const { return is_debug_evaluate_scope_; }
 
-  bool is_lazily_parsed() const { return is_lazily_parsed_; }
-
  protected:
   explicit Scope(Zone* zone);
 
@@ -435,29 +434,13 @@
   Variable* Declare(Zone* zone, Scope* scope, const AstRawString* name,
                     VariableMode mode, VariableKind kind,
                     InitializationFlag initialization_flag,
-                    MaybeAssignedFlag maybe_assigned_flag = kNotAssigned) {
-    bool added;
-    Variable* var =
-        variables_.Declare(zone, scope, name, mode, kind, initialization_flag,
-                           maybe_assigned_flag, &added);
-    if (added) locals_.Add(var, zone);
-    return var;
-  }
+                    MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
 
   // This method should only be invoked on scopes created during parsing (i.e.,
   // not deserialized from a context). Also, since NeedsContext() is only
   // returning a valid result after variables are resolved, NeedsScopeInfo()
   // should also be invoked after resolution.
-  bool NeedsScopeInfo() const {
-    DCHECK(!already_resolved_);
-    // A lazily parsed scope doesn't contain enough information to create a
-    // ScopeInfo from it.
-    if (is_lazily_parsed_) return false;
-    // The debugger expects all functions to have scope infos.
-    // TODO(jochen|yangguo): Remove this requirement.
-    if (is_function_scope()) return true;
-    return NeedsContext();
-  }
+  bool NeedsScopeInfo() const;
 
   Zone* zone_;
 
@@ -474,13 +457,12 @@
   VariableMap variables_;
   // In case of non-scopeinfo-backed scopes, this contains the variables of the
   // map above in order of addition.
-  // TODO(verwaest): Thread through Variable.
-  ZoneList<Variable*> locals_;
+  ThreadedList<Variable> locals_;
   // Unresolved variables referred to from this scope. The proxies themselves
   // form a linked list of all unresolved proxies.
   VariableProxy* unresolved_;
   // Declarations.
-  ZoneList<Declaration*> decls_;
+  ThreadedList<Declaration> decls_;
 
   // Serialized scope info support.
   Handle<ScopeInfo> scope_info_;
@@ -527,8 +509,6 @@
   // True if it holds 'var' declarations.
   bool is_declaration_scope_ : 1;
 
-  bool is_lazily_parsed_ : 1;
-
   // Create a non-local variable with a given name.
   // These variables are looked up dynamically at runtime.
   Variable* NonLocal(const AstRawString* name, VariableMode mode);
@@ -563,8 +543,10 @@
   void AllocateNonParameterLocalsAndDeclaredGlobals();
   void AllocateVariablesRecursively();
 
-  void AllocateScopeInfosRecursively(Isolate* isolate, AnalyzeMode mode,
+  void AllocateScopeInfosRecursively(Isolate* isolate,
                                      MaybeHandle<ScopeInfo> outer_scope);
+  void AllocateDebuggerScopeInfos(Isolate* isolate,
+                                  MaybeHandle<ScopeInfo> outer_scope);
 
   // Construct a scope based on the scope info.
   Scope(Zone* zone, ScopeType type, Handle<ScopeInfo> scope_info);
@@ -634,6 +616,10 @@
                                         IsClassConstructor(function_kind())));
   }
 
+  bool is_lazily_parsed() const { return is_lazily_parsed_; }
+  bool ShouldEagerCompile() const;
+  void set_should_eager_compile();
+
   void SetScriptScopeInfo(Handle<ScopeInfo> scope_info) {
     DCHECK(is_script_scope());
     DCHECK(scope_info_.is_null());
@@ -704,16 +690,6 @@
     return params_[index];
   }
 
-  // Returns the default function arity excluding default or rest parameters.
-  // This will be used to set the length of the function, by default.
-  // Class field initializers use this property to indicate the number of
-  // fields being initialized.
-  int arity() const { return arity_; }
-
-  // Normal code should not need to call this. Class field initializers use this
-  // property to indicate the number of fields being initialized.
-  void set_arity(int arity) { arity_ = arity; }
-
   // Returns the number of formal parameters, excluding a possible rest
   // parameter.  Examples:
   //   function foo(a, b) {}         ==> 2
@@ -758,12 +734,7 @@
   // Adds a local variable in this scope's locals list. This is for adjusting
   // the scope of temporaries and do-expression vars when desugaring parameter
   // initializers.
-  void AddLocal(Variable* var) {
-    DCHECK(!already_resolved_);
-    // Temporaries are only placed in ClosureScopes.
-    DCHECK_EQ(GetClosureScope(), this);
-    locals_.Add(var, zone());
-  }
+  void AddLocal(Variable* var);
 
   void DeclareSloppyBlockFunction(const AstRawString* name,
                                   SloppyBlockFunctionStatement* statement) {
@@ -796,16 +767,15 @@
   // Determine if we can use lazy compilation for this scope.
   bool AllowsLazyCompilation() const;
 
-  // Determine if we can use lazy compilation for this scope without a context.
-  bool AllowsLazyCompilationWithoutContext() const;
-
   // Make sure this closure and all outer closures are eagerly compiled.
   void ForceEagerCompilation() {
     DCHECK_EQ(this, GetClosureScope());
-    for (DeclarationScope* s = this; !s->is_script_scope();
+    DeclarationScope* s;
+    for (s = this; !s->is_script_scope();
          s = s->outer_scope()->GetClosureScope()) {
       s->force_eager_compilation_ = true;
     }
+    s->force_eager_compilation_ = true;
   }
 
 #ifdef DEBUG
@@ -848,9 +818,9 @@
   bool has_arguments_parameter_ : 1;
   // This scope uses "super" property ('super.foo').
   bool scope_uses_super_property_ : 1;
+  bool should_eager_compile_ : 1;
+  bool is_lazily_parsed_ : 1;
 
-  // Info about the parameter list of a function.
-  int arity_;
   // Parameter list in source order.
   ZoneList<Variable*> params_;
   // Map of function names to lists of functions defined in sloppy blocks
@@ -884,7 +854,7 @@
     return module_descriptor_;
   }
 
-  // Set MODULE as VariableLocation for all variables that will live in some
+  // Set MODULE as VariableLocation for all variables that will live in a
   // module's export table.
   void AllocateModuleVariables();
 
diff --git a/src/ast/variables.cc b/src/ast/variables.cc
index cc269cd..3771bfe 100644
--- a/src/ast/variables.cc
+++ b/src/ast/variables.cc
@@ -19,6 +19,7 @@
     : scope_(scope),
       name_(name),
       local_if_not_shadowed_(nullptr),
+      next_(nullptr),
       index_(-1),
       initializer_position_(kNoSourcePosition),
       bit_field_(MaybeAssignedFlagField::encode(maybe_assigned_flag) |
@@ -40,14 +41,5 @@
          scope_ != NULL && scope_->is_script_scope();
 }
 
-
-bool Variable::IsStaticGlobalObjectProperty() const {
-  // Temporaries are never global, they must always be allocated in the
-  // activation frame.
-  return (IsDeclaredVariableMode(mode()) && !IsLexicalVariableMode(mode())) &&
-         scope_ != NULL && scope_->is_script_scope();
-}
-
-
 }  // namespace internal
 }  // namespace v8
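
For reference, the `next_` member initialized above threads each Variable into an intrusive list (see the `ThreadedList<Variable>` typedef in the variables.h hunk that follows). A compilable sketch of the pattern, not V8's actual template:

```cpp
// Minimal intrusive-list sketch: each element stores its own next pointer,
// so appending needs no per-node allocation and is O(1). T must expose a
// T** next() accessor (Variable grants this via "friend List").
template <typename T>
class IntrusiveListSketch {
 public:
  IntrusiveListSketch() : head_(nullptr), tail_(&head_) {}
  void Add(T* element) {
    *tail_ = element;         // link the element in at the tail
    tail_ = element->next();  // the element's next-slot becomes the new tail
    *tail_ = nullptr;
  }
  T* first() const { return head_; }

 private:
  T* head_;
  T** tail_;  // address of the last next-slot, enabling O(1) append
};
```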
diff --git a/src/ast/variables.h b/src/ast/variables.h
index 5bc7869..b7d9226 100644
--- a/src/ast/variables.h
+++ b/src/ast/variables.h
@@ -64,7 +64,6 @@
   bool IsContextSlot() const { return location() == VariableLocation::CONTEXT; }
   bool IsLookupSlot() const { return location() == VariableLocation::LOOKUP; }
   bool IsGlobalObjectProperty() const;
-  bool IsStaticGlobalObjectProperty() const;
 
   bool is_dynamic() const { return IsDynamicVariableMode(mode()); }
   bool binding_needs_init() const {
@@ -102,13 +101,15 @@
   int index() const { return index_; }
 
   bool IsExport() const {
-    DCHECK(location() == VariableLocation::MODULE);
-    return index() == 0;
+    DCHECK_EQ(location(), VariableLocation::MODULE);
+    DCHECK_NE(index(), 0);
+    return index() > 0;
   }
 
   void AllocateTo(VariableLocation location, int index) {
     DCHECK(IsUnallocated() ||
            (this->location() == location && this->index() == index));
+    DCHECK_IMPLIES(location == VariableLocation::MODULE, index != 0);
     bit_field_ = LocationField::update(bit_field_, location);
     DCHECK_EQ(location, this->location());
     index_ = index;
@@ -119,6 +120,8 @@
     return mode == VAR ? kCreatedInitialized : kNeedsInitialization;
   }
 
+  typedef ThreadedList<Variable> List;
+
  private:
   Scope* scope_;
   const AstRawString* name_;
@@ -128,6 +131,7 @@
   // sloppy 'eval' calls between the reference scope (inclusive) and the
   // binding scope (exclusive).
   Variable* local_if_not_shadowed_;
+  Variable* next_;
   int index_;
   int initializer_position_;
   uint16_t bit_field_;
@@ -146,6 +150,8 @@
   class MaybeAssignedFlagField
       : public BitField16<MaybeAssignedFlag, InitializationFlagField::kNext,
                           2> {};
+  Variable** next() { return &next_; }
+  friend List;
 };
 }  // namespace internal
 }  // namespace v8
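
A sketch of the index convention the new DCHECKs imply (an assumption drawn from `IsExport()`: MODULE-allocated variables use positive indices for exports and negative indices for imports, so index 0 never occurs):

```cpp
// Illustrative only; the helper and its callers are hypothetical.
void AllocateModuleCellsSketch(v8::internal::Variable* exported_var,
                               v8::internal::Variable* imported_var) {
  using v8::internal::VariableLocation;
  exported_var->AllocateTo(VariableLocation::MODULE, +1);  // IsExport(): true
  imported_var->AllocateTo(VariableLocation::MODULE, -1);  // IsExport(): false
}
```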
diff --git a/src/background-parsing-task.cc b/src/background-parsing-task.cc
index 83075c1..e0af700 100644
--- a/src/background-parsing-task.cc
+++ b/src/background-parsing-task.cc
@@ -29,24 +29,20 @@
 
   // Prepare the data for the internalization phase and compilation phase, which
   // will happen in the main thread after parsing.
-  Zone* zone = new Zone(isolate->allocator());
+  Zone* zone = new Zone(isolate->allocator(), ZONE_NAME);
   ParseInfo* info = new ParseInfo(zone);
+  info->set_toplevel();
   source->zone.reset(zone);
   source->info.reset(info);
   info->set_isolate(isolate);
   info->set_source_stream(source->source_stream.get());
   info->set_source_stream_encoding(source->encoding);
   info->set_hash_seed(isolate->heap()->HashSeed());
-  info->set_global();
   info->set_unicode_cache(&source_->unicode_cache);
   info->set_compile_options(options);
-  // Parse eagerly with ignition since we will compile eagerly.
-  info->set_allow_lazy_parsing(!(i::FLAG_ignition && i::FLAG_ignition_eager));
+  info->set_allow_lazy_parsing();
 
-  if (options == ScriptCompiler::kProduceParserCache ||
-      options == ScriptCompiler::kProduceCodeCache) {
-    source_->info->set_cached_data(&script_data_);
-  }
+  source_->info->set_cached_data(&script_data_);
   // Parser needs to stay alive for finalizing the parsing on the main
   // thread.
   source_->parser.reset(new Parser(source_->info.get()));
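
The zone now receives a name. A sketch of the intent (the literal string and the accounting claim are assumptions; `ZONE_NAME` presumably expands to a creation-site label):

```cpp
// Sketch only: naming zones lets allocator statistics attribute memory to
// the site that created the zone.
Zone* zone = new Zone(isolate->allocator(), "background-parsing");
ParseInfo* info = new ParseInfo(zone);  // AST allocations are charged to it
```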
diff --git a/src/bailout-reason.h b/src/bailout-reason.h
index 6b7da16..247024f 100644
--- a/src/bailout-reason.h
+++ b/src/bailout-reason.h
@@ -35,6 +35,7 @@
   V(kBailoutWasNotPrepared, "Bailout was not prepared")                        \
   V(kBothRegistersWereSmisInSelectNonSmi,                                      \
     "Both registers were smis in SelectNonSmi")                                \
+  V(kClassConstructorFunction, "Class constructor function")                   \
   V(kClassLiteral, "Class literal")                                            \
   V(kCodeGenerationFailed, "Code generation failed")                           \
   V(kCodeObjectNotProperlyPatched, "Code object not properly patched")         \
@@ -60,6 +61,8 @@
   V(kEliminatedBoundsCheckFailed, "Eliminated bounds check failed")            \
   V(kEmitLoadRegisterUnsupportedDoubleImmediate,                               \
     "EmitLoadRegister: Unsupported double immediate")                          \
+  V(kCyclicObjectStateDetectedInEscapeAnalysis,                                \
+    "Cyclic object state detected by escape analysis")                         \
   V(kEval, "eval")                                                             \
   V(kExpectedAllocationSite, "Expected allocation site")                       \
   V(kExpectedBooleanValue, "Expected boolean value")                           \
@@ -74,7 +77,6 @@
   V(kExpectedPositiveZero, "Expected +0.0")                                    \
   V(kExpectedNewSpaceObject, "Expected new space object")                      \
   V(kExpectedUndefinedOrCell, "Expected undefined or cell in register")        \
-  V(kExpectingAlignmentForCopyBytes, "Expecting alignment for CopyBytes")      \
   V(kExternalStringExpectedButNotFound,                                        \
     "External string expected, but not found")                                 \
   V(kForInStatementWithNonLocalEachVariable,                                   \
@@ -116,10 +118,6 @@
   V(kInvalidLhsInCountOperation, "Invalid lhs in count operation")             \
   V(kInvalidMinLength, "Invalid min_length")                                   \
   V(kInvalidRegisterFileInGenerator, "invalid register file in generator")     \
-  V(kJSGlobalObjectNativeContextShouldBeANativeContext,                        \
-    "JSGlobalObject::native_context should be a native context")               \
-  V(kJSGlobalProxyContextShouldNotBeNull,                                      \
-    "JSGlobalProxy::context() should not be null")                             \
   V(kJSObjectWithFastElementsMapHasSlowElements,                               \
     "JSObject with fast elements map has slow elements")                       \
   V(kLetBindingReInitialization, "Let binding re-initialization")              \
@@ -176,6 +174,7 @@
   V(kReferenceToAVariableWhichRequiresDynamicLookup,                           \
     "Reference to a variable which requires dynamic lookup")                   \
   V(kReferenceToGlobalLexicalVariable, "Reference to global lexical variable") \
+  V(kReferenceToModuleVariable, "Reference to module-allocated variable")      \
   V(kReferenceToUninitializedVariable, "Reference to uninitialized variable")  \
   V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root")  \
   V(kRegisterWasClobbered, "Register was clobbered")                           \
@@ -247,6 +246,7 @@
   V(kUnsupportedLetCompoundAssignment, "Unsupported let compound assignment")  \
   V(kUnsupportedLookupSlotInDeclaration,                                       \
     "Unsupported lookup slot in declaration")                                  \
+  V(kUnsupportedModuleOperation, "Unsupported module operation")               \
   V(kUnsupportedNonPrimitiveCompare, "Unsupported non-primitive compare")      \
   V(kUnsupportedPhiUseOfArguments, "Unsupported phi use of arguments")         \
   V(kUnsupportedPhiUseOfConstVariable,                                         \
@@ -256,8 +256,6 @@
   V(kUnsupportedTaggedImmediate, "Unsupported tagged immediate")               \
   V(kUnstableConstantTypeHeapObject, "Unstable constant-type heap object")     \
   V(kVariableResolvedToWithContext, "Variable resolved to with context")       \
-  V(kWeShouldNotHaveAnEmptyLexicalContext,                                     \
-    "We should not have an empty lexical context")                             \
   V(kWithStatement, "WithStatement")                                           \
   V(kWrongFunctionContext, "Wrong context passed to function")                 \
   V(kWrongAddressOrValuePassedToRecordWrite,                                   \
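
The `V(...)` entries above are consumed by an X-macro. A self-contained sketch of the expansion pattern (simplified plumbing, not V8's exact definitions):

```cpp
// Each V(kName, "message") entry yields both an enum constant and its
// human-readable message string.
#define DEMO_MESSAGES_LIST(V)                                 \
  V(kClassConstructorFunction, "Class constructor function")  \
  V(kUnsupportedModuleOperation, "Unsupported module operation")

enum DemoBailoutReason {
#define DEMO_CONSTANT(Name, Message) Name,
  DEMO_MESSAGES_LIST(DEMO_CONSTANT)
#undef DEMO_CONSTANT
  kLastDemoReason
};

static const char* const kDemoMessages[] = {
#define DEMO_MESSAGE(Name, Message) Message,
    DEMO_MESSAGES_LIST(DEMO_MESSAGE)
#undef DEMO_MESSAGE
};
```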
diff --git a/src/base/atomic-utils.h b/src/base/atomic-utils.h
index 31db603..f40853c 100644
--- a/src/base/atomic-utils.h
+++ b/src/base/atomic-utils.h
@@ -51,6 +51,60 @@
   base::AtomicWord value_;
 };
 
+// This type uses no-barrier accessors to change the atomic word. Be careful
+// with data races.
+template <typename T>
+class NoBarrierAtomicValue {
+ public:
+  NoBarrierAtomicValue() : value_(0) {}
+
+  explicit NoBarrierAtomicValue(T initial)
+      : value_(cast_helper<T>::to_storage_type(initial)) {}
+
+  static NoBarrierAtomicValue* FromAddress(void* address) {
+    return reinterpret_cast<base::NoBarrierAtomicValue<T>*>(address);
+  }
+
+  V8_INLINE bool TrySetValue(T old_value, T new_value) {
+    return base::NoBarrier_CompareAndSwap(
+               &value_, cast_helper<T>::to_storage_type(old_value),
+               cast_helper<T>::to_storage_type(new_value)) ==
+           cast_helper<T>::to_storage_type(old_value);
+  }
+
+  V8_INLINE T Value() const {
+    return cast_helper<T>::to_return_type(base::NoBarrier_Load(&value_));
+  }
+
+  V8_INLINE void SetValue(T new_value) {
+    base::NoBarrier_Store(&value_, cast_helper<T>::to_storage_type(new_value));
+  }
+
+ private:
+  STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
+
+  template <typename S>
+  struct cast_helper {
+    static base::AtomicWord to_storage_type(S value) {
+      return static_cast<base::AtomicWord>(value);
+    }
+    static S to_return_type(base::AtomicWord value) {
+      return static_cast<S>(value);
+    }
+  };
+
+  template <typename S>
+  struct cast_helper<S*> {
+    static base::AtomicWord to_storage_type(S* value) {
+      return reinterpret_cast<base::AtomicWord>(value);
+    }
+    static S* to_return_type(base::AtomicWord value) {
+      return reinterpret_cast<S*>(value);
+    }
+  };
+
+  base::AtomicWord value_;
+};
 
 // Flag using T atomically. Also accepts void* as T.
 template <typename T>
@@ -73,7 +127,7 @@
   }
 
   V8_INLINE void SetBits(T bits, T mask) {
-    DCHECK_EQ(bits & ~mask, 0);
+    DCHECK_EQ(bits & ~mask, static_cast<T>(0));
     T old_value;
     T new_value;
     do {
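
A usage sketch for the new wrapper (the surrounding function is hypothetical). Every access is relaxed, so callers that need ordering must add their own fences or use `AtomicValue` instead:

```cpp
#include "src/base/atomic-utils.h"

void NoBarrierAtomicValueDemo() {
  v8::base::NoBarrierAtomicValue<int> counter(0);
  if (counter.TrySetValue(0, 1)) {
    // The relaxed compare-and-swap succeeded; absent racing writers,
    // Value() now observes 1.
    int current = counter.Value();
    (void)current;
  }
  counter.SetValue(42);  // plain relaxed store
}
```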
diff --git a/src/base/atomicops.h b/src/base/atomicops.h
index 973e96b..927ebbe 100644
--- a/src/base/atomicops.h
+++ b/src/base/atomicops.h
@@ -26,9 +26,17 @@
 #define V8_BASE_ATOMICOPS_H_
 
 #include <stdint.h>
+
+// Small C++ header which defines implementation-specific macros used to
+// identify the STL implementation:
+// - libc++: captures __config for _LIBCPP_VERSION
+// - libstdc++: captures bits/c++config.h for __GLIBCXX__
+#include <cstddef>
+
+#include "src/base/base-export.h"
 #include "src/base/build_config.h"
 
-#if defined(_WIN32) && defined(V8_HOST_ARCH_64_BIT)
+#if defined(V8_OS_WIN) && defined(V8_HOST_ARCH_64_BIT)
 // windows.h #defines this (only on x64). This causes problems because the
 // public API also uses MemoryBarrier as the public name for this fence. So, on
 // X64, undef it, and call its documented
@@ -100,13 +108,11 @@
 void MemoryBarrier();
 void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value);
 void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
-void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
 void Release_Store(volatile Atomic32* ptr, Atomic32 value);
 
 Atomic8 NoBarrier_Load(volatile const Atomic8* ptr);
 Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
 Atomic32 Acquire_Load(volatile const Atomic32* ptr);
-Atomic32 Release_Load(volatile const Atomic32* ptr);
 
 // 64-bit atomic operations (only available on 64-bit processors).
 #ifdef V8_HOST_ARCH_64_BIT
@@ -124,44 +130,25 @@
                                 Atomic64 old_value,
                                 Atomic64 new_value);
 void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
-void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
 void Release_Store(volatile Atomic64* ptr, Atomic64 value);
 Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
 Atomic64 Acquire_Load(volatile const Atomic64* ptr);
-Atomic64 Release_Load(volatile const Atomic64* ptr);
 #endif  // V8_HOST_ARCH_64_BIT
 
 }  // namespace base
 }  // namespace v8
 
-// Include our platform specific implementation.
-#if defined(THREAD_SANITIZER)
-#include "src/base/atomicops_internals_tsan.h"
-#elif defined(_MSC_VER) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
+#if defined(V8_OS_WIN)
+// TODO(hpayer): The MSVC header includes windows.h, which other files end up
+//               relying on. Fix this as part of crbug.com/559247.
 #include "src/base/atomicops_internals_x86_msvc.h"
-#elif defined(__APPLE__)
-#include "src/base/atomicops_internals_mac.h"
-#elif defined(__GNUC__) && V8_HOST_ARCH_ARM64
-#include "src/base/atomicops_internals_arm64_gcc.h"
-#elif defined(__GNUC__) && V8_HOST_ARCH_ARM
-#include "src/base/atomicops_internals_arm_gcc.h"
-#elif defined(__GNUC__) && V8_HOST_ARCH_PPC
-#include "src/base/atomicops_internals_ppc_gcc.h"
-#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
-#include "src/base/atomicops_internals_x86_gcc.h"
-#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS
-#include "src/base/atomicops_internals_mips_gcc.h"
-#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS64
-#include "src/base/atomicops_internals_mips64_gcc.h"
-#elif defined(__GNUC__) && V8_HOST_ARCH_S390
-#include "src/base/atomicops_internals_s390_gcc.h"
 #else
-#error "Atomic operations are not supported on your platform"
+#include "src/base/atomicops_internals_portable.h"
 #endif
 
 // On some platforms we need additional declarations to make
 // AtomicWord compatible with our other Atomic* types.
-#if defined(__APPLE__) || defined(__OpenBSD__) || defined(V8_OS_AIX)
+#if defined(V8_OS_MACOSX) || defined(V8_OS_OPENBSD) || defined(V8_OS_AIX)
 #include "src/base/atomicops_internals_atomicword_compat.h"
 #endif
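
A publication sketch showing the surviving pairing (illustrative code, not from the patch): `Release_Store` pairs with `Acquire_Load`; an Acquire_Store/Release_Load combination forms no such pair, which is why those variants could be dropped.

```cpp
#include "src/base/atomicops.h"

v8::base::Atomic32 ready = 0;
int payload = 0;  // plain data guarded by the flag

void Producer() {
  payload = 123;                       // 1. write the data
  v8::base::Release_Store(&ready, 1);  // 2. publish; earlier writes stay above
}

void Consumer() {
  if (v8::base::Acquire_Load(&ready) == 1) {  // 3. later reads stay below
    int seen = payload;  // guaranteed to observe 123
    (void)seen;
  }
}
```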
 
diff --git a/src/base/atomicops_internals_arm64_gcc.h b/src/base/atomicops_internals_arm64_gcc.h
deleted file mode 100644
index f24050a..0000000
--- a/src/base/atomicops_internals_arm64_gcc.h
+++ /dev/null
@@ -1,317 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
-
-namespace v8 {
-namespace base {
-
-inline void MemoryBarrier() {
-  __asm__ __volatile__ ("dmb ish" ::: "memory");  // NOLINT
-}
-
-// NoBarrier versions of the operation include "memory" in the clobber list.
-// This is not required for direct usage of the NoBarrier versions of the
-// operations. However this is required for correctness when they are used as
-// part of the Acquire or Release versions, to ensure that nothing from outside
-// the call is reordered between the operation and the memory barrier. This does
-// not change the code generated, so has no or minimal impact on the
-// NoBarrier operations.
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
-                                         Atomic32 old_value,
-                                         Atomic32 new_value) {
-  Atomic32 prev;
-  int32_t temp;
-
-  __asm__ __volatile__ (  // NOLINT
-    "0:                                    \n\t"
-    "ldxr %w[prev], %[ptr]                 \n\t"  // Load the previous value.
-    "cmp %w[prev], %w[old_value]           \n\t"
-    "bne 1f                                \n\t"
-    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
-    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
-    "1:                                    \n\t"
-    : [prev]"=&r" (prev),
-      [temp]"=&r" (temp),
-      [ptr]"+Q" (*ptr)
-    : [old_value]"IJr" (old_value),
-      [new_value]"r" (new_value)
-    : "cc", "memory"
-  );  // NOLINT
-
-  return prev;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
-                                         Atomic32 new_value) {
-  Atomic32 result;
-  int32_t temp;
-
-  __asm__ __volatile__ (  // NOLINT
-    "0:                                    \n\t"
-    "ldxr %w[result], %[ptr]               \n\t"  // Load the previous value.
-    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
-    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
-    : [result]"=&r" (result),
-      [temp]"=&r" (temp),
-      [ptr]"+Q" (*ptr)
-    : [new_value]"r" (new_value)
-    : "memory"
-  );  // NOLINT
-
-  return result;
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
-                                          Atomic32 increment) {
-  Atomic32 result;
-  int32_t temp;
-
-  __asm__ __volatile__ (  // NOLINT
-    "0:                                       \n\t"
-    "ldxr %w[result], %[ptr]                  \n\t"  // Load the previous value.
-    "add %w[result], %w[result], %w[increment]\n\t"
-    "stxr %w[temp], %w[result], %[ptr]        \n\t"  // Try to store the result.
-    "cbnz %w[temp], 0b                        \n\t"  // Retry on failure.
-    : [result]"=&r" (result),
-      [temp]"=&r" (temp),
-      [ptr]"+Q" (*ptr)
-    : [increment]"IJr" (increment)
-    : "memory"
-  );  // NOLINT
-
-  return result;
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
-                                        Atomic32 increment) {
-  Atomic32 result;
-
-  MemoryBarrier();
-  result = NoBarrier_AtomicIncrement(ptr, increment);
-  MemoryBarrier();
-
-  return result;
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  Atomic32 prev;
-
-  prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  MemoryBarrier();
-
-  return prev;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  Atomic32 prev;
-
-  MemoryBarrier();
-  prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-
-  return prev;
-}
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
-  *ptr = value;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-  __asm__ __volatile__ (  // NOLINT
-    "stlr %w[value], %[ptr]  \n\t"
-    : [ptr]"=Q" (*ptr)
-    : [value]"r" (value)
-    : "memory"
-  );  // NOLINT
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
-  return *ptr;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
-  return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
-  Atomic32 value;
-
-  __asm__ __volatile__ (  // NOLINT
-    "ldar %w[value], %[ptr]  \n\t"
-    : [value]"=r" (value)
-    : [ptr]"Q" (*ptr)
-    : "memory"
-  );  // NOLINT
-
-  return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
-// 64-bit versions of the operations.
-// See the 32-bit versions for comments.
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
-                                         Atomic64 old_value,
-                                         Atomic64 new_value) {
-  Atomic64 prev;
-  int32_t temp;
-
-  __asm__ __volatile__ (  // NOLINT
-    "0:                                    \n\t"
-    "ldxr %[prev], %[ptr]                  \n\t"
-    "cmp %[prev], %[old_value]             \n\t"
-    "bne 1f                                \n\t"
-    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
-    "cbnz %w[temp], 0b                     \n\t"
-    "1:                                    \n\t"
-    : [prev]"=&r" (prev),
-      [temp]"=&r" (temp),
-      [ptr]"+Q" (*ptr)
-    : [old_value]"IJr" (old_value),
-      [new_value]"r" (new_value)
-    : "cc", "memory"
-  );  // NOLINT
-
-  return prev;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
-                                         Atomic64 new_value) {
-  Atomic64 result;
-  int32_t temp;
-
-  __asm__ __volatile__ (  // NOLINT
-    "0:                                    \n\t"
-    "ldxr %[result], %[ptr]                \n\t"
-    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
-    "cbnz %w[temp], 0b                     \n\t"
-    : [result]"=&r" (result),
-      [temp]"=&r" (temp),
-      [ptr]"+Q" (*ptr)
-    : [new_value]"r" (new_value)
-    : "memory"
-  );  // NOLINT
-
-  return result;
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
-                                          Atomic64 increment) {
-  Atomic64 result;
-  int32_t temp;
-
-  __asm__ __volatile__ (  // NOLINT
-    "0:                                     \n\t"
-    "ldxr %[result], %[ptr]                 \n\t"
-    "add %[result], %[result], %[increment] \n\t"
-    "stxr %w[temp], %[result], %[ptr]       \n\t"
-    "cbnz %w[temp], 0b                      \n\t"
-    : [result]"=&r" (result),
-      [temp]"=&r" (temp),
-      [ptr]"+Q" (*ptr)
-    : [increment]"IJr" (increment)
-    : "memory"
-  );  // NOLINT
-
-  return result;
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
-                                        Atomic64 increment) {
-  Atomic64 result;
-
-  MemoryBarrier();
-  result = NoBarrier_AtomicIncrement(ptr, increment);
-  MemoryBarrier();
-
-  return result;
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
-                                       Atomic64 old_value,
-                                       Atomic64 new_value) {
-  Atomic64 prev;
-
-  prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  MemoryBarrier();
-
-  return prev;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
-                                       Atomic64 old_value,
-                                       Atomic64 new_value) {
-  Atomic64 prev;
-
-  MemoryBarrier();
-  prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-
-  return prev;
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
-  *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
-  __asm__ __volatile__ (  // NOLINT
-    "stlr %x[value], %[ptr]  \n\t"
-    : [ptr]"=Q" (*ptr)
-    : [value]"r" (value)
-    : "memory"
-  );  // NOLINT
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
-  return *ptr;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
-  Atomic64 value;
-
-  __asm__ __volatile__ (  // NOLINT
-    "ldar %x[value], %[ptr]  \n\t"
-    : [value]"=r" (value)
-    : [ptr]"Q" (*ptr)
-    : "memory"
-  );  // NOLINT
-
-  return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
-}  // namespace base
-}  // namespace v8
-
-#endif  // V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
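
A sketch of what replaces the deleted per-architecture assembly (an assumption modelled on atomicops_internals_portable.h, which defers to the `__atomic` compiler builtins; the real file covers the full operation set):

```cpp
#include "src/base/atomicops.h"  // for the Atomic32 typedef

inline v8::base::Atomic32 Portable_NoBarrier_CompareAndSwap(
    volatile v8::base::Atomic32* ptr, v8::base::Atomic32 old_value,
    v8::base::Atomic32 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  return old_value;  // on failure, updated to the value actually observed
}

inline void Portable_Release_Store(volatile v8::base::Atomic32* ptr,
                                   v8::base::Atomic32 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
}
```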
diff --git a/src/base/atomicops_internals_arm_gcc.h b/src/base/atomicops_internals_arm_gcc.h
deleted file mode 100644
index 8d049e0..0000000
--- a/src/base/atomicops_internals_arm_gcc.h
+++ /dev/null
@@ -1,304 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-//
-// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
-
-#if defined(__QNXNTO__)
-#include <sys/cpuinline.h>
-#endif
-
-namespace v8 {
-namespace base {
-
-// Memory barriers on ARM are funky, but the kernel is here to help:
-//
-// * ARMv5 didn't support SMP, there is no memory barrier instruction at
-//   all on this architecture, or when targeting its machine code.
-//
-// * Some ARMv6 CPUs support SMP. A full memory barrier can be produced by
-//   writing a random value to a very specific coprocessor register.
-//
-// * On ARMv7, the "dmb" instruction is used to perform a full memory
-//   barrier (though writing to the co-processor will still work).
-//   However, on single-core devices (e.g. Nexus One or Nexus S),
-//   this instruction will take up to 200 ns, which is huge, even though
-//   it's completely unneeded on these devices.
-//
-// * There is no easy way to determine at runtime if the device is
-//   single or multi-core. However, the kernel provides a useful helper
-//   function at a fixed memory address (0xffff0fa0), which will always
-//   perform a memory barrier in the most efficient way. I.e. on single
-//   core devices, this is an empty function that exits immediately.
-//   On multi-core devices, it implements a full memory barrier.
-//
-// * This source could be compiled to ARMv5 machine code that runs on a
-//   multi-core ARMv6 or ARMv7 device. In this case, memory barriers
-//   are needed for correct execution. Always call the kernel helper, even
-//   when targeting ARMv5TE.
-//
-
-inline void MemoryBarrier() {
-#if defined(__ANDROID__)
-  // Note: This is a function call, which is also an implicit compiler barrier.
-  typedef void (*KernelMemoryBarrierFunc)();
-  ((KernelMemoryBarrierFunc)0xffff0fa0)();
-#elif defined(__QNXNTO__)
-  __cpu_membarrier();
-#else
-  // Fallback to GCC built-in function
-  __sync_synchronize();
-#endif
-}
-
-// An ARM toolchain would only define one of these depending on which
-// variant of the target architecture is being used. This tests against
-// any known ARMv6 or ARMv7 variant, where it is possible to directly
-// use ldrex/strex instructions to implement fast atomic operations.
-#if defined(__ARM_ARCH_8A__) || \
-    defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) ||  \
-    defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || \
-    defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) ||  \
-    defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
-    defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__)
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
-                                         Atomic32 old_value,
-                                         Atomic32 new_value) {
-  Atomic32 prev_value;
-  int reloop;
-  do {
-    // The following is equivalent to:
-    //
-    //   prev_value = LDREX(ptr)
-    //   reloop = 0
-    //   if (prev_value != old_value)
-    //      reloop = STREX(ptr, new_value)
-    __asm__ __volatile__("    ldrex %0, [%3]\n"
-                         "    mov %1, #0\n"
-                         "    cmp %0, %4\n"
-#ifdef __thumb2__
-                         "    it eq\n"
-#endif
-                         "    strexeq %1, %5, [%3]\n"
-                         : "=&r"(prev_value), "=&r"(reloop), "+m"(*ptr)
-                         : "r"(ptr), "r"(old_value), "r"(new_value)
-                         : "cc", "memory");
-  } while (reloop != 0);
-  return prev_value;
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  Atomic32 result = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  MemoryBarrier();
-  return result;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  MemoryBarrier();
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
-                                          Atomic32 increment) {
-  Atomic32 value;
-  int reloop;
-  do {
-    // Equivalent to:
-    //
-    //  value = LDREX(ptr)
-    //  value += increment
-    //  reloop = STREX(ptr, value)
-    //
-    __asm__ __volatile__("    ldrex %0, [%3]\n"
-                         "    add %0, %0, %4\n"
-                         "    strex %1, %0, [%3]\n"
-                         : "=&r"(value), "=&r"(reloop), "+m"(*ptr)
-                         : "r"(ptr), "r"(increment)
-                         : "cc", "memory");
-  } while (reloop);
-  return value;
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
-                                        Atomic32 increment) {
-  // TODO(digit): Investigate if it's possible to implement this with
-  // a single MemoryBarrier() operation between the LDREX and STREX.
-  // See http://crbug.com/246514
-  MemoryBarrier();
-  Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
-  MemoryBarrier();
-  return result;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
-                                         Atomic32 new_value) {
-  Atomic32 old_value;
-  int reloop;
-  do {
-    // old_value = LDREX(ptr)
-    // reloop = STREX(ptr, new_value)
-    __asm__ __volatile__("   ldrex %0, [%3]\n"
-                         "   strex %1, %4, [%3]\n"
-                         : "=&r"(old_value), "=&r"(reloop), "+m"(*ptr)
-                         : "r"(ptr), "r"(new_value)
-                         : "cc", "memory");
-  } while (reloop != 0);
-  return old_value;
-}
-
-// This tests against any known ARMv5 variant.
-#elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) || \
-      defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__)
-
-// The kernel also provides a helper function to perform an atomic
-// compare-and-swap operation at the hard-wired address 0xffff0fc0.
-// On ARMv5, this is implemented by a special code path that the kernel
-// detects and treats specially when thread pre-emption happens.
-// On ARMv6 and higher, it uses LDREX/STREX instructions instead.
-//
-// Note that this always performs a full memory barrier; there is no
-// need to add calls to MemoryBarrier() before or after it. It also
-// returns 0 on success, and 1 on failure.
-//
-// Available and reliable since Linux 2.6.24. Both Android and ChromeOS
-// use newer kernel revisions, so this should not be a concern.
-namespace {
-
-inline int LinuxKernelCmpxchg(Atomic32 old_value,
-                              Atomic32 new_value,
-                              volatile Atomic32* ptr) {
-  typedef int (*KernelCmpxchgFunc)(Atomic32, Atomic32, volatile Atomic32*);
-  return ((KernelCmpxchgFunc)0xffff0fc0)(old_value, new_value, ptr);
-}
-
-}  // namespace
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
-                                         Atomic32 old_value,
-                                         Atomic32 new_value) {
-  Atomic32 prev_value;
-  for (;;) {
-    prev_value = *ptr;
-    if (prev_value != old_value)
-      return prev_value;
-    if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
-      return old_value;
-  }
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
-                                         Atomic32 new_value) {
-  Atomic32 old_value;
-  do {
-    old_value = *ptr;
-  } while (LinuxKernelCmpxchg(old_value, new_value, ptr));
-  return old_value;
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
-                                          Atomic32 increment) {
-  return Barrier_AtomicIncrement(ptr, increment);
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
-                                        Atomic32 increment) {
-  for (;;) {
-    // Atomic exchange the old value with an incremented one.
-    Atomic32 old_value = *ptr;
-    Atomic32 new_value = old_value + increment;
-    if (!LinuxKernelCmpxchg(old_value, new_value, ptr)) {
-      // The exchange took place as expected.
-      return new_value;
-    }
-    // Otherwise, *ptr changed mid-loop and we need to retry.
-  }
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  Atomic32 prev_value;
-  for (;;) {
-    prev_value = *ptr;
-    if (prev_value != old_value) {
-      // Always ensure acquire semantics.
-      MemoryBarrier();
-      return prev_value;
-    }
-    if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
-      return old_value;
-  }
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  // This could be implemented as:
-  //    MemoryBarrier();
-  //    return NoBarrier_CompareAndSwap();
-  //
-  // But that would use 3 barriers per successful CAS. To save performance,
-  // use Acquire_CompareAndSwap(). Its implementation guarantees that:
-  // - A successful swap uses only 2 barriers (in the kernel helper).
-  // - An early return due to (prev_value != old_value) performs
-  //   a memory barrier with no store, which is equivalent to the
-  //   generic implementation above.
-  return Acquire_CompareAndSwap(ptr, old_value, new_value);
-}
-
-#else
-#  error "Your CPU's ARM architecture is not supported yet"
-#endif
-
-// NOTE: Atomicity of the following load and store operations is only
-// guaranteed in case of 32-bit alignment of |ptr| values.
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-  MemoryBarrier();
-  *ptr = value;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
-  Atomic32 value = *ptr;
-  MemoryBarrier();
-  return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
-// Byte accessors.
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
-  *ptr = value;
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }
-
-}  // namespace base
-}  // namespace v8
-
-#endif  // V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
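
For comparison, a C++11 equivalent of the deleted hand-rolled ARM sequences (a sketch, not part of the patch): `std::atomic` lets the compiler pick ldrex/strex, kernel helpers, or dmb as appropriate for the target.

```cpp
#include <atomic>
#include <cstdint>

std::atomic<int32_t> cell{0};

int32_t AcquireCompareAndSwapSketch(int32_t expected, int32_t desired) {
  cell.compare_exchange_strong(expected, desired, std::memory_order_acquire,
                               std::memory_order_acquire);
  return expected;  // on failure this now holds the value actually seen
}
```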
diff --git a/src/base/atomicops_internals_atomicword_compat.h b/src/base/atomicops_internals_atomicword_compat.h
index 4f758a7..5071f44 100644
--- a/src/base/atomicops_internals_atomicword_compat.h
+++ b/src/base/atomicops_internals_atomicword_compat.h
@@ -67,11 +67,6 @@
       reinterpret_cast<volatile Atomic32*>(ptr), value);
 }
 
-inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
-  return v8::base::Acquire_Store(
-      reinterpret_cast<volatile Atomic32*>(ptr), value);
-}
-
 inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
   return v8::base::Release_Store(
       reinterpret_cast<volatile Atomic32*>(ptr), value);
@@ -87,11 +82,6 @@
       reinterpret_cast<volatile const Atomic32*>(ptr));
 }
 
-inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
-  return v8::base::Release_Load(
-      reinterpret_cast<volatile const Atomic32*>(ptr));
-}
-
 }  // namespace base
 }  // namespace v8
 
diff --git a/src/base/atomicops_internals_mac.h b/src/base/atomicops_internals_mac.h
deleted file mode 100644
index c112506..0000000
--- a/src/base/atomicops_internals_mac.h
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
-
-#include <libkern/OSAtomic.h>
-
-namespace v8 {
-namespace base {
-
-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
-
-inline void MemoryBarrier() { OSMemoryBarrier(); }
-
-inline void AcquireMemoryBarrier() {
-// On x86 processors, loads already have acquire semantics, so
-// there is no need to put a full barrier here.
-#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
-  ATOMICOPS_COMPILER_BARRIER();
-#else
-  MemoryBarrier();
-#endif
-}
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
-                                         Atomic32 old_value,
-                                         Atomic32 new_value) {
-  Atomic32 prev_value;
-  do {
-    if (OSAtomicCompareAndSwap32(old_value, new_value,
-                                 const_cast<Atomic32*>(ptr))) {
-      return old_value;
-    }
-    prev_value = *ptr;
-  } while (prev_value == old_value);
-  return prev_value;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
-                                         Atomic32 new_value) {
-  Atomic32 old_value;
-  do {
-    old_value = *ptr;
-  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
-                                     const_cast<Atomic32*>(ptr)));
-  return old_value;
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
-                                          Atomic32 increment) {
-  return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
-                                        Atomic32 increment) {
-  return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  Atomic32 prev_value;
-  do {
-    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
-                                        const_cast<Atomic32*>(ptr))) {
-      return old_value;
-    }
-    prev_value = *ptr;
-  } while (prev_value == old_value);
-  return prev_value;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  return Acquire_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
-  *ptr = value;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-  MemoryBarrier();
-  *ptr = value;
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
-  return *ptr;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
-  return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
-  Atomic32 value = *ptr;
-  AcquireMemoryBarrier();
-  return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
-#ifdef __LP64__
-
-// 64-bit implementation on 64-bit platform
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
-                                         Atomic64 old_value,
-                                         Atomic64 new_value) {
-  Atomic64 prev_value;
-  do {
-    if (OSAtomicCompareAndSwap64(old_value, new_value,
-                                 reinterpret_cast<volatile int64_t*>(ptr))) {
-      return old_value;
-    }
-    prev_value = *ptr;
-  } while (prev_value == old_value);
-  return prev_value;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
-                                         Atomic64 new_value) {
-  Atomic64 old_value;
-  do {
-    old_value = *ptr;
-  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
-                                     reinterpret_cast<volatile int64_t*>(ptr)));
-  return old_value;
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
-                                          Atomic64 increment) {
-  return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr));
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
-                                        Atomic64 increment) {
-  return OSAtomicAdd64Barrier(increment,
-                              reinterpret_cast<volatile int64_t*>(ptr));
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
-                                       Atomic64 old_value,
-                                       Atomic64 new_value) {
-  Atomic64 prev_value;
-  do {
-    if (OSAtomicCompareAndSwap64Barrier(
-        old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) {
-      return old_value;
-    }
-    prev_value = *ptr;
-  } while (prev_value == old_value);
-  return prev_value;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
-                                       Atomic64 old_value,
-                                       Atomic64 new_value) {
-  // The lib kern interface does not distinguish between
-  // Acquire and Release memory barriers; they are equivalent.
-  return Acquire_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
-  *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
-  MemoryBarrier();
-  *ptr = value;
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
-  return *ptr;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
-  Atomic64 value = *ptr;
-  AcquireMemoryBarrier();
-  return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
-#endif  // defined(__LP64__)
-
-#undef ATOMICOPS_COMPILER_BARRIER
-}  // namespace base
-}  // namespace v8
-
-#endif  // V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
diff --git a/src/base/atomicops_internals_mips64_gcc.h b/src/base/atomicops_internals_mips64_gcc.h
deleted file mode 100644
index cf2e194..0000000
--- a/src/base/atomicops_internals_mips64_gcc.h
+++ /dev/null
@@ -1,310 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
-
-namespace v8 {
-namespace base {
-
-// Atomically execute:
-//      result = *ptr;
-//      if (*ptr == old_value)
-//        *ptr = new_value;
-//      return result;
-//
-// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
-// Always return the old value of "*ptr"
-//
-// This routine implies no memory barriers.
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
-                                         Atomic32 old_value,
-                                         Atomic32 new_value) {
-  Atomic32 prev, tmp;
-  __asm__ __volatile__(".set push\n"
-                       ".set noreorder\n"
-                       "1:\n"
-                       "ll %0, %5\n"  // prev = *ptr
-                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
-                       "move %2, %4\n"  // tmp = new_value
-                       "sc %2, %1\n"  // *ptr = tmp (with atomic check)
-                       "beqz %2, 1b\n"  // start again on atomic error
-                       "nop\n"  // delay slot nop
-                       "2:\n"
-                       ".set pop\n"
-                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
-                       : "r" (old_value), "r" (new_value), "m" (*ptr)
-                       : "memory");
-  return prev;
-}
-
-// Atomically store new_value into *ptr, returning the previous value held in
-// *ptr.  This routine implies no memory barriers.
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
-                                         Atomic32 new_value) {
-  Atomic32 temp, old;
-  __asm__ __volatile__(".set push\n"
-                       ".set noreorder\n"
-                       "1:\n"
-                       "ll %1, %2\n"  // old = *ptr
-                       "move %0, %3\n"  // temp = new_value
-                       "sc %0, %2\n"  // *ptr = temp (with atomic check)
-                       "beqz %0, 1b\n"  // start again on atomic error
-                       "nop\n"  // delay slot nop
-                       ".set pop\n"
-                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
-                       : "r" (new_value), "m" (*ptr)
-                       : "memory");
-
-  return old;
-}
-
-// Atomically increment *ptr by "increment".  Returns the new value of
-// *ptr with the increment applied.  This routine implies no memory barriers.
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
-                                          Atomic32 increment) {
-  Atomic32 temp, temp2;
-
-  __asm__ __volatile__(
-      ".set push\n"
-      ".set noreorder\n"
-      "1:\n"
-      "ll %0, %2\n"        // temp = *ptr
-      "addu %1, %0, %3\n"  // temp2 = temp + increment
-      "sc %1, %2\n"        // *ptr = temp2 (with atomic check)
-      "beqz %1, 1b\n"      // start again on atomic error
-      "addu %1, %0, %3\n"  // temp2 = temp + increment
-      ".set pop\n"
-      : "=&r"(temp), "=&r"(temp2), "=ZC"(*ptr)
-      : "Ir"(increment), "m"(*ptr)
-      : "memory");
-  // temp2 now holds the final value.
-  return temp2;
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
-                                        Atomic32 increment) {
-  MemoryBarrier();
-  Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
-  MemoryBarrier();
-  return res;
-}
-
-// "Acquire" operations
-// ensure that no later memory access can be reordered ahead of the operation.
-// "Release" operations ensure that no previous memory access can be reordered
-// after the operation.  "Barrier" operations have both "Acquire" and "Release"
-// semantics.   A MemoryBarrier() has "Barrier" semantics, but does no memory
-// access.
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  MemoryBarrier();
-  return res;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  MemoryBarrier();
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
-  *ptr = value;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-}
-
-inline void MemoryBarrier() {
-  __asm__ __volatile__("sync" : : : "memory");
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-  MemoryBarrier();
-  *ptr = value;
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
-  return *ptr;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
-  return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
-  Atomic32 value = *ptr;
-  MemoryBarrier();
-  return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
-
-// 64-bit versions of the atomic ops.
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
-                                         Atomic64 old_value,
-                                         Atomic64 new_value) {
-  Atomic64 prev, tmp;
-  __asm__ __volatile__(".set push\n"
-                       ".set noreorder\n"
-                       "1:\n"
-                       "lld %0, %5\n"  // prev = *ptr
-                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
-                       "move %2, %4\n"  // tmp = new_value
-                       "scd %2, %1\n"  // *ptr = tmp (with atomic check)
-                       "beqz %2, 1b\n"  // start again on atomic error
-                       "nop\n"  // delay slot nop
-                       "2:\n"
-                       ".set pop\n"
-                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
-                       : "r" (old_value), "r" (new_value), "m" (*ptr)
-                       : "memory");
-  return prev;
-}
-
-// Atomically store new_value into *ptr, returning the previous value held in
-// *ptr.  This routine implies no memory barriers.
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
-                                         Atomic64 new_value) {
-  Atomic64 temp, old;
-  __asm__ __volatile__(".set push\n"
-                       ".set noreorder\n"
-                       "1:\n"
-                       "lld %1, %2\n"  // old = *ptr
-                       "move %0, %3\n"  // temp = new_value
-                       "scd %0, %2\n"  // *ptr = temp (with atomic check)
-                       "beqz %0, 1b\n"  // start again on atomic error
-                       "nop\n"  // delay slot nop
-                       ".set pop\n"
-                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
-                       : "r" (new_value), "m" (*ptr)
-                       : "memory");
-
-  return old;
-}
-
-// Atomically increment *ptr by "increment".  Returns the new value of
-// *ptr with the increment applied.  This routine implies no memory barriers.
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
-                                          Atomic64 increment) {
-  Atomic64 temp, temp2;
-
-  __asm__ __volatile__(
-      ".set push\n"
-      ".set noreorder\n"
-      "1:\n"
-      "lld %0, %2\n"        // temp = *ptr
-      "daddu %1, %0, %3\n"  // temp2 = temp + increment
-      "scd %1, %2\n"        // *ptr = temp2 (with atomic check)
-      "beqz %1, 1b\n"       // start again on atomic error
-      "daddu %1, %0, %3\n"  // temp2 = temp + increment
-      ".set pop\n"
-      : "=&r"(temp), "=&r"(temp2), "=ZC"(*ptr)
-      : "Ir"(increment), "m"(*ptr)
-      : "memory");
-  // temp2 now holds the final value.
-  return temp2;
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
-                                        Atomic64 increment) {
-  MemoryBarrier();
-  Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment);
-  MemoryBarrier();
-  return res;
-}
-
-// "Acquire" operations
-// ensure that no later memory access can be reordered ahead of the operation.
-// "Release" operations ensure that no previous memory access can be reordered
-// after the operation.  "Barrier" operations have both "Acquire" and "Release"
-// semantics.   A MemoryBarrier() has "Barrier" semantics, but does no memory
-// access.
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
-                                       Atomic64 old_value,
-                                       Atomic64 new_value) {
-  Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  MemoryBarrier();
-  return res;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
-                                       Atomic64 old_value,
-                                       Atomic64 new_value) {
-  MemoryBarrier();
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
-  *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
-  MemoryBarrier();
-  *ptr = value;
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
-  return *ptr;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
-  Atomic64 value = *ptr;
-  MemoryBarrier();
-  return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
-}  // namespace base
-}  // namespace v8
-
-#endif  // V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
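
The comment block deleted above defines the acquire/release/barrier vocabulary that every per-architecture backend implemented by hand. As a reference point, here is a minimal sketch of that contract expressed with std::atomic (names are illustrative, not part of the patch):

```cpp
#include <atomic>
#include <cstdint>

// compare_exchange_strong writes the observed value back into |expected|,
// so returning it always yields the previous value of *ptr, which is what
// the atomicops API promises.
inline int64_t AcquireCompareAndSwapSketch(std::atomic<int64_t>* ptr,
                                           int64_t expected, int64_t desired) {
  ptr->compare_exchange_strong(expected, desired, std::memory_order_acquire,
                               std::memory_order_acquire);
  return expected;
}

inline int64_t ReleaseCompareAndSwapSketch(std::atomic<int64_t>* ptr,
                                           int64_t expected, int64_t desired) {
  // A release CAS must use a non-release ordering on failure; relaxed
  // matches the deleted code, which only placed a barrier before the CAS.
  ptr->compare_exchange_strong(expected, desired, std::memory_order_release,
                               std::memory_order_relaxed);
  return expected;
}
```
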
diff --git a/src/base/atomicops_internals_mips_gcc.h b/src/base/atomicops_internals_mips_gcc.h
deleted file mode 100644
index 8d65db2..0000000
--- a/src/base/atomicops_internals_mips_gcc.h
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
-
-namespace v8 {
-namespace base {
-
-// Atomically execute:
-//      result = *ptr;
-//      if (*ptr == old_value)
-//        *ptr = new_value;
-//      return result;
-//
-// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
-// Always return the old value of "*ptr"
-//
-// This routine implies no memory barriers.
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
-                                         Atomic32 old_value,
-                                         Atomic32 new_value) {
-  Atomic32 prev, tmp;
-  __asm__ __volatile__(".set push\n"
-                       ".set noreorder\n"
-                       "1:\n"
-                       "ll %0, 0(%4)\n"  // prev = *ptr
-                       "bne %0, %2, 2f\n"  // if (prev != old_value) goto 2
-                       "move %1, %3\n"  // tmp = new_value
-                       "sc %1, 0(%4)\n"  // *ptr = tmp (with atomic check)
-                       "beqz %1, 1b\n"  // start again on atomic error
-                       "nop\n"  // delay slot nop
-                       "2:\n"
-                       ".set pop\n"
-                       : "=&r" (prev), "=&r" (tmp)
-                       : "r" (old_value), "r" (new_value), "r" (ptr)
-                       : "memory");
-  return prev;
-}
-
-// Atomically store new_value into *ptr, returning the previous value held in
-// *ptr.  This routine implies no memory barriers.
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
-                                         Atomic32 new_value) {
-  Atomic32 temp, old;
-  __asm__ __volatile__(".set push\n"
-                       ".set noreorder\n"
-                       ".set at\n"
-                       "1:\n"
-                       "ll %1, 0(%3)\n"  // old = *ptr
-                       "move %0, %2\n"  // temp = new_value
-                       "sc %0, 0(%3)\n"  // *ptr = temp (with atomic check)
-                       "beqz %0, 1b\n"  // start again on atomic error
-                       "nop\n"  // delay slot nop
-                       ".set pop\n"
-                       : "=&r" (temp), "=&r" (old)
-                       : "r" (new_value), "r" (ptr)
-                       : "memory");
-
-  return old;
-}
-
-// Atomically increment *ptr by "increment".  Returns the new value of
-// *ptr with the increment applied.  This routine implies no memory barriers.
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
-                                          Atomic32 increment) {
-  Atomic32 temp, temp2;
-
-  __asm__ __volatile__(".set push\n"
-                       ".set noreorder\n"
-                       "1:\n"
-                       "ll %0, 0(%3)\n"  // temp = *ptr
-                       "addu %1, %0, %2\n"  // temp2 = temp + increment
-                       "sc %1, 0(%3)\n"  // *ptr = temp2 (with atomic check)
-                       "beqz %1, 1b\n"  // start again on atomic error
-                       "addu %1, %0, %2\n"  // temp2 = temp + increment
-                       ".set pop\n"
-                       : "=&r" (temp), "=&r" (temp2)
-                       : "Ir" (increment), "r" (ptr)
-                       : "memory");
-  // temp2 now holds the final value.
-  return temp2;
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
-                                        Atomic32 increment) {
-  MemoryBarrier();
-  Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
-  MemoryBarrier();
-  return res;
-}
-
-// "Acquire" operations
-// ensure that no later memory access can be reordered ahead of the operation.
-// "Release" operations ensure that no previous memory access can be reordered
-// after the operation.  "Barrier" operations have both "Acquire" and "Release"
-// semantics.   A MemoryBarrier() has "Barrier" semantics, but does no memory
-// access.
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  MemoryBarrier();
-  return res;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  MemoryBarrier();
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
-  *ptr = value;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-}
-
-inline void MemoryBarrier() {
-  __asm__ __volatile__("sync" : : : "memory");
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-  MemoryBarrier();
-  *ptr = value;
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
-  return *ptr;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
-  return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
-  Atomic32 value = *ptr;
-  MemoryBarrier();
-  return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
-}  // namespace base
-}  // namespace v8
-
-#endif  // V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
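
All of the ll/sc retry loops deleted in this file compute what the C++ compare-exchange primitive provides directly. A sketch of the 32-bit CAS in portable C++ (illustrative names):

```cpp
#include <atomic>
#include <cstdint>

// What the removed ll/sc loop computes: store new_value into *ptr only if it
// still holds old_value, and always return the value actually observed. On
// ll/sc architectures, compare_exchange_strong compiles down to the same
// retry loop (the "beqz ..., 1b" branch) the assembly wrote out by hand.
inline int32_t CompareAndSwapSketch(std::atomic<int32_t>* ptr,
                                    int32_t old_value, int32_t new_value) {
  ptr->compare_exchange_strong(old_value, new_value,
                               std::memory_order_relaxed);
  return old_value;  // Updated to the observed value on failure.
}
```
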
diff --git a/src/base/atomicops_internals_portable.h b/src/base/atomicops_internals_portable.h
new file mode 100644
index 0000000..72c1d9a
--- /dev/null
+++ b/src/base/atomicops_internals_portable.h
@@ -0,0 +1,172 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+//
+// This implementation uses C++11 atomics' member functions. The code base is
+// currently written assuming atomicity revolves around accesses instead of
+// C++11's memory locations. The burden is on the programmer to ensure that all
+// memory locations accessed atomically are never accessed non-atomically (tsan
+// should help with this).
+//
+// Of note in this implementation:
+//  * All NoBarrier variants are implemented as relaxed.
+//  * All Barrier variants are implemented as sequentially-consistent.
+//  * Compare exchange's failure ordering is always the same as the success one
+//    (except for release, which fails as relaxed): using a weaker ordering is
+//    only valid under certain uses of compare exchange.
+//  * Acquire store doesn't exist in the C11 memory model; it is instead
+//    implemented as a relaxed store followed by a sequentially consistent
+//    fence.
+//  * Release load doesn't exist in the C11 memory model; it is instead
+//    implemented as a sequentially consistent fence followed by a relaxed load.
+//  * Atomic increment is expected to return the post-incremented value, whereas
+//    C11 fetch add returns the previous value. The implementation therefore
+//    needs to increment twice (which the compiler should be able to detect and
+//    optimize).
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
+
+#include <atomic>
+
+#include "src/base/build_config.h"
+
+namespace v8 {
+namespace base {
+
+// This implementation is transitional and maintains the original API for
+// atomicops.h.
+
+inline void MemoryBarrier() {
+#if defined(__GLIBCXX__)
+  // Work around libstdc++ bug 51038 where atomic_thread_fence was declared but
+  // not defined, leading to the linker complaining about undefined references.
+  __atomic_thread_fence(std::memory_order_seq_cst);
+#else
+  std::atomic_thread_fence(std::memory_order_seq_cst);
+#endif
+}
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
+                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+  return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST);
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value, Atomic32 new_value) {
+  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
+                              __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+  return old_value;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value, Atomic32 new_value) {
+  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
+                              __ATOMIC_RELEASE, __ATOMIC_RELAXED);
+  return old_value;
+}
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
+}
+
+#if defined(V8_HOST_ARCH_64_BIT)
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
+                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+  return old_value;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST);
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value, Atomic64 new_value) {
+  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
+                              __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+  return old_value;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value, Atomic64 new_value) {
+  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
+                              __ATOMIC_RELEASE, __ATOMIC_RELAXED);
+  return old_value;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
+}
+
+#endif  // defined(V8_HOST_ARCH_64_BIT)
+}  // namespace base
+}  // namespace v8
+
+#endif  // V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
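
Two of the mappings called out in the new header comment are easy to get wrong, so a sketch with std::atomic may help. (The real file uses the __atomic builtins to sidestep the libstdc++ fence bug noted above; also note the comment still documents acquire store and release load even though only Release_Store and Acquire_Load survive in the new API.)

```cpp
#include <atomic>
#include <cstdint>

// "Acquire store": a relaxed store followed by a seq_cst fence, since the
// C++ memory model has no store with acquire semantics.
inline void AcquireStoreSketch(std::atomic<int32_t>* ptr, int32_t value) {
  ptr->store(value, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);
}

// Atomic increment: fetch_add returns the *previous* value, so the increment
// is added once more to return the post-increment value the API promises.
inline int32_t BarrierIncrementSketch(std::atomic<int32_t>* ptr, int32_t inc) {
  return ptr->fetch_add(inc, std::memory_order_seq_cst) + inc;
}
```
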
diff --git a/src/base/atomicops_internals_ppc_gcc.h b/src/base/atomicops_internals_ppc_gcc.h
deleted file mode 100644
index 0d16500..0000000
--- a/src/base/atomicops_internals_ppc_gcc.h
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-//
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_PPC_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_PPC_H_
-
-namespace v8 {
-namespace base {
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
-                                         Atomic32 old_value,
-                                         Atomic32 new_value) {
-  return (__sync_val_compare_and_swap(ptr, old_value, new_value));
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
-                                         Atomic32 new_value) {
-  Atomic32 old_value;
-  do {
-    old_value = *ptr;
-  } while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
-  return old_value;
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
-                                          Atomic32 increment) {
-  return Barrier_AtomicIncrement(ptr, increment);
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
-                                        Atomic32 increment) {
-  for (;;) {
-    Atomic32 old_value = *ptr;
-    Atomic32 new_value = old_value + increment;
-    if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) {
-      return new_value;
-      // The exchange took place as expected.
-    }
-    // Otherwise, *ptr changed mid-loop and we need to retry.
-  }
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value, Atomic32 new_value) {
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value, Atomic32 new_value) {
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
-  *ptr = value;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-}
-
-inline void MemoryBarrier() {
-  __asm__ __volatile__("sync" : : : "memory"); }
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-  MemoryBarrier();
-  *ptr = value;
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
-  Atomic32 value = *ptr;
-  MemoryBarrier();
-  return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
-#ifdef V8_TARGET_ARCH_PPC64
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
-                                         Atomic64 old_value,
-                                         Atomic64 new_value) {
-  return (__sync_val_compare_and_swap(ptr, old_value, new_value));
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
-                                         Atomic64 new_value) {
-  Atomic64 old_value;
-  do {
-    old_value = *ptr;
-  } while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
-  return old_value;
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
-                                          Atomic64 increment) {
-  return Barrier_AtomicIncrement(ptr, increment);
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
-                                        Atomic64 increment) {
-  for (;;) {
-    Atomic64 old_value = *ptr;
-    Atomic64 new_value = old_value + increment;
-    if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) {
-      return new_value;
-      // The exchange took place as expected.
-    }
-    // Otherwise, *ptr changed mid-loop and we need to retry.
-  }
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
-                                       Atomic64 old_value, Atomic64 new_value) {
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
-                                       Atomic64 old_value, Atomic64 new_value) {
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
-  *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
-  MemoryBarrier();
-  *ptr = value;
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { return *ptr; }
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
-  Atomic64 value = *ptr;
-  MemoryBarrier();
-  return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
-#endif
-}  // namespace base
-}  // namespace v8
-
-#endif  // V8_BASE_ATOMICOPS_INTERNALS_PPC_GCC_H_
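
The deleted PPC Barrier_AtomicIncrement is the classic compare-and-swap retry loop; the same shape in portable C++ looks roughly like this (illustrative sketch):

```cpp
#include <atomic>
#include <cstdint>

// CAS retry loop: reload on contention until the increment lands, then
// return the post-increment value.
inline int32_t CasIncrementSketch(std::atomic<int32_t>* ptr,
                                  int32_t increment) {
  int32_t old_value = ptr->load(std::memory_order_relaxed);
  while (!ptr->compare_exchange_weak(old_value, old_value + increment,
                                     std::memory_order_seq_cst,
                                     std::memory_order_relaxed)) {
    // compare_exchange_weak refreshed old_value; just try again.
  }
  return old_value + increment;
}
```
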
diff --git a/src/base/atomicops_internals_s390_gcc.h b/src/base/atomicops_internals_s390_gcc.h
deleted file mode 100644
index 6e34f30..0000000
--- a/src/base/atomicops_internals_s390_gcc.h
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_S390_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_S390_H_
-
-namespace v8 {
-namespace base {
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
-                                         Atomic32 old_value,
-                                         Atomic32 new_value) {
-  return (__sync_val_compare_and_swap(ptr, old_value, new_value));
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
-                                         Atomic32 new_value) {
-  Atomic32 old_value;
-  do {
-    old_value = *ptr;
-  } while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
-  return old_value;
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
-                                          Atomic32 increment) {
-  return Barrier_AtomicIncrement(ptr, increment);
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
-                                        Atomic32 increment) {
-  return __sync_add_and_fetch(ptr, increment);
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value, Atomic32 new_value) {
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value, Atomic32 new_value) {
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
-  *ptr = value;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-}
-
-inline void MemoryBarrier() { __sync_synchronize(); }
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-  MemoryBarrier();
-  *ptr = value;
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
-  Atomic32 value = *ptr;
-  MemoryBarrier();
-  return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
-#ifdef V8_TARGET_ARCH_S390X
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
-                                         Atomic64 old_value,
-                                         Atomic64 new_value) {
-  return (__sync_val_compare_and_swap(ptr, old_value, new_value));
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
-                                         Atomic64 new_value) {
-  Atomic64 old_value;
-  do {
-    old_value = *ptr;
-  } while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
-  return old_value;
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
-                                          Atomic64 increment) {
-  return Barrier_AtomicIncrement(ptr, increment);
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
-                                        Atomic64 increment) {
-  return __sync_add_and_fetch(ptr, increment);
-}
-
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
-                                       Atomic64 old_value, Atomic64 new_value) {
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
-                                       Atomic64 old_value, Atomic64 new_value) {
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
-  *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
-  MemoryBarrier();
-  *ptr = value;
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { return *ptr; }
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
-  Atomic64 value = *ptr;
-  MemoryBarrier();
-  return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
-#endif
-
-}  // namespace base
-}  // namespace v8
-
-#endif  // V8_BASE_ATOMICOPS_INTERNALS_S390_H_
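
The S390 backend relied on the legacy __sync builtins, which are full-barrier operations. A sketch of their rough equivalents in terms of the __atomic builtins the new portable header standardizes on (assuming int32_t operands):

```cpp
#include <cstdint>

// __sync_val_compare_and_swap: a full-barrier CAS returning the old value.
inline int32_t SyncValCompareAndSwapSketch(volatile int32_t* ptr,
                                           int32_t old_value,
                                           int32_t new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  return old_value;
}

// __sync_add_and_fetch: a full-barrier add returning the new value.
inline int32_t SyncAddAndFetchSketch(volatile int32_t* ptr,
                                     int32_t increment) {
  return __atomic_add_fetch(ptr, increment, __ATOMIC_SEQ_CST);
}
```
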
diff --git a/src/base/atomicops_internals_tsan.h b/src/base/atomicops_internals_tsan.h
deleted file mode 100644
index 646e5bd..0000000
--- a/src/base/atomicops_internals_tsan.h
+++ /dev/null
@@ -1,363 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-
-// This file is an internal atomic implementation for compiler-based
-// ThreadSanitizer. Use base/atomicops.h instead.
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
-
-namespace v8 {
-namespace base {
-
-#ifndef TSAN_INTERFACE_ATOMIC_H
-#define TSAN_INTERFACE_ATOMIC_H
-
-
-extern "C" {
-typedef char  __tsan_atomic8;
-typedef short __tsan_atomic16;  // NOLINT
-typedef int   __tsan_atomic32;
-typedef long  __tsan_atomic64;  // NOLINT
-
-#if defined(__SIZEOF_INT128__) \
-    || (__clang_major__ * 100 + __clang_minor__ >= 302)
-typedef __int128 __tsan_atomic128;
-#define __TSAN_HAS_INT128 1
-#else
-typedef char     __tsan_atomic128;
-#define __TSAN_HAS_INT128 0
-#endif
-
-typedef enum {
-  __tsan_memory_order_relaxed,
-  __tsan_memory_order_consume,
-  __tsan_memory_order_acquire,
-  __tsan_memory_order_release,
-  __tsan_memory_order_acq_rel,
-  __tsan_memory_order_seq_cst,
-} __tsan_memory_order;
-
-__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
-    __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
-    __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
-    __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
-    __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128* a,
-    __tsan_memory_order mo);
-
-void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
-    __tsan_memory_order mo);
-void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
-    __tsan_memory_order mo);
-void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
-    __tsan_memory_order mo);
-void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
-    __tsan_memory_order mo);
-void __tsan_atomic128_store(volatile __tsan_atomic128* a, __tsan_atomic128 v,
-    __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
-    __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
-    __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
-    __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
-    __tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128* a,
-    __tsan_atomic128 v, __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
-    __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
-    __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
-    __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
-    __tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128* a,
-    __tsan_atomic128 v, __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
-    __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
-    __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
-    __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
-    __tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128* a,
-    __tsan_atomic128 v, __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
-    __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
-    __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
-    __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
-    __tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128* a,
-    __tsan_atomic128 v, __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
-    __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
-    __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
-    __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
-    __tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128* a,
-    __tsan_atomic128 v, __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8* a,
-    __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16* a,
-    __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32* a,
-    __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64* a,
-    __tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128* a,
-    __tsan_atomic128 v, __tsan_memory_order mo);
-
-int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
-    __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
-    __tsan_memory_order fail_mo);
-int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
-    __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
-    __tsan_memory_order fail_mo);
-int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
-    __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
-    __tsan_memory_order fail_mo);
-int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
-    __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
-    __tsan_memory_order fail_mo);
-int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128* a,
-    __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
-    __tsan_memory_order fail_mo);
-
-int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
-    __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
-    __tsan_memory_order fail_mo);
-int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
-    __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
-    __tsan_memory_order fail_mo);
-int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
-    __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
-    __tsan_memory_order fail_mo);
-int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
-    __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
-    __tsan_memory_order fail_mo);
-int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128* a,
-    __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
-    __tsan_memory_order fail_mo);
-
-__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
-    volatile __tsan_atomic8* a, __tsan_atomic8 c, __tsan_atomic8 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
-__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
-    volatile __tsan_atomic16* a, __tsan_atomic16 c, __tsan_atomic16 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
-__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
-    volatile __tsan_atomic32* a, __tsan_atomic32 c, __tsan_atomic32 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
-__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
-    volatile __tsan_atomic64* a, __tsan_atomic64 c, __tsan_atomic64 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
-__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
-    volatile __tsan_atomic128* a, __tsan_atomic128 c, __tsan_atomic128 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
-
-void __tsan_atomic_thread_fence(__tsan_memory_order mo);
-void __tsan_atomic_signal_fence(__tsan_memory_order mo);
-}  // extern "C"
-
-#endif  // #ifndef TSAN_INTERFACE_ATOMIC_H
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
-                                         Atomic32 old_value,
-                                         Atomic32 new_value) {
-  Atomic32 cmp = old_value;
-  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
-      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
-  return cmp;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
-                                         Atomic32 new_value) {
-  return __tsan_atomic32_exchange(ptr, new_value,
-      __tsan_memory_order_relaxed);
-}
-
-inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
-                                       Atomic32 new_value) {
-  return __tsan_atomic32_exchange(ptr, new_value,
-      __tsan_memory_order_acquire);
-}
-
-inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
-                                       Atomic32 new_value) {
-  return __tsan_atomic32_exchange(ptr, new_value,
-      __tsan_memory_order_release);
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
-                                          Atomic32 increment) {
-  return increment + __tsan_atomic32_fetch_add(ptr, increment,
-      __tsan_memory_order_relaxed);
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
-                                        Atomic32 increment) {
-  return increment + __tsan_atomic32_fetch_add(ptr, increment,
-      __tsan_memory_order_acq_rel);
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  Atomic32 cmp = old_value;
-  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
-      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
-  return cmp;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  Atomic32 cmp = old_value;
-  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
-      __tsan_memory_order_release, __tsan_memory_order_relaxed);
-  return cmp;
-}
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
-  __tsan_atomic8_store(ptr, value, __tsan_memory_order_relaxed);
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
-  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
-  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-  __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
-  return __tsan_atomic8_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
-  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
-  return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
-  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
-                                         Atomic64 old_value,
-                                         Atomic64 new_value) {
-  Atomic64 cmp = old_value;
-  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
-      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
-  return cmp;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
-                                         Atomic64 new_value) {
-  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
-                                       Atomic64 new_value) {
-  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
-}
-
-inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
-                                       Atomic64 new_value) {
-  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
-                                          Atomic64 increment) {
-  return increment + __tsan_atomic64_fetch_add(ptr, increment,
-      __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
-                                        Atomic64 increment) {
-  return increment + __tsan_atomic64_fetch_add(ptr, increment,
-      __tsan_memory_order_acq_rel);
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
-  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
-  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
-  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
-  __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
-  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
-  return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
-  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
-                                       Atomic64 old_value,
-                                       Atomic64 new_value) {
-  Atomic64 cmp = old_value;
-  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
-      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
-  return cmp;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
-                                       Atomic64 old_value,
-                                       Atomic64 new_value) {
-  Atomic64 cmp = old_value;
-  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
-      __tsan_memory_order_release, __tsan_memory_order_relaxed);
-  return cmp;
-}
-
-inline void MemoryBarrier() {
-  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
-}
-
-}  // namespace base
-}  // namespace v8
-
-#endif  // V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
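
A plausible rationale for dropping the hand-written TSan shim (hedged, since the commit message is not shown here): ThreadSanitizer instruments the __atomic builtins directly, so once atomicops is expressed in terms of them, no __tsan_* forwarding layer is needed. For example, the relaxed load that previously went through __tsan_atomic32_load reduces to:

```cpp
#include <cstdint>

// Under -fsanitize=thread, the compiler instruments the __atomic builtins
// themselves, so this relaxed load is visible to TSan without any shim.
inline int32_t RelaxedLoadSketch(volatile const int32_t* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}
```
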
diff --git a/src/base/atomicops_internals_x86_gcc.cc b/src/base/atomicops_internals_x86_gcc.cc
deleted file mode 100644
index c031030..0000000
--- a/src/base/atomicops_internals_x86_gcc.cc
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This module gets enough CPU information to optimize the
-// atomicops module on x86.
-
-#include <string.h>  // NOLINT(build/include)
-
-#include "src/base/atomicops.h"
-
-// This file only makes sense with atomicops_internals_x86_gcc.h -- it
-// depends on structs that are defined in that file.  If atomicops.h
-// doesn't sub-include that file, then we aren't needed, and shouldn't
-// try to do anything.
-#ifdef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
-
-// Inline cpuid instruction.  In PIC compilations, %ebx contains the address
-// of the global offset table.  To avoid breaking such executables, this code
-// must preserve that register's value across cpuid instructions.
-#if defined(__i386__)
-#define cpuid(a, b, c, d, inp) \
-  asm("mov %%ebx, %%edi\n"     \
-      "cpuid\n"                \
-      "xchg %%edi, %%ebx\n"    \
-      : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
-#elif defined(__x86_64__)
-#define cpuid(a, b, c, d, inp) \
-  asm("mov %%rbx, %%rdi\n"     \
-      "cpuid\n"                \
-      "xchg %%rdi, %%rbx\n"    \
-      : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
-#endif
-
-#if defined(cpuid)        // initialize the struct only on x86
-
-namespace v8 {
-namespace base {
-
-// Set the flags so that code will run correctly and conservatively, so even
-// if we haven't been initialized yet, we're probably single threaded, and our
-// default values should hopefully be pretty safe.
-struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
-  false,          // bug can't exist before process spawns multiple threads
-#if !defined(__SSE2__)
-  false,          // no SSE2
-#endif
-};
-
-}  // namespace base
-}  // namespace v8
-
-namespace {
-
-// Initialize the AtomicOps_Internalx86CPUFeatures struct.
-void AtomicOps_Internalx86CPUFeaturesInit() {
-  using v8::base::AtomicOps_Internalx86CPUFeatures;
-
-  uint32_t eax = 0;
-  uint32_t ebx = 0;
-  uint32_t ecx = 0;
-  uint32_t edx = 0;
-
-  // Get vendor string (issue CPUID with eax = 0)
-  cpuid(eax, ebx, ecx, edx, 0);
-  char vendor[13];
-  memcpy(vendor, &ebx, 4);
-  memcpy(vendor + 4, &edx, 4);
-  memcpy(vendor + 8, &ecx, 4);
-  vendor[12] = 0;
-
-  // get feature flags in ecx/edx, and family/model in eax
-  cpuid(eax, ebx, ecx, edx, 1);
-
-  int family = (eax >> 8) & 0xf;        // family and model fields
-  int model = (eax >> 4) & 0xf;
-  if (family == 0xf) {                  // use extended family and model fields
-    family += (eax >> 20) & 0xff;
-    model += ((eax >> 16) & 0xf) << 4;
-  }
-
-  // Opteron Rev E has a bug in which on very rare occasions a locked
-  // instruction doesn't act as a read-acquire barrier if followed by a
-  // non-locked read-modify-write instruction.  Rev F has this bug in
-  // pre-release versions, but not in versions released to customers,
-  // so we test only for Rev E, which is family 15, model 32..63 inclusive.
-  if (strcmp(vendor, "AuthenticAMD") == 0 &&       // AMD
-      family == 15 &&
-      32 <= model && model <= 63) {
-    AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = true;
-  } else {
-    AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false;
-  }
-
-#if !defined(__SSE2__)
-  // edx bit 26 is SSE2 which we use to tell use whether we can use mfence
-  AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
-#endif
-}
-
-class AtomicOpsx86Initializer {
- public:
-  AtomicOpsx86Initializer() {
-    AtomicOps_Internalx86CPUFeaturesInit();
-  }
-};
-
-
-// A global to get use initialized on startup via static initialization :/
-AtomicOpsx86Initializer g_initer;
-
-}  // namespace
-
-#endif  // if x86
-
-#endif  // ifdef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
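
The deleted .cc probed cpuid by hand, saving %ebx/%rbx to stay PIC-safe. For comparison, GCC and Clang ship <cpuid.h>, whose __get_cpuid helper handles that register juggling itself; a standalone sketch of the same vendor-string probe:

```cpp
#include <cpuid.h>
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  unsigned int eax, ebx, ecx, edx;
  // Leaf 0: vendor string arrives in ebx, edx, ecx, in that order, exactly
  // as the removed code assembled it.
  if (__get_cpuid(0, &eax, &ebx, &ecx, &edx)) {
    char vendor[13];
    std::memcpy(vendor, &ebx, 4);
    std::memcpy(vendor + 4, &edx, 4);
    std::memcpy(vendor + 8, &ecx, 4);
    vendor[12] = '\0';
    std::printf("%s\n", vendor);
  }
  return 0;
}
```
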
diff --git a/src/base/atomicops_internals_x86_gcc.h b/src/base/atomicops_internals_x86_gcc.h
deleted file mode 100644
index 55bc44c..0000000
--- a/src/base/atomicops_internals_x86_gcc.h
+++ /dev/null
@@ -1,275 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
-
-namespace v8 {
-namespace base {
-
-// This struct is not part of the public API of this module; clients may not
-// use it.
-// Features of this x86.  Values may not be correct before main() is run,
-// but are set conservatively.
-struct AtomicOps_x86CPUFeatureStruct {
-  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
-                             // after acquire compare-and-swap.
-#if !defined(__SSE2__)
-  bool has_sse2;             // Processor has SSE2.
-#endif
-};
-extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
-
-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
-
-// 32-bit low-level operations on any platform.
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
-                                         Atomic32 old_value,
-                                         Atomic32 new_value) {
-  Atomic32 prev;
-  __asm__ __volatile__("lock; cmpxchgl %1,%2"
-                       : "=a" (prev)
-                       : "q" (new_value), "m" (*ptr), "0" (old_value)
-                       : "memory");
-  return prev;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
-                                         Atomic32 new_value) {
-  __asm__ __volatile__("xchgl %1,%0"  // The lock prefix is implicit for xchg.
-                       : "=r" (new_value)
-                       : "m" (*ptr), "0" (new_value)
-                       : "memory");
-  return new_value;  // Now it's the previous value.
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
-                                          Atomic32 increment) {
-  Atomic32 temp = increment;
-  __asm__ __volatile__("lock; xaddl %0,%1"
-                       : "+r" (temp), "+m" (*ptr)
-                       : : "memory");
-  // temp now holds the old value of *ptr
-  return temp + increment;
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
-                                        Atomic32 increment) {
-  Atomic32 temp = increment;
-  __asm__ __volatile__("lock; xaddl %0,%1"
-                       : "+r" (temp), "+m" (*ptr)
-                       : : "memory");
-  // temp now holds the old value of *ptr
-  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
-    __asm__ __volatile__("lfence" : : : "memory");
-  }
-  return temp + increment;
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
-    __asm__ __volatile__("lfence" : : : "memory");
-  }
-  return x;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
-  *ptr = value;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-}
-
-#if defined(__x86_64__) || defined(__SSE2__)
-
-// 64-bit implementations of memory barrier can be simpler, because it
-// "mfence" is guaranteed to exist.
-inline void MemoryBarrier() {
-  __asm__ __volatile__("mfence" : : : "memory");
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
-#else
-
-inline void MemoryBarrier() {
-  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
-    __asm__ __volatile__("mfence" : : : "memory");
-  } else {  // mfence is faster but not present on PIII
-    Atomic32 x = 0;
-    NoBarrier_AtomicExchange(&x, 0);  // acts as a barrier on PIII
-  }
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
-    *ptr = value;
-    __asm__ __volatile__("mfence" : : : "memory");
-  } else {
-    NoBarrier_AtomicExchange(ptr, value);
-                          // acts as a barrier on PIII
-  }
-}
-#endif
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-  ATOMICOPS_COMPILER_BARRIER();
-  *ptr = value;  // An x86 store acts as a release barrier.
-  // See comments in Atomic64 version of Release_Store(), below.
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
-  return *ptr;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
-  return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
-  Atomic32 value = *ptr;  // An x86 load acts as a acquire barrier.
-  // See comments in Atomic64 version of Release_Store(), below.
-  ATOMICOPS_COMPILER_BARRIER();
-  return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
-#if defined(__x86_64__) && defined(V8_HOST_ARCH_64_BIT)
-
-// 64-bit low-level operations on 64-bit platform.
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
-                                         Atomic64 old_value,
-                                         Atomic64 new_value) {
-  Atomic64 prev;
-  __asm__ __volatile__("lock; cmpxchgq %1,%2"
-                       : "=a" (prev)
-                       : "q" (new_value), "m" (*ptr), "0" (old_value)
-                       : "memory");
-  return prev;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
-                                         Atomic64 new_value) {
-  __asm__ __volatile__("xchgq %1,%0"  // The lock prefix is implicit for xchg.
-                       : "=r" (new_value)
-                       : "m" (*ptr), "0" (new_value)
-                       : "memory");
-  return new_value;  // Now it's the previous value.
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
-                                          Atomic64 increment) {
-  Atomic64 temp = increment;
-  __asm__ __volatile__("lock; xaddq %0,%1"
-                       : "+r" (temp), "+m" (*ptr)
-                       : : "memory");
-  // temp now contains the previous value of *ptr
-  return temp + increment;
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
-                                        Atomic64 increment) {
-  Atomic64 temp = increment;
-  __asm__ __volatile__("lock; xaddq %0,%1"
-                       : "+r" (temp), "+m" (*ptr)
-                       : : "memory");
-  // temp now contains the previous value of *ptr
-  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
-    __asm__ __volatile__("lfence" : : : "memory");
-  }
-  return temp + increment;
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
-  *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
-  ATOMICOPS_COMPILER_BARRIER();
-
-  *ptr = value;  // An x86 store acts as a release barrier
-                 // for current AMD/Intel chips as of Jan 2008.
-                 // See also Acquire_Load(), below.
-
-  // When new chips come out, check:
-  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
-  //  System Programming Guide, Chatper 7: Multiple-processor management,
-  //  Section 7.2, Memory Ordering.
-  // Last seen at:
-  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
-  //
-  // x86 stores/loads fail to act as barriers for a few instructions (clflush
-  // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
-  // not generated by the compiler, and are rare.  Users of these instructions
-  // need to know about cache behaviour in any case since all of these involve
-  // either flushing cache lines or non-temporal cache hints.
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
-  return *ptr;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
-  Atomic64 value = *ptr;  // An x86 load acts as a acquire barrier,
-                          // for current AMD/Intel chips as of Jan 2008.
-                          // See also Release_Store(), above.
-  ATOMICOPS_COMPILER_BARRIER();
-  return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
-                                       Atomic64 old_value,
-                                       Atomic64 new_value) {
-  Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
-    __asm__ __volatile__("lfence" : : : "memory");
-  }
-  return x;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
-                                       Atomic64 old_value,
-                                       Atomic64 new_value) {
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-#endif  // defined(__x86_64__)
-
-}  // namespace base
-}  // namespace v8
-
-#undef ATOMICOPS_COMPILER_BARRIER
-
-#endif  // V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
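
The hand-rolled "lock; xadd" and "xchg" sequences above are essentially what compilers emit for the corresponding __atomic builtins on x86, which is what makes the portable replacement viable; a sketch:

```cpp
#include <cstdint>

// On x86-64 these lower to "lock xadd" and (implicitly locked) "xchg",
// matching the removed inline assembly (illustrative sketch only).
inline int64_t FetchAddSketch(volatile int64_t* ptr, int64_t increment) {
  return __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST);
}

inline int64_t ExchangeSketch(volatile int64_t* ptr, int64_t new_value) {
  return __atomic_exchange_n(ptr, new_value, __ATOMIC_SEQ_CST);
}
```
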
diff --git a/src/base/atomicops_internals_x86_msvc.h b/src/base/atomicops_internals_x86_msvc.h
index c37bc78..0d2068e 100644
--- a/src/base/atomicops_internals_x86_msvc.h
+++ b/src/base/atomicops_internals_x86_msvc.h
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// This file is an internal atomic implementation, use atomicops.h instead.
+// This file is an internal atomic implementation, use base/atomicops.h instead.
 
 #ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
 #define V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
@@ -26,25 +26,23 @@
                                          Atomic32 old_value,
                                          Atomic32 new_value) {
   LONG result = InterlockedCompareExchange(
-      reinterpret_cast<volatile LONG*>(ptr),
-      static_cast<LONG>(new_value),
+      reinterpret_cast<volatile LONG*>(ptr), static_cast<LONG>(new_value),
       static_cast<LONG>(old_value));
   return static_cast<Atomic32>(result);
 }
 
 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                          Atomic32 new_value) {
-  LONG result = InterlockedExchange(
-      reinterpret_cast<volatile LONG*>(ptr),
-      static_cast<LONG>(new_value));
+  LONG result = InterlockedExchange(reinterpret_cast<volatile LONG*>(ptr),
+                                    static_cast<LONG>(new_value));
   return static_cast<Atomic32>(result);
 }
 
 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                         Atomic32 increment) {
-  return InterlockedExchangeAdd(
-      reinterpret_cast<volatile LONG*>(ptr),
-      static_cast<LONG>(increment)) + increment;
+  return InterlockedExchangeAdd(reinterpret_cast<volatile LONG*>(ptr),
+                                static_cast<LONG>(increment)) +
+         increment;
 }
 
 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
@@ -52,9 +50,6 @@
   return Barrier_AtomicIncrement(ptr, increment);
 }
 
-#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
-#error "We require at least vs2005 for MemoryBarrier"
-#endif
 inline void MemoryBarrier() {
 #if defined(V8_HOST_ARCH_64_BIT)
   // See #undef and note at the top of this file.
@@ -85,11 +80,6 @@
   *ptr = value;
 }
 
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  NoBarrier_AtomicExchange(ptr, value);
-              // acts as a barrier in this implementation
-}
-
 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;  // works w/o barrier for current Intel chips as of June 2005
   // See comments in Atomic64 version of Release_Store() below.
@@ -108,16 +98,11 @@
   return value;
 }
 
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
 #if defined(_WIN64)
 
 // 64-bit low-level operations on 64-bit platform.
 
-STATIC_ASSERT(sizeof(Atomic64) == sizeof(PVOID));
+static_assert(sizeof(Atomic64) == sizeof(PVOID), "atomic word is atomic");
 
 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                          Atomic64 old_value,
@@ -152,11 +137,6 @@
   *ptr = value;
 }
 
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
-  NoBarrier_AtomicExchange(ptr, value);
-              // acts as a barrier in this implementation
-}
-
 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
   *ptr = value;  // works w/o barrier for current Intel chips as of June 2005
 
@@ -177,11 +157,6 @@
   return value;
 }
 
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
diff --git a/src/base/base-export.h b/src/base/base-export.h
new file mode 100644
index 0000000..a2b3dac
--- /dev/null
+++ b/src/base/base-export.h
@@ -0,0 +1,31 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_BASE_EXPORT_H_
+#define V8_BASE_BASE_EXPORT_H_
+
+#include "include/v8config.h"
+
+#if V8_OS_WIN
+
+#ifdef BUILDING_V8_BASE_SHARED
+#define V8_BASE_EXPORT __declspec(dllexport)
+#elif USING_V8_BASE_SHARED
+#define V8_BASE_EXPORT __declspec(dllimport)
+#else
+#define V8_BASE_EXPORT
+#endif  // BUILDING_V8_BASE_SHARED
+
+#else  // !V8_OS_WIN
+
+// Setup for Linux shared library export.
+#ifdef BUILDING_V8_BASE_SHARED
+#define V8_BASE_EXPORT __attribute__((visibility("default")))
+#else
+#define V8_BASE_EXPORT
+#endif
+
+#endif  // V8_OS_WIN
+
+#endif  // V8_BASE_BASE_EXPORT_H_
diff --git a/src/base/bits.h b/src/base/bits.h
index da12ee6..b186494 100644
--- a/src/base/bits.h
+++ b/src/base/bits.h
@@ -6,6 +6,8 @@
 #define V8_BASE_BITS_H_
 
 #include <stdint.h>
+
+#include "src/base/base-export.h"
 #include "src/base/macros.h"
 #if V8_CC_MSVC
 #include <intrin.h>
@@ -172,8 +174,7 @@
 // power of two, it is returned as is. |value| must be less than or equal to
 // 0x80000000u. Implementation is from "Hacker's Delight" by Henry S. Warren,
 // Jr., figure 3-3, page 48, where the function is called clp2.
-uint32_t RoundUpToPowerOfTwo32(uint32_t value);
-
+V8_BASE_EXPORT uint32_t RoundUpToPowerOfTwo32(uint32_t value);
 
 // RoundDownToPowerOfTwo32(value) returns the greatest power of two which is
 // less than or equal to |value|. If you pass in a |value| that is already a
@@ -241,7 +242,7 @@
 // SignedMulOverflow32(lhs,rhs,val) performs a signed multiplication of |lhs|
 // and |rhs| and stores the result into the variable pointed to by |val| and
 // returns true if the signed multiplication resulted in an overflow.
-bool SignedMulOverflow32(int32_t lhs, int32_t rhs, int32_t* val);
+V8_BASE_EXPORT bool SignedMulOverflow32(int32_t lhs, int32_t rhs, int32_t* val);
 
 // SignedAddOverflow64(lhs,rhs,val) performs a signed summation of |lhs| and
 // |rhs| and stores the result into the variable pointed to by |val| and
@@ -265,31 +266,28 @@
 // SignedMulOverflow64(lhs,rhs,val) performs a signed multiplication of |lhs|
 // and |rhs| and stores the result into the variable pointed to by |val| and
 // returns true if the signed multiplication resulted in an overflow.
-bool SignedMulOverflow64(int64_t lhs, int64_t rhs, int64_t* val);
+V8_BASE_EXPORT bool SignedMulOverflow64(int64_t lhs, int64_t rhs, int64_t* val);
 
 // SignedMulHigh32(lhs, rhs) multiplies two signed 32-bit values |lhs| and
 // |rhs|, extracts the most significant 32 bits of the result, and returns
 // those.
-int32_t SignedMulHigh32(int32_t lhs, int32_t rhs);
-
+V8_BASE_EXPORT int32_t SignedMulHigh32(int32_t lhs, int32_t rhs);
 
 // SignedMulHighAndAdd32(lhs, rhs, acc) multiplies two signed 32-bit values
 // |lhs| and |rhs|, extracts the most significant 32 bits of the result, and
 // adds the accumulate value |acc|.
-int32_t SignedMulHighAndAdd32(int32_t lhs, int32_t rhs, int32_t acc);
-
+V8_BASE_EXPORT int32_t SignedMulHighAndAdd32(int32_t lhs, int32_t rhs,
+                                             int32_t acc);
 
 // SignedDiv32(lhs, rhs) divides |lhs| by |rhs| and returns the quotient
 // truncated to int32. If |rhs| is zero, then zero is returned. If |lhs|
 // is minint and |rhs| is -1, it returns minint.
-int32_t SignedDiv32(int32_t lhs, int32_t rhs);
-
+V8_BASE_EXPORT int32_t SignedDiv32(int32_t lhs, int32_t rhs);
 
 // SignedMod32(lhs, rhs) divides |lhs| by |rhs| and returns the remainder
 // truncated to int32. If either |rhs| is zero or |lhs| is minint and |rhs|
 // is -1, it returns zero.
-int32_t SignedMod32(int32_t lhs, int32_t rhs);
-
+V8_BASE_EXPORT int32_t SignedMod32(int32_t lhs, int32_t rhs);
 
 // UnsignedAddOverflow32(lhs,rhs,val) performs an unsigned summation of |lhs|
 // and |rhs| and stores the result into the variable pointed to by |val| and
@@ -319,18 +317,16 @@
 
 
 // Clamp |value| on overflow and underflow conditions.
-int64_t FromCheckedNumeric(const internal::CheckedNumeric<int64_t> value);
-
+V8_BASE_EXPORT int64_t
+FromCheckedNumeric(const internal::CheckedNumeric<int64_t> value);
 
 // SignedSaturatedAdd64(lhs, rhs) adds |lhs| and |rhs|,
 // saturating on overflow or underflow, and returns the result.
-int64_t SignedSaturatedAdd64(int64_t lhs, int64_t rhs);
-
+V8_BASE_EXPORT int64_t SignedSaturatedAdd64(int64_t lhs, int64_t rhs);
 
 // SignedSaturatedSub64(lhs, rhs) subtracts |rhs| from |lhs|,
 // saturating on overflow or underflow, and returns the result.
-int64_t SignedSaturatedSub64(int64_t lhs, int64_t rhs);
-
+V8_BASE_EXPORT int64_t SignedSaturatedSub64(int64_t lhs, int64_t rhs);
 
 }  // namespace bits
 }  // namespace base
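
For reference, the clp2 routine from Hacker's Delight cited in the RoundUpToPowerOfTwo32 comment can be sketched as follows; this is an illustration of the algorithm, not necessarily the exact body in src/base/bits.cc:

#include <stdint.h>

// Smears the highest set bit of (value - 1) into every lower position,
// then adds one; requires value <= 0x80000000u, as documented above.
uint32_t RoundUpToPowerOfTwo32Sketch(uint32_t value) {
  value = value - 1;
  value |= value >> 1;
  value |= value >> 2;
  value |= value >> 4;
  value |= value >> 8;
  value |= value >> 16;
  return value + 1;
}
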
diff --git a/src/base/compiler-specific.h b/src/base/compiler-specific.h
index 822893f..1858caa 100644
--- a/src/base/compiler-specific.h
+++ b/src/base/compiler-specific.h
@@ -60,4 +60,46 @@
 #define STATIC_CONST_MEMBER_DEFINITION
 #endif
 
+#if V8_CC_MSVC
+
+#include <sal.h>
+
+// Macros for suppressing and disabling warnings on MSVC.
+//
+// Warning numbers are enumerated at:
+// http://msdn.microsoft.com/en-us/library/8x5x43k7(VS.80).aspx
+//
+// The warning pragma:
+// http://msdn.microsoft.com/en-us/library/2c8f766e(VS.80).aspx
+//
+// Using __pragma instead of #pragma inside macros:
+// http://msdn.microsoft.com/en-us/library/d9x1s805.aspx
+
+// MSVC_SUPPRESS_WARNING disables warning |n| for the remainder of the line and
+// for the next line of the source file.
+#define MSVC_SUPPRESS_WARNING(n) __pragma(warning(suppress : n))
+
+// Allows exporting a class that inherits from a non-exported base class.
+// This uses suppress instead of push/pop because the delimiter after the
+// declaration (either "," or "{") has to be placed before the pop macro.
+//
+// Example usage:
+// class EXPORT_API Foo : NON_EXPORTED_BASE(public Bar) {
+//
+// MSVC Compiler warning C4275:
+// non dll-interface class 'Bar' used as base for dll-interface class 'Foo'.
+// Note that this is intended to be used only when no access to the base class'
+// static data is done through derived classes or inline methods. For more info,
+// see http://msdn.microsoft.com/en-us/library/3tdb471s(VS.80).aspx
+#define NON_EXPORTED_BASE(code) \
+  MSVC_SUPPRESS_WARNING(4275)   \
+  code
+
+#else  // Not MSVC
+
+#define MSVC_SUPPRESS_WARNING(n)
+#define NON_EXPORTED_BASE(code) code
+
+#endif  // V8_CC_MSVC
+
 #endif  // V8_BASE_COMPILER_SPECIFIC_H_
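
A usage sketch for the new NON_EXPORTED_BASE macro (class names hypothetical): it lets an exported class derive from a non-exported base without tripping MSVC warning C4275:

#include "src/base/base-export.h"
#include "src/base/compiler-specific.h"

// Bar deliberately carries no export annotation.
class Bar {
 public:
  int value() const { return 42; }
};

// On MSVC, NON_EXPORTED_BASE expands to
// __pragma(warning(suppress : 4275)) in front of the base-class clause;
// on other compilers it is a no-op.
class V8_BASE_EXPORT Foo : NON_EXPORTED_BASE(public Bar) {};
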
diff --git a/src/base/cpu.cc b/src/base/cpu.cc
index 7757192..cf1f9c3 100644
--- a/src/base/cpu.cc
+++ b/src/base/cpu.cc
@@ -415,7 +415,7 @@
   }
 
   // Check if CPU has non stoppable time stamp counter.
-  const int parameter_containing_non_stop_time_stamp_counter = 0x80000007;
+  const unsigned parameter_containing_non_stop_time_stamp_counter = 0x80000007;
   if (num_ext_ids >= parameter_containing_non_stop_time_stamp_counter) {
     __cpuid(cpu_info, parameter_containing_non_stop_time_stamp_counter);
     has_non_stop_time_stamp_counter_ = (cpu_info[3] & (1 << 8)) != 0;
@@ -607,7 +607,7 @@
   char* implementer = cpu_info.ExtractField("CPU implementer");
   if (implementer != NULL) {
     char* end;
-    implementer_ = strtol(implementer, &end, 0);
+    implementer_ = static_cast<int>(strtol(implementer, &end, 0));
     if (end == implementer) {
       implementer_ = 0;
     }
@@ -617,7 +617,7 @@
   char* variant = cpu_info.ExtractField("CPU variant");
   if (variant != NULL) {
     char* end;
-    variant_ = strtol(variant, &end, 0);
+    variant_ = static_cast<int>(strtol(variant, &end, 0));
     if (end == variant) {
       variant_ = -1;
     }
@@ -628,7 +628,7 @@
   char* part = cpu_info.ExtractField("CPU part");
   if (part != NULL) {
     char* end;
-    part_ = strtol(part, &end, 0);
+    part_ = static_cast<int>(strtol(part, &end, 0));
     if (end == part) {
       part_ = 0;
     }
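
The int-to-unsigned change above matters because 0x80000007 does not fit in a 32-bit int; a minimal sketch of the pitfall, with hypothetical names:

#include <stdint.h>

void Sketch(uint32_t num_ext_ids) {
  // 0x80000007 exceeds INT_MAX, so storing it in an int is
  // implementation-defined (it typically becomes negative) and draws
  // sign-conversion warnings when compared against an unsigned value.
  const unsigned leaf = 0x80000007u;
  if (num_ext_ids >= leaf) {
    // Safe unsigned comparison against the extended-cpuid leaf.
  }
}
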
diff --git a/src/base/cpu.h b/src/base/cpu.h
index 19d4102..e0fcea1 100644
--- a/src/base/cpu.h
+++ b/src/base/cpu.h
@@ -13,6 +13,7 @@
 #ifndef V8_BASE_CPU_H_
 #define V8_BASE_CPU_H_
 
+#include "src/base/base-export.h"
 #include "src/base/macros.h"
 
 namespace v8 {
@@ -28,7 +29,7 @@
 // architectures. For each architecture the file cpu_<arch>.cc contains the
 // implementation of these static functions.
 
-class CPU final {
+class V8_BASE_EXPORT CPU final {
  public:
   CPU();
 
diff --git a/src/base/debug/stack_trace.h b/src/base/debug/stack_trace.h
index e938ef2..1361bb5 100644
--- a/src/base/debug/stack_trace.h
+++ b/src/base/debug/stack_trace.h
@@ -13,6 +13,7 @@
 #include <iosfwd>
 #include <string>
 
+#include "src/base/base-export.h"
 #include "src/base/build_config.h"
 
 #if V8_OS_POSIX
@@ -31,8 +32,8 @@
 // Enables stack dump to console output on exception and signals.
 // When enabled, the process will quit immediately. This is meant to be used in
 // tests only!
-bool EnableInProcessStackDumping();
-void DisableSignalStackDump();
+V8_BASE_EXPORT bool EnableInProcessStackDumping();
+V8_BASE_EXPORT void DisableSignalStackDump();
 
 // A stacktrace can be helpful in debugging. For example, you can include a
 // stacktrace member in a object (probably around #ifndef NDEBUG) so that you
diff --git a/src/base/division-by-constant.cc b/src/base/division-by-constant.cc
index 5167b7a..03d198e 100644
--- a/src/base/division-by-constant.cc
+++ b/src/base/division-by-constant.cc
@@ -13,13 +13,6 @@
 namespace base {
 
 template <class T>
-bool MagicNumbersForDivision<T>::operator==(
-    const MagicNumbersForDivision& rhs) const {
-  return multiplier == rhs.multiplier && shift == rhs.shift && add == rhs.add;
-}
-
-
-template <class T>
 MagicNumbersForDivision<T> SignedDivisionByConstant(T d) {
   STATIC_ASSERT(static_cast<T>(0) < static_cast<T>(-1));
   DCHECK(d != static_cast<T>(-1) && d != 0 && d != 1);
@@ -100,8 +93,8 @@
 // -----------------------------------------------------------------------------
 // Instantiations.
 
-template struct MagicNumbersForDivision<uint32_t>;
-template struct MagicNumbersForDivision<uint64_t>;
+template struct V8_BASE_EXPORT MagicNumbersForDivision<uint32_t>;
+template struct V8_BASE_EXPORT MagicNumbersForDivision<uint64_t>;
 
 template MagicNumbersForDivision<uint32_t> SignedDivisionByConstant(uint32_t d);
 template MagicNumbersForDivision<uint64_t> SignedDivisionByConstant(uint64_t d);
diff --git a/src/base/division-by-constant.h b/src/base/division-by-constant.h
index 02e7e14..5d063f8 100644
--- a/src/base/division-by-constant.h
+++ b/src/base/division-by-constant.h
@@ -5,6 +5,10 @@
 #ifndef V8_BASE_DIVISION_BY_CONSTANT_H_
 #define V8_BASE_DIVISION_BY_CONSTANT_H_
 
+#include <stdint.h>
+
+#include "src/base/base-export.h"
+
 namespace v8 {
 namespace base {
 
@@ -14,10 +18,12 @@
 // Delight", chapter 10. The template parameter must be one of the unsigned
 // integral types.
 template <class T>
-struct MagicNumbersForDivision {
+struct V8_BASE_EXPORT MagicNumbersForDivision {
   MagicNumbersForDivision(T m, unsigned s, bool a)
       : multiplier(m), shift(s), add(a) {}
-  bool operator==(const MagicNumbersForDivision& rhs) const;
+  bool operator==(const MagicNumbersForDivision& rhs) const {
+    return multiplier == rhs.multiplier && shift == rhs.shift && add == rhs.add;
+  }
 
   T multiplier;
   unsigned shift;
@@ -28,17 +34,26 @@
 // Calculate the multiplier and shift for signed division via multiplication.
 // The divisor must not be -1, 0 or 1 when interpreted as a signed value.
 template <class T>
-MagicNumbersForDivision<T> SignedDivisionByConstant(T d);
-
+V8_BASE_EXPORT MagicNumbersForDivision<T> SignedDivisionByConstant(T d);
 
 // Calculate the multiplier and shift for unsigned division via multiplication,
 // see Warren's "Hacker's Delight", chapter 10. The divisor must not be 0 and
 // leading_zeros can be used to speed up the calculation if the given number of
 // upper bits of the dividend value are known to be zero.
 template <class T>
-MagicNumbersForDivision<T> UnsignedDivisionByConstant(
+V8_BASE_EXPORT MagicNumbersForDivision<T> UnsignedDivisionByConstant(
     T d, unsigned leading_zeros = 0);
 
+extern template V8_BASE_EXPORT MagicNumbersForDivision<uint32_t>
+SignedDivisionByConstant(uint32_t d);
+extern template V8_BASE_EXPORT MagicNumbersForDivision<uint64_t>
+SignedDivisionByConstant(uint64_t d);
+
+extern template V8_BASE_EXPORT MagicNumbersForDivision<uint32_t>
+UnsignedDivisionByConstant(uint32_t d, unsigned leading_zeros);
+extern template V8_BASE_EXPORT MagicNumbersForDivision<uint64_t>
+UnsignedDivisionByConstant(uint64_t d, unsigned leading_zeros);
+
 }  // namespace base
 }  // namespace v8
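
A worked instance of the multiply-and-shift scheme these declarations implement (the magic number below is the standard one for this divisor, shown independently of what UnsignedDivisionByConstant would compute): unsigned division by 3 uses multiplier 0xAAAAAAAB with an effective shift of 33, since 3 * 0xAAAAAAAB == 2^33 + 1.

#include <stdint.h>

// For every 32-bit n: n / 3 == (n * 0xAAAAAAAB) >> 33, because
// 0xAAAAAAAB == (2^33 + 1) / 3 and the error term stays below 1.
uint32_t DivideBy3(uint32_t n) {
  uint64_t product = static_cast<uint64_t>(n) * 0xAAAAAAABull;
  return static_cast<uint32_t>(product >> 33);
}
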
 
diff --git a/src/base/file-utils.cc b/src/base/file-utils.cc
index 2262df9..31b1b41 100644
--- a/src/base/file-utils.cc
+++ b/src/base/file-utils.cc
@@ -10,13 +10,13 @@
 #include "src/base/platform/platform.h"
 
 namespace v8 {
-namespace internal {
+namespace base {
 
 char* RelativePath(char** buffer, const char* exec_path, const char* name) {
   DCHECK(exec_path);
   int path_separator = static_cast<int>(strlen(exec_path)) - 1;
   while (path_separator >= 0 &&
-         !base::OS::isDirectorySeparator(exec_path[path_separator])) {
+         !OS::isDirectorySeparator(exec_path[path_separator])) {
     path_separator--;
   }
   if (path_separator >= 0) {
@@ -32,5 +32,5 @@
   return *buffer;
 }
 
-}  // namespace internal
+}  // namespace base
 }  // namespace v8
diff --git a/src/base/file-utils.h b/src/base/file-utils.h
index ce9e9a1..271f0ff 100644
--- a/src/base/file-utils.h
+++ b/src/base/file-utils.h
@@ -2,17 +2,20 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_FILE_UTILS_H_
-#define V8_FILE_UTILS_H_
+#ifndef V8_BASE_FILE_UTILS_H_
+#define V8_BASE_FILE_UTILS_H_
+
+#include "src/base/base-export.h"
 
 namespace v8 {
-namespace internal {
+namespace base {
 
 // Helper functions to manipulate file paths.
 
-char* RelativePath(char** buffer, const char* exec_path, const char* name);
+V8_BASE_EXPORT char* RelativePath(char** buffer, const char* exec_path,
+                                  const char* name);
 
-}  // namespace internal
+}  // namespace base
 }  // namespace v8
 
-#endif  // V8_FILE_UTILS_H_
+#endif  // V8_BASE_FILE_UTILS_H_
diff --git a/src/base/functional.h b/src/base/functional.h
index ff0d807..634e7ba 100644
--- a/src/base/functional.h
+++ b/src/base/functional.h
@@ -13,6 +13,7 @@
 #include <functional>
 #include <utility>
 
+#include "src/base/base-export.h"
 #include "src/base/macros.h"
 
 namespace v8 {
@@ -67,7 +68,7 @@
 
 V8_INLINE size_t hash_combine() { return 0u; }
 V8_INLINE size_t hash_combine(size_t seed) { return seed; }
-size_t hash_combine(size_t seed, size_t value);
+V8_BASE_EXPORT size_t hash_combine(size_t seed, size_t value);
 template <typename T, typename... Ts>
 V8_INLINE size_t hash_combine(T const& v, Ts const&... vs) {
   return hash_combine(hash_combine(vs...), hash<T>()(v));
@@ -91,9 +92,9 @@
 V8_BASE_HASH_VALUE_TRIVIAL(unsigned short)  // NOLINT(runtime/int)
 #undef V8_BASE_HASH_VALUE_TRIVIAL
 
-size_t hash_value(unsigned int);
-size_t hash_value(unsigned long);       // NOLINT(runtime/int)
-size_t hash_value(unsigned long long);  // NOLINT(runtime/int)
+V8_BASE_EXPORT size_t hash_value(unsigned int);
+V8_BASE_EXPORT size_t hash_value(unsigned long);       // NOLINT(runtime/int)
+V8_BASE_EXPORT size_t hash_value(unsigned long long);  // NOLINT(runtime/int)
 
 #define V8_BASE_HASH_VALUE_SIGNED(type)            \
   V8_INLINE size_t hash_value(signed type v) {     \
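
A usage sketch for the variadic hash_combine declared above (Point is a hypothetical type):

#include "src/base/functional.h"

struct Point {
  int x;
  int y;
};

// Folds the per-field hashes into a single seed, right to left, the way
// hash_combine(hash_combine(vs...), hash<T>()(v)) recurses.
size_t HashPoint(const Point& p) {
  return v8::base::hash_combine(p.x, p.y);
}
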
diff --git a/src/base/hashmap.h b/src/base/hashmap.h
index 54038c5..d2fc133 100644
--- a/src/base/hashmap.h
+++ b/src/base/hashmap.h
@@ -229,9 +229,8 @@
           class AllocationPolicy>
 void TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Clear() {
   // Mark all entries as empty.
-  const Entry* end = map_end();
-  for (Entry* entry = map_; entry < end; entry++) {
-    entry->clear();
+  for (size_t i = 0; i < capacity_; ++i) {
+    map_[i].clear();
   }
   occupancy_ = 0;
 }
@@ -264,19 +263,15 @@
 TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Probe(
     const Key& key, uint32_t hash) const {
   DCHECK(base::bits::IsPowerOfTwo32(capacity_));
-  Entry* entry = map_ + (hash & (capacity_ - 1));
-  const Entry* end = map_end();
-  DCHECK(map_ <= entry && entry < end);
+  size_t i = hash & (capacity_ - 1);
+  DCHECK(i < capacity_);
 
   DCHECK(occupancy_ < capacity_);  // Guarantees loop termination.
-  while (entry->exists() && !match_(hash, entry->hash, key, entry->key)) {
-    entry++;
-    if (entry >= end) {
-      entry = map_;
-    }
+  while (map_[i].exists() && !match_(hash, map_[i].hash, key, map_[i].key)) {
+    i = (i + 1) & (capacity_ - 1);
   }
 
-  return entry;
+  return &map_[i];
 }
 
 template <typename Key, typename Value, typename MatchFun,
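
The rewritten Probe walks slots by index instead of by pointer; the wrap-around is a single mask because capacity_ is a power of two. A sketch of just that step:

#include <stddef.h>

// With capacity == 8 the mask is 7, so probing from slot 5 visits
// 5, 6, 7, 0, 1, ... without a branch on the end of the table.
size_t NextSlot(size_t i, size_t capacity) {
  return (i + 1) & (capacity - 1);  // requires capacity to be 2^k
}
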
diff --git a/src/base/ieee754.h b/src/base/ieee754.h
index 80523a1..72f3db1 100644
--- a/src/base/ieee754.h
+++ b/src/base/ieee754.h
@@ -5,73 +5,75 @@
 #ifndef V8_BASE_IEEE754_H_
 #define V8_BASE_IEEE754_H_
 
+#include "src/base/base-export.h"
+
 namespace v8 {
 namespace base {
 namespace ieee754 {
 
 // Returns the arc cosine of |x|; that is the value whose cosine is |x|.
-double acos(double x);
+V8_BASE_EXPORT double acos(double x);
 
 // Returns the inverse hyperbolic cosine of |x|; that is the value whose
 // hyperbolic cosine is |x|.
-double acosh(double x);
+V8_BASE_EXPORT double acosh(double x);
 
 // Returns the arc sine of |x|; that is the value whose sine is |x|.
-double asin(double x);
+V8_BASE_EXPORT double asin(double x);
 
 // Returns the inverse hyperbolic sine of |x|; that is the value whose
 // hyperbolic sine is |x|.
-double asinh(double x);
+V8_BASE_EXPORT double asinh(double x);
 
 // Returns the principal value of the arc tangent of |x|; that is the value
 // whose tangent is |x|.
-double atan(double x);
+V8_BASE_EXPORT double atan(double x);
 
 // Returns the principal value of the arc tangent of |y/x|, using the signs of
 // the two arguments to determine the quadrant of the result.
-double atan2(double y, double x);
+V8_BASE_EXPORT double atan2(double y, double x);
 
 // Returns the cosine of |x|, where |x| is given in radians.
-double cos(double x);
+V8_BASE_EXPORT double cos(double x);
 
 // Returns the base-e exponential of |x|.
-double exp(double x);
+V8_BASE_EXPORT double exp(double x);
 
-double atanh(double x);
+V8_BASE_EXPORT double atanh(double x);
 
 // Returns the natural logarithm of |x|.
-double log(double x);
+V8_BASE_EXPORT double log(double x);
 
 // Returns a value equivalent to |log(1+x)|, but computed in a way that is
 // accurate even if the value of |x| is near zero.
-double log1p(double x);
+V8_BASE_EXPORT double log1p(double x);
 
 // Returns the base 2 logarithm of |x|.
-double log2(double x);
+V8_BASE_EXPORT double log2(double x);
 
 // Returns the base 10 logarithm of |x|.
-double log10(double x);
+V8_BASE_EXPORT double log10(double x);
 
 // Returns the cube root of |x|.
-double cbrt(double x);
+V8_BASE_EXPORT double cbrt(double x);
 
 // Returns exp(x)-1, the exponential of |x| minus 1.
-double expm1(double x);
+V8_BASE_EXPORT double expm1(double x);
 
 // Returns the sine of |x|, where |x| is given in radians.
-double sin(double x);
+V8_BASE_EXPORT double sin(double x);
 
 // Returns the tangent of |x|, where |x| is given in radians.
-double tan(double x);
+V8_BASE_EXPORT double tan(double x);
 
 // Returns the hyperbolic cosine of |x|, where |x| is given in radians.
-double cosh(double x);
+V8_BASE_EXPORT double cosh(double x);
 
 // Returns the hyperbolic sine of |x|, where |x| is given in radians.
-double sinh(double x);
+V8_BASE_EXPORT double sinh(double x);
 
 // Returns the hyperbolic tangent of |x|, where |x| is given in radians.
-double tanh(double x);
+V8_BASE_EXPORT double tanh(double x);
 
 }  // namespace ieee754
 }  // namespace base
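
Why log1p earns a separate entry point (an illustration using the C library equivalents, not code from this header): for |x| near zero, computing 1 + x first rounds away everything the logarithm needs.

#include <cmath>
#include <cstdio>

int main() {
  double x = 1e-18;
  // 1.0 + 1e-18 rounds to exactly 1.0 in double precision, so the naive
  // form returns 0 while log1p keeps the answer near 1e-18.
  std::printf("naive=%g log1p=%g\n", std::log(1.0 + x), std::log1p(x));
  return 0;
}
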
diff --git a/src/base/logging.h b/src/base/logging.h
index 50fceca..7bbb82a 100644
--- a/src/base/logging.h
+++ b/src/base/logging.h
@@ -9,10 +9,11 @@
 #include <sstream>
 #include <string>
 
+#include "src/base/base-export.h"
 #include "src/base/build_config.h"
 #include "src/base/compiler-specific.h"
 
-extern "C" PRINTF_FORMAT(3, 4) V8_NORETURN
+extern "C" PRINTF_FORMAT(3, 4) V8_NORETURN V8_BASE_EXPORT
     void V8_Fatal(const char* file, int line, const char* format, ...);
 
 // The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
@@ -87,8 +88,8 @@
 
 // Commonly used instantiations of MakeCheckOpString<>. Explicitly instantiated
 // in logging.cc.
-#define DEFINE_MAKE_CHECK_OP_STRING(type)                     \
-  extern template std::string* MakeCheckOpString<type, type>( \
+#define DEFINE_MAKE_CHECK_OP_STRING(type)                                    \
+  extern template V8_BASE_EXPORT std::string* MakeCheckOpString<type, type>( \
       type const&, type const&, char const*);
 DEFINE_MAKE_CHECK_OP_STRING(int)
 DEFINE_MAKE_CHECK_OP_STRING(long)       // NOLINT(runtime/int)
@@ -117,10 +118,11 @@
                                            char const* msg) {                  \
     return V8_LIKELY(lhs op rhs) ? nullptr : MakeCheckOpString(lhs, rhs, msg); \
   }                                                                            \
-  extern template std::string* Check##NAME##Impl<float, float>(                \
+  extern template V8_BASE_EXPORT std::string* Check##NAME##Impl<float, float>( \
       float const& lhs, float const& rhs, char const* msg);                    \
-  extern template std::string* Check##NAME##Impl<double, double>(              \
-      double const& lhs, double const& rhs, char const* msg);
+  extern template V8_BASE_EXPORT std::string*                                  \
+      Check##NAME##Impl<double, double>(double const& lhs, double const& rhs,  \
+                                        char const* msg);
 DEFINE_CHECK_OP_IMPL(EQ, ==)
 DEFINE_CHECK_OP_IMPL(NE, !=)
 DEFINE_CHECK_OP_IMPL(LE, <=)
diff --git a/src/base/once.h b/src/base/once.h
index 790a886..8008812 100644
--- a/src/base/once.h
+++ b/src/base/once.h
@@ -55,6 +55,7 @@
 #include <stddef.h>
 
 #include "src/base/atomicops.h"
+#include "src/base/base-export.h"
 
 namespace v8 {
 namespace base {
@@ -79,7 +80,8 @@
   typedef void (*type)(T);
 };
 
-void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg);
+V8_BASE_EXPORT void CallOnceImpl(OnceType* once, PointerArgFunction init_func,
+                                 void* arg);
 
 inline void CallOnce(OnceType* once, NoArgFunction init_func) {
   if (Acquire_Load(once) != ONCE_STATE_DONE) {
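
A usage sketch for the no-argument form, assuming the V8_DECLARE_ONCE helper from the same header:

#include "src/base/once.h"

V8_DECLARE_ONCE(init_once);

static void InitializeTables() {
  // Body runs exactly once even if several threads race into CallOnce;
  // later callers take the fast path through Acquire_Load above.
}

void EnsureInitialized() {
  v8::base::CallOnce(&init_once, &InitializeTables);
}
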
diff --git a/src/base/platform/condition-variable.h b/src/base/platform/condition-variable.h
index 72d6f28..48e7c36 100644
--- a/src/base/platform/condition-variable.h
+++ b/src/base/platform/condition-variable.h
@@ -5,6 +5,7 @@
 #ifndef V8_BASE_PLATFORM_CONDITION_VARIABLE_H_
 #define V8_BASE_PLATFORM_CONDITION_VARIABLE_H_
 
+#include "src/base/base-export.h"
 #include "src/base/lazy-instance.h"
 #include "src/base/platform/mutex.h"
 
@@ -28,7 +29,7 @@
 // the mutex and suspend the execution of the calling thread. When the condition
 // variable is notified, the thread is awakened, and the mutex is reacquired.
 
-class ConditionVariable final {
+class V8_BASE_EXPORT ConditionVariable final {
  public:
   ConditionVariable();
   ~ConditionVariable();
diff --git a/src/base/platform/mutex.h b/src/base/platform/mutex.h
index 61df19d..e7231bd 100644
--- a/src/base/platform/mutex.h
+++ b/src/base/platform/mutex.h
@@ -5,6 +5,7 @@
 #ifndef V8_BASE_PLATFORM_MUTEX_H_
 #define V8_BASE_PLATFORM_MUTEX_H_
 
+#include "src/base/base-export.h"
 #include "src/base/lazy-instance.h"
 #if V8_OS_WIN
 #include "src/base/win32-headers.h"
@@ -33,7 +34,7 @@
 // |TryLock()|. The behavior of a program is undefined if a mutex is destroyed
 // while still owned by some thread. The Mutex class is non-copyable.
 
-class Mutex final {
+class V8_BASE_EXPORT Mutex final {
  public:
   Mutex();
   ~Mutex();
@@ -127,7 +128,7 @@
 // The behavior of a program is undefined if a recursive mutex is destroyed
 // while still owned by some thread. The RecursiveMutex class is non-copyable.
 
-class RecursiveMutex final {
+class V8_BASE_EXPORT RecursiveMutex final {
  public:
   RecursiveMutex();
   ~RecursiveMutex();
diff --git a/src/base/platform/platform.h b/src/base/platform/platform.h
index d3b6c9c..5d570e7 100644
--- a/src/base/platform/platform.h
+++ b/src/base/platform/platform.h
@@ -25,6 +25,7 @@
 #include <string>
 #include <vector>
 
+#include "src/base/base-export.h"
 #include "src/base/build_config.h"
 #include "src/base/compiler-specific.h"
 #include "src/base/platform/mutex.h"
@@ -69,7 +70,7 @@
 
 #define V8_FAST_TLS_SUPPORTED 1
 
-extern intptr_t kMacTlsBaseOffset;
+extern V8_BASE_EXPORT intptr_t kMacTlsBaseOffset;
 
 INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
 
@@ -102,7 +103,7 @@
 // functions. Add methods here to cope with differences between the
 // supported platforms.
 
-class OS {
+class V8_BASE_EXPORT OS {
  public:
   // Initialize the OS class.
   // - random_seed: Used for the GetRandomMmapAddress() if non-zero.
@@ -211,7 +212,7 @@
     char text[kStackWalkMaxTextLen];
   };
 
-  class MemoryMappedFile {
+  class V8_BASE_EXPORT MemoryMappedFile {
    public:
     virtual ~MemoryMappedFile() {}
     virtual void* memory() const = 0;
@@ -286,7 +287,7 @@
 // Control of the reserved memory can be assigned to another VirtualMemory
 // object by assignment or copy-constructing. This removes the reserved memory
 // from the original object.
-class VirtualMemory {
+class V8_BASE_EXPORT VirtualMemory {
  public:
   // Empty VirtualMemory object, controlling no reserved memory.
   VirtualMemory();
@@ -418,7 +419,7 @@
 // thread. The Thread object should not be deallocated before the thread has
 // terminated.
 
-class Thread {
+class V8_BASE_EXPORT Thread {
  public:
   // Opaque data type for thread-local storage keys.
   typedef int32_t LocalStorageKey;
diff --git a/src/base/platform/semaphore.h b/src/base/platform/semaphore.h
index 39029c8..31aeca3 100644
--- a/src/base/platform/semaphore.h
+++ b/src/base/platform/semaphore.h
@@ -5,6 +5,7 @@
 #ifndef V8_BASE_PLATFORM_SEMAPHORE_H_
 #define V8_BASE_PLATFORM_SEMAPHORE_H_
 
+#include "src/base/base-export.h"
 #include "src/base/lazy-instance.h"
 #if V8_OS_WIN
 #include "src/base/win32-headers.h"
@@ -31,7 +32,7 @@
 // count reaches zero, threads waiting for the semaphore block until the
 // count becomes non-zero.
 
-class Semaphore final {
+class V8_BASE_EXPORT Semaphore final {
  public:
   explicit Semaphore(int count);
   ~Semaphore();
diff --git a/src/base/platform/time.cc b/src/base/platform/time.cc
index 76a8209..6b48338 100644
--- a/src/base/platform/time.cc
+++ b/src/base/platform/time.cc
@@ -639,7 +639,7 @@
 
 bool ThreadTicks::IsSupported() {
 #if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
-  defined(V8_OS_MACOSX) || defined(V8_OS_ANDROID)
+    defined(V8_OS_MACOSX) || defined(V8_OS_ANDROID) || defined(V8_OS_SOLARIS)
   return true;
 #elif defined(V8_OS_WIN)
   return IsSupportedWin();
@@ -655,6 +655,8 @@
 #elif(defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
   defined(V8_OS_ANDROID)
   return ThreadTicks(ClockNow(CLOCK_THREAD_CPUTIME_ID));
+#elif V8_OS_SOLARIS
+  return ThreadTicks(gethrvtime() / Time::kNanosecondsPerMicrosecond);
 #elif V8_OS_WIN
   return ThreadTicks::GetForThread(::GetCurrentThread());
 #else
diff --git a/src/base/platform/time.h b/src/base/platform/time.h
index be62014..ed17512 100644
--- a/src/base/platform/time.h
+++ b/src/base/platform/time.h
@@ -9,6 +9,7 @@
 #include <iosfwd>
 #include <limits>
 
+#include "src/base/base-export.h"
 #include "src/base/bits.h"
 #include "src/base/macros.h"
 #include "src/base/safe_math.h"
@@ -42,7 +43,7 @@
 // This class represents a duration of time, internally represented in
 // microseconds.
 
-class TimeDelta final {
+class V8_BASE_EXPORT TimeDelta final {
  public:
   TimeDelta() : delta_(0) {}
 
@@ -277,7 +278,7 @@
 // This class represents an absolute point in time, internally represented as
 // microseconds (s/1,000,000) since 00:00:00 UTC, January 1, 1970.
 
-class Time final : public time_internal::TimeBase<Time> {
+class V8_BASE_EXPORT Time final : public time_internal::TimeBase<Time> {
  public:
   // Contains the NULL time. Use Time::Now() to get the current time.
   Time() : TimeBase(0) {}
@@ -322,7 +323,7 @@
   explicit Time(int64_t us) : TimeBase(us) {}
 };
 
-std::ostream& operator<<(std::ostream&, const Time&);
+V8_BASE_EXPORT std::ostream& operator<<(std::ostream&, const Time&);
 
 inline Time operator+(const TimeDelta& delta, const Time& time) {
   return time + delta;
@@ -339,7 +340,8 @@
 // Time::Now() may actually decrease or jump).  But note that TimeTicks may
 // "stand still", for example if the computer suspended.
 
-class TimeTicks final : public time_internal::TimeBase<TimeTicks> {
+class V8_BASE_EXPORT TimeTicks final
+    : public time_internal::TimeBase<TimeTicks> {
  public:
   TimeTicks() : TimeBase(0) {}
 
@@ -376,7 +378,8 @@
 
 // Represents a clock, specific to a particular thread, that runs only while the
 // thread is running.
-class ThreadTicks final : public time_internal::TimeBase<ThreadTicks> {
+class V8_BASE_EXPORT ThreadTicks final
+    : public time_internal::TimeBase<ThreadTicks> {
  public:
   ThreadTicks() : TimeBase(0) {}
 
@@ -408,6 +411,9 @@
 #endif
 
  private:
+  template <class TimeClass>
+  friend class time_internal::TimeBase;
+
   // Please use Now() or GetForThread() to create a new object. This is for
   // internal use and testing. Ticks are in microseconds.
   explicit ThreadTicks(int64_t ticks) : TimeBase(ticks) {}
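
A typical use of the clock types declared above (a sketch):

#include "src/base/platform/time.h"

void TimeSomething() {
  v8::base::TimeTicks start = v8::base::TimeTicks::Now();
  // ... the work being measured ...
  v8::base::TimeDelta elapsed = v8::base::TimeTicks::Now() - start;
  // TimeTicks is monotonic, so the duration is meaningful even if the
  // wall-clock Time jumps backwards in the meantime.
  int64_t us = elapsed.InMicroseconds();
  (void)us;
}
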
diff --git a/src/base/ring-buffer.h b/src/base/ring-buffer.h
new file mode 100644
index 0000000..b347977
--- /dev/null
+++ b/src/base/ring-buffer.h
@@ -0,0 +1,54 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_RING_BUFFER_H_
+#define V8_BASE_RING_BUFFER_H_
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+
+template <typename T>
+class RingBuffer {
+ public:
+  RingBuffer() { Reset(); }
+  static const int kSize = 10;
+  void Push(const T& value) {
+    if (count_ == kSize) {
+      elements_[start_++] = value;
+      if (start_ == kSize) start_ = 0;
+    } else {
+      DCHECK_EQ(start_, 0);
+      elements_[count_++] = value;
+    }
+  }
+
+  int Count() const { return count_; }
+
+  template <typename Callback>
+  T Sum(Callback callback, const T& initial) const {
+    int j = start_ + count_ - 1;
+    if (j >= kSize) j -= kSize;
+    T result = initial;
+    for (int i = 0; i < count_; i++) {
+      result = callback(result, elements_[j]);
+      if (--j == -1) j += kSize;
+    }
+    return result;
+  }
+
+  void Reset() { start_ = count_ = 0; }
+
+ private:
+  T elements_[kSize];
+  int start_;
+  int count_;
+  DISALLOW_COPY_AND_ASSIGN(RingBuffer);
+};
+
+}  // namespace base
+}  // namespace v8
+
+#endif  // V8_BASE_RING_BUFFER_H_
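
A usage sketch for the new RingBuffer: once more than kSize (10) values have been pushed, the oldest are overwritten, and Sum folds only the retained window, newest first:

#include "src/base/ring-buffer.h"

double WindowAverage() {
  v8::base::RingBuffer<double> samples;
  for (int i = 1; i <= 15; i++) samples.Push(i);  // window is now 6..15
  double total = samples.Sum(
      [](double acc, double v) { return acc + v; }, 0.0);  // 105.0
  return total / samples.Count();  // 10.5
}
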
diff --git a/src/base/sys-info.h b/src/base/sys-info.h
index 4504c82..772f443 100644
--- a/src/base/sys-info.h
+++ b/src/base/sys-info.h
@@ -6,12 +6,14 @@
 #define V8_BASE_SYS_INFO_H_
 
 #include <stdint.h>
+
+#include "src/base/base-export.h"
 #include "src/base/compiler-specific.h"
 
 namespace v8 {
 namespace base {
 
-class SysInfo final {
+class V8_BASE_EXPORT SysInfo final {
  public:
   // Returns the number of logical processors/core on the current machine.
   static int NumberOfProcessors();
diff --git a/src/base/utils/random-number-generator.h b/src/base/utils/random-number-generator.h
index cd3e6bf..7a322b5 100644
--- a/src/base/utils/random-number-generator.h
+++ b/src/base/utils/random-number-generator.h
@@ -5,6 +5,7 @@
 #ifndef V8_BASE_UTILS_RANDOM_NUMBER_GENERATOR_H_
 #define V8_BASE_UTILS_RANDOM_NUMBER_GENERATOR_H_
 
+#include "src/base/base-export.h"
 #include "src/base/macros.h"
 
 namespace v8 {
@@ -31,7 +32,7 @@
 // https://code.google.com/p/v8/issues/detail?id=2905
 // This class is neither reentrant nor threadsafe.
 
-class RandomNumberGenerator final {
+class V8_BASE_EXPORT RandomNumberGenerator final {
  public:
   // EntropySource is used as a callback function when V8 needs a source of
   // entropy.
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 62cebfb..ba5f4d5 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -364,17 +364,6 @@
   InstallFunction(target, name, function, name_string, attributes);
 }
 
-Handle<JSFunction> InstallGetter(Handle<JSObject> target,
-                                 Handle<Name> property_name,
-                                 Handle<JSFunction> getter,
-                                 PropertyAttributes attributes = DONT_ENUM) {
-  Handle<Object> setter = target->GetIsolate()->factory()->undefined_value();
-  JSObject::DefineAccessor(target, property_name, getter, setter, attributes)
-      .Check();
-  getter->shared()->set_native(true);
-  return getter;
-}
-
 Handle<JSFunction> CreateFunction(Isolate* isolate, Handle<String> name,
                                   InstanceType type, int instance_size,
                                   MaybeHandle<JSObject> maybe_prototype,
@@ -460,17 +449,54 @@
   return fun;
 }
 
+void SimpleInstallGetterSetter(Handle<JSObject> base, Handle<String> name,
+                               Builtins::Name call_getter,
+                               Builtins::Name call_setter,
+                               PropertyAttributes attribs) {
+  Isolate* const isolate = base->GetIsolate();
+
+  Handle<String> getter_name =
+      Name::ToFunctionName(name, isolate->factory()->get_string())
+          .ToHandleChecked();
+  Handle<JSFunction> getter =
+      SimpleCreateFunction(isolate, getter_name, call_getter, 0, true);
+  getter->shared()->set_native(true);
+
+  Handle<String> setter_name =
+      Name::ToFunctionName(name, isolate->factory()->set_string())
+          .ToHandleChecked();
+  Handle<JSFunction> setter =
+      SimpleCreateFunction(isolate, setter_name, call_setter, 1, true);
+  setter->shared()->set_native(true);
+
+  JSObject::DefineAccessor(base, name, getter, setter, attribs).Check();
+}
+
+Handle<JSFunction> SimpleInstallGetter(Handle<JSObject> base,
+                                       Handle<String> name,
+                                       Handle<Name> property_name,
+                                       Builtins::Name call, bool adapt) {
+  Isolate* const isolate = base->GetIsolate();
+
+  Handle<String> getter_name =
+      Name::ToFunctionName(name, isolate->factory()->get_string())
+          .ToHandleChecked();
+  Handle<JSFunction> getter =
+      SimpleCreateFunction(isolate, getter_name, call, 0, adapt);
+  getter->shared()->set_native(true);
+
+  Handle<Object> setter = isolate->factory()->undefined_value();
+
+  JSObject::DefineAccessor(base, property_name, getter, setter, DONT_ENUM)
+      .Check();
+
+  return getter;
+}
+
 Handle<JSFunction> SimpleInstallGetter(Handle<JSObject> base,
                                        Handle<String> name, Builtins::Name call,
                                        bool adapt) {
-  Isolate* const isolate = base->GetIsolate();
-  Handle<String> fun_name =
-      Name::ToFunctionName(name, isolate->factory()->get_string())
-          .ToHandleChecked();
-  Handle<JSFunction> fun =
-      SimpleCreateFunction(isolate, fun_name, call, 0, adapt);
-  InstallGetter(base, name, fun);
-  return fun;
+  return SimpleInstallGetter(base, name, name, call, adapt);
 }
 
 Handle<JSFunction> SimpleInstallGetter(Handle<JSObject> base,
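
The accessor shape SimpleInstallGetterSetter builds internally mirrors what an embedder gets from the public API; a rough analogue using v8::ObjectTemplate::SetAccessorProperty (callback names hypothetical):

#include "include/v8.h"

void InstallAccessorPair(v8::Isolate* isolate,
                         v8::Local<v8::ObjectTemplate> templ,
                         v8::FunctionCallback getter,
                         v8::FunctionCallback setter) {
  // A non-enumerable accessor whose get/set halves are native functions.
  templ->SetAccessorProperty(
      v8::String::NewFromUtf8(isolate, "prop", v8::NewStringType::kNormal)
          .ToLocalChecked(),
      v8::FunctionTemplate::New(isolate, getter),
      v8::FunctionTemplate::New(isolate, setter), v8::DontEnum);
}
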
@@ -934,8 +960,9 @@
   if (global_proxy_template.IsEmpty()) {
     Handle<String> name = Handle<String>(heap()->empty_string());
     Handle<Code> code = isolate()->builtins()->Illegal();
-    global_proxy_function = factory()->NewFunction(
-        name, code, JS_GLOBAL_PROXY_TYPE, JSGlobalProxy::kSize);
+    global_proxy_function =
+        factory()->NewFunction(name, code, JS_GLOBAL_PROXY_TYPE,
+                               JSGlobalProxy::SizeWithInternalFields(0));
   } else {
     Handle<ObjectTemplateInfo> data =
         v8::Utils::OpenHandle(*global_proxy_template);
@@ -1030,7 +1057,6 @@
                                 Builtins::kErrorPrototypeToString, 0, true);
       isolate->native_context()->set_error_to_string(*to_string_fun);
     } else {
-      DCHECK(context_index != Context::ERROR_FUNCTION_INDEX);
       DCHECK(isolate->native_context()->error_to_string()->IsJSFunction());
 
       InstallFunction(prototype, isolate->error_to_string(),
@@ -1104,9 +1130,12 @@
     SimpleInstallFunction(object_function, factory->assign_string(),
                           Builtins::kObjectAssign, 2, false);
     SimpleInstallFunction(object_function, factory->create_string(),
-                          Builtins::kObjectCreate, 2, false);
+                          Builtins::kObjectCreate, 2, true);
     SimpleInstallFunction(object_function, "getOwnPropertyDescriptor",
                           Builtins::kObjectGetOwnPropertyDescriptor, 2, false);
+    SimpleInstallFunction(object_function,
+                          factory->getOwnPropertyDescriptors_string(),
+                          Builtins::kObjectGetOwnPropertyDescriptors, 1, false);
     SimpleInstallFunction(object_function, "getOwnPropertyNames",
                           Builtins::kObjectGetOwnPropertyNames, 1, false);
     SimpleInstallFunction(object_function, "getOwnPropertySymbols",
@@ -1136,6 +1165,8 @@
         object_function, "getPrototypeOf", Builtins::kObjectGetPrototypeOf,
         1, false);
     native_context()->set_object_get_prototype_of(*object_get_prototype_of);
+    SimpleInstallFunction(object_function, "setPrototypeOf",
+                          Builtins::kObjectSetPrototypeOf, 2, false);
 
     Handle<JSFunction> object_is_extensible = SimpleInstallFunction(
         object_function, "isExtensible", Builtins::kObjectIsExtensible,
@@ -1153,6 +1184,10 @@
     Handle<JSFunction> object_keys = SimpleInstallFunction(
         object_function, "keys", Builtins::kObjectKeys, 1, false);
     native_context()->set_object_keys(*object_keys);
+    SimpleInstallFunction(object_function, factory->entries_string(),
+                          Builtins::kObjectEntries, 1, false);
+    SimpleInstallFunction(object_function, factory->values_string(),
+                          Builtins::kObjectValues, 1, false);
 
     SimpleInstallFunction(isolate->initial_object_prototype(),
                           "__defineGetter__", Builtins::kObjectDefineGetter, 2,
@@ -1171,6 +1206,11 @@
     SimpleInstallFunction(
         isolate->initial_object_prototype(), "propertyIsEnumerable",
         Builtins::kObjectPrototypePropertyIsEnumerable, 1, false);
+
+    SimpleInstallGetterSetter(isolate->initial_object_prototype(),
+                              factory->proto_string(),
+                              Builtins::kObjectPrototypeGetProto,
+                              Builtins::kObjectPrototypeSetProto, DONT_ENUM);
   }
 
   Handle<JSObject> global(native_context()->global_object());
@@ -1280,6 +1320,97 @@
     native_context()->set_is_arraylike(*is_arraylike);
   }
 
+  {  // --- A r r a y I t e r a t o r ---
+    Handle<JSObject> iterator_prototype(
+        native_context()->initial_iterator_prototype());
+
+    Handle<JSObject> array_iterator_prototype =
+        factory->NewJSObject(isolate->object_function(), TENURED);
+    JSObject::ForceSetPrototype(array_iterator_prototype, iterator_prototype);
+
+    JSObject::AddProperty(
+        array_iterator_prototype, factory->to_string_tag_symbol(),
+        factory->ArrayIterator_string(),
+        static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+    Handle<JSFunction> next = InstallFunction(
+        array_iterator_prototype, "next", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+        MaybeHandle<JSObject>(), Builtins::kArrayIteratorPrototypeNext);
+    next->shared()->set_builtin_function_id(kArrayIteratorNext);
+
+    // Set the expected parameters for %ArrayIteratorPrototype%.next to 0 (not
+    // including the receiver), as required by the builtin.
+    next->shared()->set_internal_formal_parameter_count(0);
+
+    // Set the length for the function to satisfy ECMA-262.
+    next->shared()->set_length(0);
+
+    Handle<JSFunction> array_iterator_function = CreateFunction(
+        isolate, factory->ArrayIterator_string(),
+        JS_FAST_ARRAY_VALUE_ITERATOR_TYPE, JSArrayIterator::kSize,
+        array_iterator_prototype, Builtins::kIllegal);
+    array_iterator_function->shared()->set_instance_class_name(
+        isolate->heap()->ArrayIterator_string());
+
+    native_context()->set_initial_array_iterator_prototype(
+        *array_iterator_prototype);
+    native_context()->set_initial_array_iterator_prototype_map(
+        array_iterator_prototype->map());
+
+    Handle<Map> initial_map(array_iterator_function->initial_map(), isolate);
+
+#define ARRAY_ITERATOR_LIST(V)                                              \
+  V(TYPED_ARRAY, KEY, typed_array, key)                                     \
+  V(FAST_ARRAY, KEY, fast_array, key)                                       \
+  V(GENERIC_ARRAY, KEY, array, key)                                         \
+  V(UINT8_ARRAY, KEY_VALUE, uint8_array, key_value)                         \
+  V(INT8_ARRAY, KEY_VALUE, int8_array, key_value)                           \
+  V(UINT16_ARRAY, KEY_VALUE, uint16_array, key_value)                       \
+  V(INT16_ARRAY, KEY_VALUE, int16_array, key_value)                         \
+  V(UINT32_ARRAY, KEY_VALUE, uint32_array, key_value)                       \
+  V(INT32_ARRAY, KEY_VALUE, int32_array, key_value)                         \
+  V(FLOAT32_ARRAY, KEY_VALUE, float32_array, key_value)                     \
+  V(FLOAT64_ARRAY, KEY_VALUE, float64_array, key_value)                     \
+  V(UINT8_CLAMPED_ARRAY, KEY_VALUE, uint8_clamped_array, key_value)         \
+  V(FAST_SMI_ARRAY, KEY_VALUE, fast_smi_array, key_value)                   \
+  V(FAST_HOLEY_SMI_ARRAY, KEY_VALUE, fast_holey_smi_array, key_value)       \
+  V(FAST_ARRAY, KEY_VALUE, fast_array, key_value)                           \
+  V(FAST_HOLEY_ARRAY, KEY_VALUE, fast_holey_array, key_value)               \
+  V(FAST_DOUBLE_ARRAY, KEY_VALUE, fast_double_array, key_value)             \
+  V(FAST_HOLEY_DOUBLE_ARRAY, KEY_VALUE, fast_holey_double_array, key_value) \
+  V(GENERIC_ARRAY, KEY_VALUE, array, key_value)                             \
+  V(UINT8_ARRAY, VALUE, uint8_array, value)                                 \
+  V(INT8_ARRAY, VALUE, int8_array, value)                                   \
+  V(UINT16_ARRAY, VALUE, uint16_array, value)                               \
+  V(INT16_ARRAY, VALUE, int16_array, value)                                 \
+  V(UINT32_ARRAY, VALUE, uint32_array, value)                               \
+  V(INT32_ARRAY, VALUE, int32_array, value)                                 \
+  V(FLOAT32_ARRAY, VALUE, float32_array, value)                             \
+  V(FLOAT64_ARRAY, VALUE, float64_array, value)                             \
+  V(UINT8_CLAMPED_ARRAY, VALUE, uint8_clamped_array, value)                 \
+  V(FAST_SMI_ARRAY, VALUE, fast_smi_array, value)                           \
+  V(FAST_HOLEY_SMI_ARRAY, VALUE, fast_holey_smi_array, value)               \
+  V(FAST_ARRAY, VALUE, fast_array, value)                                   \
+  V(FAST_HOLEY_ARRAY, VALUE, fast_holey_array, value)                       \
+  V(FAST_DOUBLE_ARRAY, VALUE, fast_double_array, value)                     \
+  V(FAST_HOLEY_DOUBLE_ARRAY, VALUE, fast_holey_double_array, value)         \
+  V(GENERIC_ARRAY, VALUE, array, value)
+
+#define CREATE_ARRAY_ITERATOR_MAP(PREFIX, SUFFIX, prefix, suffix)           \
+  do {                                                                      \
+    const InstanceType type = JS_##PREFIX##_##SUFFIX##_ITERATOR_TYPE;       \
+    Handle<Map> map =                                                       \
+        Map::Copy(initial_map, "JS_" #PREFIX "_" #SUFFIX "_ITERATOR_TYPE"); \
+    map->set_instance_type(type);                                           \
+    native_context()->set_##prefix##_##suffix##_iterator_map(*map);         \
+  } while (0);
+
+    ARRAY_ITERATOR_LIST(CREATE_ARRAY_ITERATOR_MAP)
+
+#undef CREATE_ARRAY_ITERATOR_MAP
+#undef ARRAY_ITERATOR_LIST
+  }
+
   {  // --- N u m b e r ---
     Handle<JSFunction> number_fun = InstallFunction(
         global, "Number", JS_VALUE_TYPE, JSValue::kSize,
@@ -1294,7 +1425,7 @@
     // Create the %NumberPrototype%
     Handle<JSValue> prototype =
         Handle<JSValue>::cast(factory->NewJSObject(number_fun, TENURED));
-    prototype->set_value(Smi::FromInt(0));
+    prototype->set_value(Smi::kZero);
     Accessors::FunctionSetPrototype(number_fun, prototype).Assert();
 
     // Install the "constructor" property on the {prototype}.
@@ -1325,6 +1456,20 @@
     SimpleInstallFunction(number_fun, "isNaN", Builtins::kNumberIsNaN, 1, true);
     SimpleInstallFunction(number_fun, "isSafeInteger",
                           Builtins::kNumberIsSafeInteger, 1, true);
+
+    // Install Number.parseFloat and Global.parseFloat.
+    Handle<JSFunction> parse_float_fun = SimpleInstallFunction(
+        number_fun, "parseFloat", Builtins::kNumberParseFloat, 1, true);
+    JSObject::AddProperty(global_object,
+                          factory->NewStringFromAsciiChecked("parseFloat"),
+                          parse_float_fun, DONT_ENUM);
+
+    // Install Number.parseInt and Global.parseInt.
+    Handle<JSFunction> parse_int_fun = SimpleInstallFunction(
+        number_fun, "parseInt", Builtins::kNumberParseInt, 2, true);
+    JSObject::AddProperty(global_object,
+                          factory->NewStringFromAsciiChecked("parseInt"),
+                          parse_int_fun, DONT_ENUM);
   }
 
   {  // --- B o o l e a n ---
@@ -1385,7 +1530,7 @@
 
     // Install the String.fromCharCode function.
     SimpleInstallFunction(string_fun, "fromCharCode",
-                          Builtins::kStringFromCharCode, 1, true);
+                          Builtins::kStringFromCharCode, 1, false);
 
     // Install the String.fromCodePoint function.
     SimpleInstallFunction(string_fun, "fromCodePoint",
@@ -1406,6 +1551,12 @@
                           1, true);
     SimpleInstallFunction(prototype, "charCodeAt",
                           Builtins::kStringPrototypeCharCodeAt, 1, true);
+    SimpleInstallFunction(prototype, "endsWith",
+                          Builtins::kStringPrototypeEndsWith, 1, false);
+    SimpleInstallFunction(prototype, "includes",
+                          Builtins::kStringPrototypeIncludes, 1, false);
+    SimpleInstallFunction(prototype, "indexOf",
+                          Builtins::kStringPrototypeIndexOf, 1, false);
     SimpleInstallFunction(prototype, "lastIndexOf",
                           Builtins::kStringPrototypeLastIndexOf, 1, false);
     SimpleInstallFunction(prototype, "localeCompare",
@@ -1416,6 +1567,8 @@
                           2, true);
     SimpleInstallFunction(prototype, "substring",
                           Builtins::kStringPrototypeSubstring, 2, true);
+    SimpleInstallFunction(prototype, "startsWith",
+                          Builtins::kStringPrototypeStartsWith, 1, false);
     SimpleInstallFunction(prototype, "toString",
                           Builtins::kStringPrototypeToString, 0, true);
     SimpleInstallFunction(prototype, "trim", Builtins::kStringPrototypeTrim, 0,
@@ -1431,6 +1584,7 @@
         isolate, factory->NewStringFromAsciiChecked("[Symbol.iterator]"),
         Builtins::kStringPrototypeIterator, 0, true);
     iterator->shared()->set_native(true);
+    iterator->shared()->set_builtin_function_id(kStringIterator);
     JSObject::AddProperty(prototype, factory->iterator_symbol(), iterator,
                           static_cast<PropertyAttributes>(DONT_ENUM));
   }
@@ -1662,14 +1816,142 @@
     shared->DontAdaptArguments();
     shared->set_length(2);
 
-    // RegExp.prototype setup.
+    {
+      // RegExp.prototype setup.
 
-    // Install the "constructor" property on the {prototype}.
-    JSObject::AddProperty(prototype, factory->constructor_string(), regexp_fun,
-                          DONT_ENUM);
+      // Install the "constructor" property on the {prototype}.
+      JSObject::AddProperty(prototype, factory->constructor_string(),
+                            regexp_fun, DONT_ENUM);
 
-    SimpleInstallFunction(prototype, "exec", Builtins::kRegExpPrototypeExec, 1,
-                          true, DONT_ENUM);
+      {
+        Handle<JSFunction> fun = SimpleInstallFunction(
+            prototype, factory->exec_string(), Builtins::kRegExpPrototypeExec,
+            1, true, DONT_ENUM);
+        native_context()->set_regexp_exec_function(*fun);
+      }
+
+      SimpleInstallGetter(prototype, factory->flags_string(),
+                          Builtins::kRegExpPrototypeFlagsGetter, true);
+      SimpleInstallGetter(prototype, factory->global_string(),
+                          Builtins::kRegExpPrototypeGlobalGetter, true);
+      SimpleInstallGetter(prototype, factory->ignoreCase_string(),
+                          Builtins::kRegExpPrototypeIgnoreCaseGetter, true);
+      SimpleInstallGetter(prototype, factory->multiline_string(),
+                          Builtins::kRegExpPrototypeMultilineGetter, true);
+      SimpleInstallGetter(prototype, factory->source_string(),
+                          Builtins::kRegExpPrototypeSourceGetter, false);
+      SimpleInstallGetter(prototype, factory->sticky_string(),
+                          Builtins::kRegExpPrototypeStickyGetter, true);
+      SimpleInstallGetter(prototype, factory->unicode_string(),
+                          Builtins::kRegExpPrototypeUnicodeGetter, true);
+
+      SimpleInstallFunction(prototype, "compile",
+                            Builtins::kRegExpPrototypeCompile, 2, false,
+                            DONT_ENUM);
+      SimpleInstallFunction(prototype, factory->toString_string(),
+                            Builtins::kRegExpPrototypeToString, 0, false,
+                            DONT_ENUM);
+      SimpleInstallFunction(prototype, "test", Builtins::kRegExpPrototypeTest,
+                            1, true, DONT_ENUM);
+
+      {
+        Handle<JSFunction> fun = SimpleCreateFunction(
+            isolate, factory->InternalizeUtf8String("[Symbol.match]"),
+            Builtins::kRegExpPrototypeMatch, 1, false);
+        InstallFunction(prototype, fun, factory->match_symbol(), DONT_ENUM);
+      }
+
+      {
+        Handle<JSFunction> fun = SimpleCreateFunction(
+            isolate, factory->InternalizeUtf8String("[Symbol.replace]"),
+            Builtins::kRegExpPrototypeReplace, 2, true);
+        InstallFunction(prototype, fun, factory->replace_symbol(), DONT_ENUM);
+      }
+
+      {
+        Handle<JSFunction> fun = SimpleCreateFunction(
+            isolate, factory->InternalizeUtf8String("[Symbol.search]"),
+            Builtins::kRegExpPrototypeSearch, 1, true);
+        InstallFunction(prototype, fun, factory->search_symbol(), DONT_ENUM);
+      }
+
+      {
+        Handle<JSFunction> fun = SimpleCreateFunction(
+            isolate, factory->InternalizeUtf8String("[Symbol.split]"),
+            Builtins::kRegExpPrototypeSplit, 2, false);
+        InstallFunction(prototype, fun, factory->split_symbol(), DONT_ENUM);
+      }
+
+      // Store the initial RegExp.prototype map. This is used in fast-path
+      // checks. Do not alter the prototype after this point.
+      native_context()->set_regexp_prototype_map(prototype->map());
+    }
+
+    {
+      // RegExp getters and setters.
+
+      SimpleInstallGetter(regexp_fun,
+                          factory->InternalizeUtf8String("[Symbol.species]"),
+                          factory->species_symbol(),
+                          Builtins::kRegExpPrototypeSpeciesGetter, false);
+
+      // Static properties set by a successful match.
+
+      const PropertyAttributes no_enum = DONT_ENUM;
+      SimpleInstallGetterSetter(regexp_fun, factory->input_string(),
+                                Builtins::kRegExpInputGetter,
+                                Builtins::kRegExpInputSetter, no_enum);
+      SimpleInstallGetterSetter(
+          regexp_fun, factory->InternalizeUtf8String("$_"),
+          Builtins::kRegExpInputGetter, Builtins::kRegExpInputSetter, no_enum);
+
+      SimpleInstallGetterSetter(
+          regexp_fun, factory->InternalizeUtf8String("lastMatch"),
+          Builtins::kRegExpLastMatchGetter, Builtins::kEmptyFunction, no_enum);
+      SimpleInstallGetterSetter(
+          regexp_fun, factory->InternalizeUtf8String("$&"),
+          Builtins::kRegExpLastMatchGetter, Builtins::kEmptyFunction, no_enum);
+
+      SimpleInstallGetterSetter(
+          regexp_fun, factory->InternalizeUtf8String("lastParen"),
+          Builtins::kRegExpLastParenGetter, Builtins::kEmptyFunction, no_enum);
+      SimpleInstallGetterSetter(
+          regexp_fun, factory->InternalizeUtf8String("$+"),
+          Builtins::kRegExpLastParenGetter, Builtins::kEmptyFunction, no_enum);
+
+      SimpleInstallGetterSetter(regexp_fun,
+                                factory->InternalizeUtf8String("leftContext"),
+                                Builtins::kRegExpLeftContextGetter,
+                                Builtins::kEmptyFunction, no_enum);
+      SimpleInstallGetterSetter(regexp_fun,
+                                factory->InternalizeUtf8String("$`"),
+                                Builtins::kRegExpLeftContextGetter,
+                                Builtins::kEmptyFunction, no_enum);
+
+      SimpleInstallGetterSetter(regexp_fun,
+                                factory->InternalizeUtf8String("rightContext"),
+                                Builtins::kRegExpRightContextGetter,
+                                Builtins::kEmptyFunction, no_enum);
+      SimpleInstallGetterSetter(regexp_fun,
+                                factory->InternalizeUtf8String("$'"),
+                                Builtins::kRegExpRightContextGetter,
+                                Builtins::kEmptyFunction, no_enum);
+
+#define INSTALL_CAPTURE_GETTER(i)                         \
+  SimpleInstallGetterSetter(                              \
+      regexp_fun, factory->InternalizeUtf8String("$" #i), \
+      Builtins::kRegExpCapture##i##Getter, Builtins::kEmptyFunction, no_enum)
+      INSTALL_CAPTURE_GETTER(1);
+      INSTALL_CAPTURE_GETTER(2);
+      INSTALL_CAPTURE_GETTER(3);
+      INSTALL_CAPTURE_GETTER(4);
+      INSTALL_CAPTURE_GETTER(5);
+      INSTALL_CAPTURE_GETTER(6);
+      INSTALL_CAPTURE_GETTER(7);
+      INSTALL_CAPTURE_GETTER(8);
+      INSTALL_CAPTURE_GETTER(9);
+#undef INSTALL_CAPTURE_GETTER
+    }
 
     DCHECK(regexp_fun->has_initial_map());
     Handle<Map> initial_map(regexp_fun->initial_map());
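Note on the INSTALL_CAPTURE_GETTER macro above: it relies on two preprocessor features, `#i` to stringize the argument into the "$1".."$9" property names and `##` to paste it into the builtin identifier. A minimal standalone sketch of the same technique (the handler names here are illustrative stand-ins, not V8's):

```cpp
#include <cstdio>

// Hypothetical per-capture handlers, standing in for the capture getters.
void Capture1Getter() { std::puts("capture 1"); }
void Capture2Getter() { std::puts("capture 2"); }

struct Property {
  const char* name;
  void (*getter)();
};

// "$" #i stringizes the argument into the property name; ##i pastes it into
// the handler identifier, exactly as INSTALL_CAPTURE_GETTER does.
#define CAPTURE_PROPERTY(i) \
  { "$" #i, &Capture##i##Getter }

int main() {
  Property props[] = {CAPTURE_PROPERTY(1), CAPTURE_PROPERTY(2)};
  for (const Property& p : props) {
    std::printf("%s -> ", p.name);
    p.getter();
  }
}
```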
@@ -1681,7 +1963,7 @@
     // ECMA-262, section 15.10.7.5.
     PropertyAttributes writable =
         static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
-    DataDescriptor field(factory->last_index_string(),
+    DataDescriptor field(factory->lastIndex_string(),
                          JSRegExp::kLastIndexFieldIndex, writable,
                          Representation::Tagged());
     initial_map->AppendDescriptor(&field);
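The hunk above installs lastIndex as DONT_ENUM | DONT_DELETE but deliberately not READ_ONLY, matching the spec's writable, non-enumerable, non-configurable lastIndex. A sketch of the bit-flag combination pattern used here (the numeric values are illustrative, though they follow the common convention):

```cpp
// Minimal sketch of combining property-attribute bit flags with a cast.
enum PropertyAttributes {
  NONE = 0,
  READ_ONLY = 1 << 0,
  DONT_ENUM = 1 << 1,
  DONT_DELETE = 1 << 2
};

int main() {
  // lastIndex: writable (READ_ONLY omitted), but neither enumerable nor
  // deletable.
  PropertyAttributes writable =
      static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
  return writable == (DONT_ENUM | DONT_DELETE) ? 0 : 1;
}
```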
@@ -1691,6 +1973,24 @@
     initial_map->set_unused_property_fields(0);
     initial_map->set_instance_size(initial_map->instance_size() +
                                    num_fields * kPointerSize);
+
+    {  // Internal: RegExpInternalMatch
+      Handle<JSFunction> function =
+          factory->NewFunction(isolate->factory()->empty_string(),
+                               isolate->builtins()->RegExpInternalMatch(),
+                               JS_OBJECT_TYPE, JSObject::kHeaderSize);
+      function->shared()->set_internal_formal_parameter_count(2);
+      function->shared()->set_length(2);
+      function->shared()->set_native(true);
+      native_context()->set(Context::REGEXP_INTERNAL_MATCH, *function);
+    }
+
+    // Create the last match info. One for external use, and one for internal
+    // use when we don't want to modify the externally visible match info.
+    Handle<RegExpMatchInfo> last_match_info = factory->NewRegExpMatchInfo();
+    native_context()->set_regexp_last_match_info(*last_match_info);
+    Handle<RegExpMatchInfo> internal_match_info = factory->NewRegExpMatchInfo();
+    native_context()->set_regexp_internal_match_info(*internal_match_info);
   }
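The comment above explains the two RegExpMatchInfo objects: engine-internal matching must not clobber the state that user code can observe through RegExp.$1, lastMatch, and friends. A generic sketch of the pattern, under the assumption that a match info is just a record of capture offsets (types here are illustrative):

```cpp
#include <array>

// Illustrative stand-in for RegExpMatchInfo: capture start/end offsets.
struct MatchInfo {
  std::array<int, 4> offsets{};
};

// One record user code can observe, one scratch record for internal matches
// that must leave the observable state untouched.
struct RegExpState {
  MatchInfo last_match_info;
  MatchInfo internal_match_info;

  MatchInfo& MatchInfoFor(bool internal_use) {
    return internal_use ? internal_match_info : last_match_info;
  }
};
```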
 
   {  // -- E r r o r
@@ -1738,6 +2038,16 @@
                      Context::MAKE_URI_ERROR_INDEX);
   }
 
+  {  // -- C o m p i l e E r r o r
+    Handle<JSObject> dummy = factory->NewJSObject(isolate->object_function());
+    InstallError(isolate, dummy, factory->CompileError_string(),
+                 Context::WASM_COMPILE_ERROR_FUNCTION_INDEX);
+
+    // -- R u n t i m e E r r o r
+    InstallError(isolate, dummy, factory->RuntimeError_string(),
+                 Context::WASM_RUNTIME_ERROR_FUNCTION_INDEX);
+  }
+
   // Initialize the embedder data slot.
   Handle<FixedArray> embedder_data = factory->NewFixedArray(3);
   native_context()->set_embedder_data(*embedder_data);
@@ -1798,6 +2108,7 @@
     Handle<JSFunction> math_pow =
         SimpleInstallFunction(math, "pow", Builtins::kMathPow, 2, true);
     native_context()->set_math_pow(*math_pow);
+    SimpleInstallFunction(math, "random", Builtins::kMathRandom, 0, true);
     SimpleInstallFunction(math, "round", Builtins::kMathRound, 1, true);
     SimpleInstallFunction(math, "sign", Builtins::kMathSign, 1, true);
     SimpleInstallFunction(math, "sin", Builtins::kMathSin, 1, true);
@@ -1840,6 +2151,10 @@
         math, factory->NewStringFromAsciiChecked("SQRT2"),
         factory->NewNumber(std::sqrt(2.0)),
         static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
+    JSObject::AddProperty(
+        math, factory->to_string_tag_symbol(),
+        factory->NewStringFromAsciiChecked("Math"),
+        static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
   }
 
   {  // -- A r r a y B u f f e r
@@ -1878,6 +2193,24 @@
     SimpleInstallGetter(prototype, factory->length_string(),
                         Builtins::kTypedArrayPrototypeLength, true,
                         kTypedArrayLength);
+
+    // Install "keys", "values" and "entries" methods on the {prototype}.
+    Handle<JSFunction> entries =
+        SimpleInstallFunction(prototype, factory->entries_string(),
+                              Builtins::kTypedArrayPrototypeEntries, 0, true);
+    entries->shared()->set_builtin_function_id(kTypedArrayEntries);
+
+    Handle<JSFunction> keys =
+        SimpleInstallFunction(prototype, factory->keys_string(),
+                              Builtins::kTypedArrayPrototypeKeys, 0, true);
+    keys->shared()->set_builtin_function_id(kTypedArrayKeys);
+
+    Handle<JSFunction> values =
+        SimpleInstallFunction(prototype, factory->values_string(),
+                              Builtins::kTypedArrayPrototypeValues, 0, true);
+    values->shared()->set_builtin_function_id(kTypedArrayValues);
+    JSObject::AddProperty(prototype, factory->iterator_symbol(), values,
+                          DONT_ENUM);
   }
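Note that the last AddProperty above reuses the `values` function for @@iterator instead of creating a second one: the spec requires %TypedArray%.prototype[Symbol.iterator] to be the very same function object as %TypedArray%.prototype.values. A sketch of registering one callable under two keys so the identity check holds (the registry type is illustrative):

```cpp
#include <cassert>
#include <functional>
#include <map>
#include <memory>
#include <string>

int main() {
  using Fn = std::function<void()>;
  std::map<std::string, std::shared_ptr<Fn>> prototype;

  // Install "values" once...
  auto values = std::make_shared<Fn>([] {});
  prototype["values"] = values;
  // ...and alias "@@iterator" to the same object rather than a copy.
  prototype["@@iterator"] = values;

  // Identity holds, mirroring ta.values === ta[Symbol.iterator] in JS.
  assert(prototype["values"].get() == prototype["@@iterator"].get());
}
```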
 
   {  // -- T y p e d A r r a y s
@@ -1978,6 +2311,33 @@
                                      Context::JS_SET_FUN_INDEX);
   }
 
+  {  // -- J S M o d u l e N a m e s p a c e
+    Handle<Map> map =
+        factory->NewMap(JS_MODULE_NAMESPACE_TYPE, JSModuleNamespace::kSize);
+    Map::SetPrototype(map, isolate->factory()->null_value());
+    Map::EnsureDescriptorSlack(map, 2);
+    native_context()->set_js_module_namespace_map(*map);
+
+    {  // Install @@toStringTag.
+      PropertyAttributes attribs =
+          static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
+      DataConstantDescriptor d(factory->to_string_tag_symbol(),
+                               factory->NewStringFromAsciiChecked("Module"),
+                               attribs);
+      map->AppendDescriptor(&d);
+    }
+
+    {  // Install @@iterator.
+      Handle<JSFunction> iterator = SimpleCreateFunction(
+          isolate, factory->NewStringFromAsciiChecked("[Symbol.iterator]"),
+          Builtins::kModuleNamespaceIterator, 0, true);
+      iterator->shared()->set_native(true);
+      // TODO(neis): Is this really supposed to be writable?
+      DataConstantDescriptor d(factory->iterator_symbol(), iterator, DONT_ENUM);
+      map->AppendDescriptor(&d);
+    }
+  }
+
   {  // -- I t e r a t o r R e s u l t
     Handle<Map> map =
         factory->NewMap(JS_OBJECT_TYPE, JSIteratorResult::kSize);
@@ -2174,23 +2534,20 @@
     const PropertyAttributes attributes =
       static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
 
-    // Create the ThrowTypeError functions.
+    // Create the ThrowTypeError function.
     Handle<AccessorPair> callee = factory->NewAccessorPair();
-    Handle<AccessorPair> caller = factory->NewAccessorPair();
 
     Handle<JSFunction> poison = GetStrictArgumentsPoisonFunction();
 
-    // Install the ThrowTypeError functions.
+    // Install the ThrowTypeError function.
     callee->set_getter(*poison);
     callee->set_setter(*poison);
-    caller->set_getter(*poison);
-    caller->set_setter(*poison);
 
     // Create the map. Allocate one in-object field for length.
     Handle<Map> map = factory->NewMap(
         JS_ARGUMENTS_TYPE, JSStrictArgumentsObject::kSize, FAST_ELEMENTS);
     // Create the descriptor array for the arguments object.
-    Map::EnsureDescriptorSlack(map, 3);
+    Map::EnsureDescriptorSlack(map, 2);
 
     {  // length
       DataDescriptor d(factory->length_string(),
@@ -2203,11 +2560,6 @@
                                    attributes);
       map->AppendDescriptor(&d);
     }
-    {  // caller
-      AccessorConstantDescriptor d(factory->caller_string(), caller,
-                                   attributes);
-      map->AppendDescriptor(&d);
-    }
     // @@iterator method is added later.
 
     DCHECK_EQ(native_context()->object_function()->prototype(),
@@ -2269,16 +2621,10 @@
 
   Handle<JSObject> prototype =
       factory()->NewJSObject(isolate()->object_function(), TENURED);
-  Handle<JSFunction> result =
-      InstallFunction(global, name, JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize,
-                      prototype, Builtins::kIllegal);
-
-  Handle<Map> initial_map = isolate()->factory()->NewMap(
-      JS_TYPED_ARRAY_TYPE,
-      JSTypedArray::kSizeWithInternalFields,
-      elements_kind);
-  JSFunction::SetInitialMap(result, initial_map,
-                            handle(initial_map->prototype(), isolate()));
+  Handle<JSFunction> result = InstallFunction(
+      global, name, JS_TYPED_ARRAY_TYPE, JSTypedArray::kSizeWithInternalFields,
+      prototype, Builtins::kIllegal);
+  result->initial_map()->set_elements_kind(elements_kind);
 
   CHECK(JSObject::SetPrototype(result, typed_array_function, false,
                                Object::DONT_THROW)
@@ -2585,6 +2931,25 @@
         *generator_function_function);
   }
 
+  {  // -- F i x e d A r r a y I t e r a t o r
+    int size = JSFixedArrayIterator::kHeaderSize +
+               JSFixedArrayIterator::kInObjectPropertyCount * kPointerSize;
+    Handle<Map> map = factory->NewMap(JS_FIXED_ARRAY_ITERATOR_TYPE, size);
+    Map::SetPrototype(map, iterator_prototype);
+    Map::EnsureDescriptorSlack(map,
+                               JSFixedArrayIterator::kInObjectPropertyCount);
+    map->SetInObjectProperties(JSFixedArrayIterator::kInObjectPropertyCount);
+    map->SetConstructor(native_context->object_function());
+
+    {  // next
+      DataDescriptor d(factory->next_string(), JSFixedArrayIterator::kNextIndex,
+                       DONT_ENUM, Representation::Tagged());
+      map->AppendDescriptor(&d);
+    }
+
+    native_context->set_fixed_array_iterator_map(*map);
+  }
+
   {  // -- S e t I t e r a t o r
     Handle<JSObject> set_iterator_prototype =
         isolate->factory()->NewJSObject(isolate->object_function(), TENURED);
@@ -2680,15 +3045,6 @@
       script_map->AppendDescriptor(&d);
     }
 
-    Handle<AccessorInfo> script_line_ends =
-        Accessors::ScriptLineEndsInfo(isolate, attribs);
-    {
-      AccessorConstantDescriptor d(
-          Handle<Name>(Name::cast(script_line_ends->name())), script_line_ends,
-          attribs);
-      script_map->AppendDescriptor(&d);
-    }
-
     Handle<AccessorInfo> script_context_data =
         Accessors::ScriptContextDataInfo(isolate, attribs);
     {
@@ -2868,13 +3224,11 @@
   void Genesis::InitializeGlobal_##id() {}
 
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_do_expressions)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_for_in)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_lookbehind)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_named_captures)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_property)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_sent)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tailcalls)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrictive_declarations)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_string_padding)
 #ifdef V8_I18N_SUPPORT
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(datetime_format_to_parts)
@@ -2959,35 +3313,6 @@
 }
 
 
-void Genesis::InitializeGlobal_harmony_object_values_entries() {
-  if (!FLAG_harmony_object_values_entries) return;
-
-  Handle<JSGlobalObject> global(
-      JSGlobalObject::cast(native_context()->global_object()));
-  Isolate* isolate = global->GetIsolate();
-  Factory* factory = isolate->factory();
-
-  Handle<JSFunction> object_function = isolate->object_function();
-  SimpleInstallFunction(object_function, factory->entries_string(),
-                        Builtins::kObjectEntries, 1, false);
-  SimpleInstallFunction(object_function, factory->values_string(),
-                        Builtins::kObjectValues, 1, false);
-}
-
-void Genesis::InitializeGlobal_harmony_object_own_property_descriptors() {
-  if (!FLAG_harmony_object_own_property_descriptors) return;
-
-  Handle<JSGlobalObject> global(
-      JSGlobalObject::cast(native_context()->global_object()));
-  Isolate* isolate = global->GetIsolate();
-  Factory* factory = isolate->factory();
-
-  Handle<JSFunction> object_function = isolate->object_function();
-  SimpleInstallFunction(object_function,
-                        factory->getOwnPropertyDescriptors_string(),
-                        Builtins::kObjectGetOwnPropertyDescriptors, 1, false);
-}
-
 void Genesis::InitializeGlobal_harmony_array_prototype_values() {
   if (!FLAG_harmony_array_prototype_values) return;
   Handle<JSFunction> array_constructor(native_context()->array_function());
@@ -3165,12 +3490,13 @@
       HeapObject::cast(object_function->initial_map()->prototype())->map());
 
   // Set up the map for Object.create(null) instances.
-  Handle<Map> object_with_null_prototype_map =
+  Handle<Map> slow_object_with_null_prototype_map =
       Map::CopyInitialMap(handle(object_function->initial_map(), isolate()));
-  Map::SetPrototype(object_with_null_prototype_map,
+  slow_object_with_null_prototype_map->set_dictionary_map(true);
+  Map::SetPrototype(slow_object_with_null_prototype_map,
                     isolate()->factory()->null_value());
-  native_context()->set_object_with_null_prototype_map(
-      *object_with_null_prototype_map);
+  native_context()->set_slow_object_with_null_prototype_map(
+      *slow_object_with_null_prototype_map);
 
   // Store the map for the %StringPrototype% after the natives have been
   // compiled and the String function has been set up.
@@ -3271,6 +3597,33 @@
         *isolate()->builtins()->JSBuiltinsConstructStub());
     InstallWithIntrinsicDefaultProto(isolate(), function,
                                      Context::PROMISE_FUNCTION_INDEX);
+
+    {
+      Handle<Code> code = handle(
+          isolate()->builtins()->builtin(Builtins::kPromiseResolveClosure),
+          isolate());
+      Handle<SharedFunctionInfo> info =
+          isolate()->factory()->NewSharedFunctionInfo(factory()->empty_string(),
+                                                      code, false);
+      info->set_internal_formal_parameter_count(1);
+      info->set_length(1);
+      native_context()->set_promise_resolve_shared_fun(*info);
+
+      code = handle(
+          isolate()->builtins()->builtin(Builtins::kPromiseRejectClosure),
+          isolate());
+      info = isolate()->factory()->NewSharedFunctionInfo(
+          factory()->empty_string(), code, false);
+      info->set_internal_formal_parameter_count(2);
+      info->set_length(1);
+      native_context()->set_promise_reject_shared_fun(*info);
+    }
+
+    Handle<JSFunction> create_resolving_functions =
+        SimpleCreateFunction(isolate(), factory()->empty_string(),
+                             Builtins::kCreateResolvingFunctions, 2, false);
+    native_context()->set_create_resolving_functions(
+        *create_resolving_functions);
   }
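CreateResolvingFunctions above mirrors the spec operation of the same name: it hands out a resolve/reject pair sharing an "already resolved" flag, so only the first call settles the promise. A hedged C++ sketch of that sharing, with std::function closures standing in for the shared-function-info-backed closures:

```cpp
#include <functional>
#include <memory>
#include <string>
#include <utility>

using Resolver = std::function<void(const std::string&)>;

// Both closures capture the same alreadyResolved flag, so whichever runs
// first disables the other, as in the spec's CreateResolvingFunctions.
std::pair<Resolver, Resolver> CreateResolvingFunctions(
    std::function<void(bool ok, const std::string&)> settle) {
  auto already_resolved = std::make_shared<bool>(false);
  Resolver resolve = [=](const std::string& value) {
    if (*already_resolved) return;
    *already_resolved = true;
    settle(true, value);
  };
  Resolver reject = [=](const std::string& reason) {
    if (*already_resolved) return;
    *already_resolved = true;
    settle(false, reason);
  };
  return {resolve, reject};
}
```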
 
   InstallBuiltinFunctionIds();
@@ -3461,15 +3814,10 @@
   static const char* harmony_simd_natives[] = {"native harmony-simd.js",
                                                nullptr};
   static const char* harmony_do_expressions_natives[] = {nullptr};
-  static const char* harmony_for_in_natives[] = {nullptr};
   static const char* harmony_regexp_lookbehind_natives[] = {nullptr};
-  static const char* harmony_restrictive_declarations_natives[] = {nullptr};
   static const char* harmony_regexp_named_captures_natives[] = {nullptr};
   static const char* harmony_regexp_property_natives[] = {nullptr};
   static const char* harmony_function_sent_natives[] = {nullptr};
-  static const char* harmony_object_values_entries_natives[] = {nullptr};
-  static const char* harmony_object_own_property_descriptors_natives[] = {
-      nullptr};
   static const char* harmony_array_prototype_values_natives[] = {nullptr};
   static const char* harmony_string_padding_natives[] = {
       "native harmony-string-padding.js", nullptr};
@@ -3695,11 +4043,11 @@
           InstallExtension(isolate, "v8/gc", &extension_states)) &&
          (!FLAG_expose_externalize_string ||
           InstallExtension(isolate, "v8/externalize", &extension_states)) &&
-         (!FLAG_track_gc_object_stats ||
+         (!FLAG_gc_stats ||
           InstallExtension(isolate, "v8/statistics", &extension_states)) &&
          (!FLAG_expose_trigger_failure ||
           InstallExtension(isolate, "v8/trigger-failure", &extension_states)) &&
-         (!(FLAG_ignition && FLAG_trace_ignition_dispatches) ||
+         (!FLAG_trace_ignition_dispatches ||
           InstallExtension(isolate, "v8/ignition-statistics",
                            &extension_states)) &&
          InstallRequestedExtensions(isolate, extensions, &extension_states);
@@ -4037,7 +4385,12 @@
   // and initialize it later in CreateNewGlobals.
   Handle<JSGlobalProxy> global_proxy;
   if (!maybe_global_proxy.ToHandle(&global_proxy)) {
-    global_proxy = isolate->factory()->NewUninitializedJSGlobalProxy();
+    const int internal_field_count =
+        !global_proxy_template.IsEmpty()
+            ? global_proxy_template->InternalFieldCount()
+            : 0;
+    global_proxy = isolate->factory()->NewUninitializedJSGlobalProxy(
+        JSGlobalProxy::SizeWithInternalFields(internal_field_count));
   }
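The proxy's size must be known when its map is created, so the internal field count is taken from the template up front. The sizing itself is the usual header-plus-slots arithmetic; a sketch with illustrative constants (the real header and pointer sizes come from V8):

```cpp
#include <cstdio>

// Illustrative constants, not V8's actual layout values.
constexpr int kHeaderSize = 24;
constexpr int kPointerSize = sizeof(void*);

// Mirrors the idea behind JSGlobalProxy::SizeWithInternalFields: each
// embedder internal field adds one pointer-sized slot after the fixed header.
constexpr int SizeWithInternalFields(int internal_field_count) {
  return kHeaderSize + internal_field_count * kPointerSize;
}

int main() {
  std::printf("0 fields -> %d bytes, 2 fields -> %d bytes\n",
              SizeWithInternalFields(0), SizeWithInternalFields(2));
}
```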
 
   // We can only de-serialize a context if the isolate was initialized from
@@ -4093,7 +4446,7 @@
     isolate->counters()->contexts_created_from_scratch()->Increment();
     // Re-initialize the counter because it got incremented during snapshot
     // creation.
-    isolate->native_context()->set_errors_thrown(Smi::FromInt(0));
+    isolate->native_context()->set_errors_thrown(Smi::kZero);
   }
 
   // Install experimental natives. Do not include them into the
@@ -4146,9 +4499,12 @@
     return;
   }
 
+  const int proxy_size = JSGlobalProxy::SizeWithInternalFields(
+      global_proxy_template->InternalFieldCount());
+
   Handle<JSGlobalProxy> global_proxy;
   if (!maybe_global_proxy.ToHandle(&global_proxy)) {
-    global_proxy = factory()->NewUninitializedJSGlobalProxy();
+    global_proxy = factory()->NewUninitializedJSGlobalProxy(proxy_size);
   }
 
   // CreateNewGlobals.
@@ -4164,9 +4520,10 @@
   Handle<JSFunction> global_proxy_function =
       isolate->factory()->NewFunctionFromSharedFunctionInfo(
           initial_map, shared, factory()->undefined_value());
-  DCHECK_EQ(global_proxy_data->internal_field_count(), 0);
+  DCHECK_EQ(global_proxy_data->internal_field_count(),
+            global_proxy_template->InternalFieldCount());
   Handle<Map> global_proxy_map = isolate->factory()->NewMap(
-      JS_GLOBAL_PROXY_TYPE, JSGlobalProxy::kSize, FAST_HOLEY_SMI_ELEMENTS);
+      JS_GLOBAL_PROXY_TYPE, proxy_size, FAST_HOLEY_SMI_ELEMENTS);
   JSFunction::SetInitialMap(global_proxy_function, global_proxy_map,
                             factory()->null_value());
   global_proxy_map->set_is_access_check_needed(true);
@@ -4179,7 +4536,7 @@
   factory()->ReinitializeJSGlobalProxy(global_proxy, global_proxy_function);
 
   // HookUpGlobalProxy.
-  global_proxy->set_native_context(*factory()->null_value());
+  global_proxy->set_native_context(heap()->null_value());
 
   // DetachGlobal.
   JSObject::ForceSetPrototype(global_proxy, factory()->null_value());
diff --git a/src/builtins/arm/builtins-arm.cc b/src/builtins/arm/builtins-arm.cc
index 2c0bef2..6103971 100644
--- a/src/builtins/arm/builtins-arm.cc
+++ b/src/builtins/arm/builtins-arm.cc
@@ -260,7 +260,7 @@
 
   // 2b. No arguments, return +0.
   __ bind(&no_arguments);
-  __ Move(r0, Smi::FromInt(0));
+  __ Move(r0, Smi::kZero);
   __ Ret(1);
 }
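The mechanical Smi::FromInt(0) to Smi::kZero replacements throughout this merge give the most common Smi a canonical named constant instead of a factory call at every use site. A sketch of the tagging idea behind it (the one-bit Smi tag is the common configuration; treat the layout as illustrative):

```cpp
#include <cstdint>

// Sketch of small-integer ("Smi") tagging: the value is shifted left one bit
// and the low bit stays 0; heap object pointers carry tag 1 instead.
struct Smi {
  intptr_t bits;
  static constexpr Smi FromInt(int value) {
    return Smi{static_cast<intptr_t>(value) << 1};
  }
};

// A named constant for the overwhelmingly common case, mirroring Smi::kZero.
constexpr Smi kSmiZero = Smi::FromInt(0);
static_assert(kSmiZero.bits == 0, "zero tags to all-zero bits");
```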
 
@@ -288,7 +288,7 @@
     __ ldr(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2));
     __ b(&done);
     __ bind(&no_arguments);
-    __ Move(r2, Smi::FromInt(0));
+    __ Move(r2, Smi::kZero);
     __ bind(&done);
   }
 
@@ -547,14 +547,14 @@
   GenerateTailCallToSharedCode(masm);
 }
 
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
-                                           bool is_api_function,
-                                           bool create_implicit_receiver,
-                                           bool check_derived_construct) {
+namespace {
+
+void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
+                                    bool create_implicit_receiver,
+                                    bool check_derived_construct) {
   // ----------- S t a t e -------------
   //  -- r0     : number of arguments
   //  -- r1     : constructor function
-  //  -- r2     : allocation site or undefined
   //  -- r3     : new target
   //  -- cp     : context
   //  -- lr     : return address
@@ -568,10 +568,8 @@
     FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
 
     // Preserve the incoming parameters on the stack.
-    __ AssertUndefinedOrAllocationSite(r2, r4);
-    __ Push(cp);
     __ SmiTag(r0);
-    __ Push(r2, r0);
+    __ Push(cp, r0);
 
     if (create_implicit_receiver) {
       // Allocate the new receiver object.
@@ -701,6 +699,8 @@
   __ Jump(lr);
 }
 
+}  // namespace
+
 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   Generate_JSConstructStubHelper(masm, false, true, false);
 }
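This hunk moves Generate_JSConstructStubHelper from a `static` function into an anonymous namespace (closed with `}  // namespace` above), the idiomatic C++ way to give file-local helpers internal linkage. Minimal sketch:

```cpp
namespace {

// Internal linkage: visible only inside this translation unit, like the
// former `static` helper.
int Helper(int x) { return x * 2; }

}  // namespace

int PublicEntryPoint(int x) { return Helper(x); }
```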
@@ -1146,31 +1146,6 @@
   __ Jump(r4);
 }
 
-void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
-  // Save the function and context for call to CompileBaseline.
-  __ ldr(r1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-  __ ldr(kContextRegister,
-         MemOperand(fp, StandardFrameConstants::kContextOffset));
-
-  // Leave the frame before recompiling for baseline so that we don't count as
-  // an activation on the stack.
-  LeaveInterpreterFrame(masm, r2);
-
-  {
-    FrameScope frame_scope(masm, StackFrame::INTERNAL);
-    // Push return value.
-    __ push(r0);
-
-    // Push function as argument and compile for baseline.
-    __ push(r1);
-    __ CallRuntime(Runtime::kCompileBaseline);
-
-    // Restore return value.
-    __ pop(r0);
-  }
-  __ Jump(lr);
-}
-
 static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
                                         Register scratch,
                                         Label* stack_overflow) {
@@ -1321,12 +1296,12 @@
   }
 }
 
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   // Set the return address to the correct point in the interpreter entry
   // trampoline.
   Smi* interpreter_entry_return_pc_offset(
       masm->isolate()->heap()->interpreter_entry_return_pc_offset());
-  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
   __ Move(r2, masm->isolate()->builtins()->InterpreterEntryTrampoline());
   __ add(lr, r2, Operand(interpreter_entry_return_pc_offset->value() +
                          Code::kHeaderSize - kHeapObjectTag));
@@ -1362,6 +1337,29 @@
   __ mov(pc, ip);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+  // Advance the current bytecode offset stored within the given interpreter
+  // stack frame. This simulates what all bytecode handlers do upon completion
+  // of the underlying operation.
+  __ ldr(r1, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ ldr(r2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(kInterpreterAccumulatorRegister, r1, r2);
+    __ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
+    __ mov(r2, r0);  // Result is the new bytecode offset.
+    __ Pop(kInterpreterAccumulatorRegister);
+  }
+  __ str(r2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+  Generate_InterpreterEnterBytecode(masm);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  Generate_InterpreterEnterBytecode(masm);
+}
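The new InterpreterEnterBytecodeAdvance differs from plain dispatch in one step: before re-entering the dispatch loop it advances the saved bytecode offset past the current bytecode, simulating (as the comment says) what a handler would have done on completion. A toy interpreter sketch of the same idea, under an assumed fixed-width encoding:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Toy frame: a bytecode array plus the current offset, standing in for the
// kBytecodeArrayFromFp / kBytecodeOffsetFromFp frame slots.
struct Frame {
  std::vector<uint8_t> bytecode;
  std::size_t offset = 0;
};

// Illustrative fixed-width encoding: one opcode byte plus one operand byte.
// Real bytecodes are variable-length, which is why the code above asks the
// runtime (kInterpreterAdvanceBytecodeOffset) for the new offset.
constexpr std::size_t BytecodeLength(uint8_t /*opcode*/) { return 2; }

void EnterBytecode(Frame& f);  // resume dispatch at f.offset (body elided)

// Advance-then-enter: step past the current bytecode, then dispatch.
void EnterBytecodeAdvance(Frame& f) {
  f.offset += BytecodeLength(f.bytecode[f.offset]);
  EnterBytecode(f);
}

void EnterBytecode(Frame& f) { (void)f; }
```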
+
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r0 : argument count (preserved for callee)
@@ -1370,7 +1368,6 @@
   // -----------------------------------
   // First lookup code, maybe we don't need to compile!
   Label gotta_call_runtime, gotta_call_runtime_no_stack;
-  Label maybe_call_runtime;
   Label try_shared;
   Label loop_top, loop_bottom;
 
@@ -1437,15 +1434,12 @@
          FieldMemOperand(array_pointer,
                          SharedFunctionInfo::kOffsetToPreviousCachedCode));
   __ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
-  __ JumpIfSmi(entry, &maybe_call_runtime);
+  __ JumpIfSmi(entry, &try_shared);
 
   // Found literals and code. Get them into the closure and return.
   __ pop(closure);
   // Store code entry in the closure.
   __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
-
-  Label install_optimized_code_and_tailcall;
-  __ bind(&install_optimized_code_and_tailcall);
   __ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
   __ RecordWriteCodeEntryField(closure, entry, r5);
 
@@ -1480,25 +1474,18 @@
   // We found neither literals nor code.
   __ jmp(&gotta_call_runtime);
 
-  __ bind(&maybe_call_runtime);
-  __ pop(closure);
-
-  // Last possibility. Check the context free optimized code map entry.
-  __ ldr(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
-                                         SharedFunctionInfo::kSharedCodeIndex));
-  __ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
-  __ JumpIfSmi(entry, &try_shared);
-
-  // Store code entry in the closure.
-  __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ jmp(&install_optimized_code_and_tailcall);
-
   __ bind(&try_shared);
+  __ pop(closure);
   __ pop(new_target);
   __ pop(argument_count);
-  // Is the full code valid?
   __ ldr(entry,
          FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  // Is the shared function marked for tier up?
+  __ ldrb(r5, FieldMemOperand(entry,
+                              SharedFunctionInfo::kMarkedForTierUpByteOffset));
+  __ tst(r5, Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
+  __ b(ne, &gotta_call_runtime_no_stack);
+  // Is the full code valid?
   __ ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
   __ ldr(r5, FieldMemOperand(entry, Code::kFlagsOffset));
   __ and_(r5, r5, Operand(Code::KindField::kMask));
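The new tier-up check above loads a single byte of the SharedFunctionInfo and tests a single bit within it, which is cheaper than loading and masking a full word of flags. An equivalent C++ sketch; the offsets are assumptions, not V8's actual constants:

```cpp
#include <cstdint>

// Illustrative layout only; the real values are SharedFunctionInfo constants.
constexpr int kMarkedForTierUpByteOffset = 7;     // assumption
constexpr int kMarkedForTierUpBitWithinByte = 4;  // assumption

// Mirrors the assembly above: one byte load, one bit test.
bool MarkedForTierUp(const uint8_t* shared_info) {
  uint8_t flags = shared_info[kMarkedForTierUpByteOffset];
  return (flags & (1u << kMarkedForTierUpBitWithinByte)) != 0;
}
```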
@@ -1859,7 +1846,7 @@
 
   // If the code object is null, just return to the caller.
   Label skip;
-  __ cmp(r0, Operand(Smi::FromInt(0)));
+  __ cmp(r0, Operand(Smi::kZero));
   __ b(ne, &skip);
   __ Ret();
 
@@ -2443,8 +2430,8 @@
         __ Push(r0, r1);
         __ mov(r0, r3);
         __ Push(cp);
-        ToObjectStub stub(masm->isolate());
-        __ CallStub(&stub);
+        __ Call(masm->isolate()->builtins()->ToObject(),
+                RelocInfo::CODE_TARGET);
         __ Pop(cp);
         __ mov(r3, r0);
         __ Pop(r0, r1);
@@ -2773,7 +2760,7 @@
   // -----------------------------------
   __ SmiTag(r1);
   __ Push(r1);
-  __ Move(cp, Smi::FromInt(0));
+  __ Move(cp, Smi::kZero);
   __ TailCallRuntime(Runtime::kAllocateInNewSpace);
 }
 
@@ -2786,7 +2773,7 @@
   __ SmiTag(r1);
   __ Move(r2, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
   __ Push(r1, r2);
-  __ Move(cp, Smi::FromInt(0));
+  __ Move(cp, Smi::kZero);
   __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
 }
 
@@ -2797,7 +2784,7 @@
   //  -- lr : return address
   // -----------------------------------
   __ Push(r1);
-  __ Move(cp, Smi::FromInt(0));
+  __ Move(cp, Smi::kZero);
   __ TailCallRuntime(Runtime::kAbort);
 }
 
diff --git a/src/builtins/arm64/builtins-arm64.cc b/src/builtins/arm64/builtins-arm64.cc
index 48551de..aeb0508 100644
--- a/src/builtins/arm64/builtins-arm64.cc
+++ b/src/builtins/arm64/builtins-arm64.cc
@@ -278,7 +278,7 @@
     __ Ldr(x2, MemOperand(jssp, x0, LSL, kPointerSizeLog2));
     __ B(&done);
     __ Bind(&no_arguments);
-    __ Mov(x2, Smi::FromInt(0));
+    __ Mov(x2, Smi::kZero);
     __ Bind(&done);
   }
 
@@ -535,14 +535,14 @@
   GenerateTailCallToSharedCode(masm);
 }
 
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
-                                           bool is_api_function,
-                                           bool create_implicit_receiver,
-                                           bool check_derived_construct) {
+namespace {
+
+void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
+                                    bool create_implicit_receiver,
+                                    bool check_derived_construct) {
   // ----------- S t a t e -------------
   //  -- x0     : number of arguments
   //  -- x1     : constructor function
-  //  -- x2     : allocation site or undefined
   //  -- x3     : new target
   //  -- lr     : return address
   //  -- cp     : context pointer
@@ -560,14 +560,11 @@
     // Preserve the four incoming parameters on the stack.
     Register argc = x0;
     Register constructor = x1;
-    Register allocation_site = x2;
     Register new_target = x3;
 
     // Preserve the incoming parameters on the stack.
-    __ AssertUndefinedOrAllocationSite(allocation_site, x10);
-    __ Push(cp);
     __ SmiTag(argc);
-    __ Push(allocation_site, argc);
+    __ Push(cp, argc);
 
     if (create_implicit_receiver) {
       // Allocate the new receiver object.
@@ -703,6 +700,8 @@
   __ Ret();
 }
 
+}  // namespace
+
 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   Generate_JSConstructStubHelper(masm, false, true, false);
 }
@@ -1155,31 +1154,6 @@
   __ Jump(x7);
 }
 
-void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
-  // Save the function and context for call to CompileBaseline.
-  __ ldr(x1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-  __ ldr(kContextRegister,
-         MemOperand(fp, StandardFrameConstants::kContextOffset));
-
-  // Leave the frame before recompiling for baseline so that we don't count as
-  // an activation on the stack.
-  LeaveInterpreterFrame(masm, x2);
-
-  {
-    FrameScope frame_scope(masm, StackFrame::INTERNAL);
-    // Push return value.
-    __ push(x0);
-
-    // Push function as argument and compile for baseline.
-    __ push(x1);
-    __ CallRuntime(Runtime::kCompileBaseline);
-
-    // Restore return value.
-    __ pop(x0);
-  }
-  __ Ret();
-}
-
 static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
                                         Register scratch,
                                         Label* stack_overflow) {
@@ -1332,12 +1306,12 @@
   }
 }
 
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   // Set the return address to the correct point in the interpreter entry
   // trampoline.
   Smi* interpreter_entry_return_pc_offset(
       masm->isolate()->heap()->interpreter_entry_return_pc_offset());
-  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
   __ LoadObject(x1, masm->isolate()->builtins()->InterpreterEntryTrampoline());
   __ Add(lr, x1, Operand(interpreter_entry_return_pc_offset->value() +
                          Code::kHeaderSize - kHeapObjectTag));
@@ -1373,6 +1347,29 @@
   __ Jump(ip0);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+  // Advance the current bytecode offset stored within the given interpreter
+  // stack frame. This simulates what all bytecode handlers do upon completion
+  // of the underlying operation.
+  __ Ldr(x1, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ Ldr(x2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(kInterpreterAccumulatorRegister, x1, x2);
+    __ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
+    __ Mov(x2, x0);  // Result is the new bytecode offset.
+    __ Pop(kInterpreterAccumulatorRegister);
+  }
+  __ Str(x2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+  Generate_InterpreterEnterBytecode(masm);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  Generate_InterpreterEnterBytecode(masm);
+}
+
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- x0 : argument count (preserved for callee)
@@ -1381,7 +1378,6 @@
   // -----------------------------------
   // First lookup code, maybe we don't need to compile!
   Label gotta_call_runtime;
-  Label maybe_call_runtime;
   Label try_shared;
   Label loop_top, loop_bottom;
 
@@ -1439,13 +1435,10 @@
          FieldMemOperand(array_pointer,
                          SharedFunctionInfo::kOffsetToPreviousCachedCode));
   __ Ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
-  __ JumpIfSmi(entry, &maybe_call_runtime);
+  __ JumpIfSmi(entry, &try_shared);
 
   // Found literals and code. Get them into the closure and return.
   __ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
-
-  Label install_optimized_code_and_tailcall;
-  __ Bind(&install_optimized_code_and_tailcall);
   __ Str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
   __ RecordWriteCodeEntryField(closure, entry, x5);
 
@@ -1476,22 +1469,16 @@
   // We found neither literals nor code.
   __ B(&gotta_call_runtime);
 
-  __ Bind(&maybe_call_runtime);
-
-  // Last possibility. Check the context free optimized code map entry.
-  __ Ldr(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
-                                         SharedFunctionInfo::kSharedCodeIndex));
-  __ Ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
-  __ JumpIfSmi(entry, &try_shared);
-
-  // Store code entry in the closure.
-  __ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ B(&install_optimized_code_and_tailcall);
-
   __ Bind(&try_shared);
-  // Is the full code valid?
   __ Ldr(entry,
          FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  // Is the shared function marked for tier up?
+  __ Ldrb(temp, FieldMemOperand(
+                    entry, SharedFunctionInfo::kMarkedForTierUpByteOffset));
+  __ TestAndBranchIfAnySet(
+      temp, 1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte,
+      &gotta_call_runtime);
+  // Is the full code valid?
   __ Ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
   __ Ldr(x5, FieldMemOperand(entry, Code::kFlagsOffset));
   __ and_(x5, x5, Operand(Code::KindField::kMask));
@@ -1863,7 +1850,7 @@
 
   // If the code object is null, just return to the caller.
   Label skip;
-  __ CompareAndBranch(x0, Smi::FromInt(0), ne, &skip);
+  __ CompareAndBranch(x0, Smi::kZero, ne, &skip);
   __ Ret();
 
   __ Bind(&skip);
@@ -2512,8 +2499,8 @@
         __ Push(x0, x1);
         __ Mov(x0, x3);
         __ Push(cp);
-        ToObjectStub stub(masm->isolate());
-        __ CallStub(&stub);
+        __ Call(masm->isolate()->builtins()->ToObject(),
+                RelocInfo::CODE_TARGET);
         __ Pop(cp);
         __ Mov(x3, x0);
         __ Pop(x1, x0);
@@ -2847,7 +2834,7 @@
   // -----------------------------------
   __ SmiTag(x1);
   __ Push(x1);
-  __ Move(cp, Smi::FromInt(0));
+  __ Move(cp, Smi::kZero);
   __ TailCallRuntime(Runtime::kAllocateInNewSpace);
 }
 
@@ -2861,7 +2848,7 @@
   __ SmiTag(x1);
   __ Move(x2, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
   __ Push(x1, x2);
-  __ Move(cp, Smi::FromInt(0));
+  __ Move(cp, Smi::kZero);
   __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
 }
 
@@ -2874,7 +2861,7 @@
   // -----------------------------------
   MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
   __ Push(x1);
-  __ Move(cp, Smi::FromInt(0));
+  __ Move(cp, Smi::kZero);
   __ TailCallRuntime(Runtime::kAbort);
 }
 
diff --git a/src/builtins/builtins-api.cc b/src/builtins/builtins-api.cc
index aed10b1..defc4dc 100644
--- a/src/builtins/builtins-api.cc
+++ b/src/builtins/builtins-api.cc
@@ -122,7 +122,7 @@
 
 BUILTIN(HandleApiCall) {
   HandleScope scope(isolate);
-  Handle<JSFunction> function = args.target<JSFunction>();
+  Handle<JSFunction> function = args.target();
   Handle<Object> receiver = args.receiver();
   Handle<HeapObject> new_target = args.new_target();
   Handle<FunctionTemplateInfo> fun_data(function->shared()->get_api_func_data(),
diff --git a/src/builtins/builtins-array.cc b/src/builtins/builtins-array.cc
index b4969f1..c09f11b 100644
--- a/src/builtins/builtins-array.cc
+++ b/src/builtins/builtins-array.cc
@@ -6,6 +6,7 @@
 #include "src/builtins/builtins-utils.h"
 
 #include "src/code-factory.h"
+#include "src/contexts.h"
 #include "src/elements.h"
 
 namespace v8 {
@@ -407,14 +408,18 @@
  */
 class ArrayConcatVisitor {
  public:
-  ArrayConcatVisitor(Isolate* isolate, Handle<Object> storage,
+  ArrayConcatVisitor(Isolate* isolate, Handle<HeapObject> storage,
                      bool fast_elements)
       : isolate_(isolate),
         storage_(isolate->global_handles()->Create(*storage)),
         index_offset_(0u),
-        bit_field_(FastElementsField::encode(fast_elements) |
-                   ExceedsLimitField::encode(false) |
-                   IsFixedArrayField::encode(storage->IsFixedArray())) {
+        bit_field_(
+            FastElementsField::encode(fast_elements) |
+            ExceedsLimitField::encode(false) |
+            IsFixedArrayField::encode(storage->IsFixedArray()) |
+            HasSimpleElementsField::encode(storage->IsFixedArray() ||
+                                           storage->map()->instance_type() >
+                                               LAST_CUSTOM_ELEMENTS_RECEIVER)) {
     DCHECK(!(this->fast_elements() && !is_fixed_array()));
   }
 
@@ -503,12 +508,16 @@
   // (otherwise)
   Handle<FixedArray> storage_fixed_array() {
     DCHECK(is_fixed_array());
+    DCHECK(has_simple_elements());
     return Handle<FixedArray>::cast(storage_);
   }
   Handle<JSReceiver> storage_jsreceiver() {
     DCHECK(!is_fixed_array());
     return Handle<JSReceiver>::cast(storage_);
   }
+  bool has_simple_elements() const {
+    return HasSimpleElementsField::decode(bit_field_);
+  }
 
  private:
   // Convert storage to dictionary mode.
@@ -541,12 +550,14 @@
 
   inline void set_storage(FixedArray* storage) {
     DCHECK(is_fixed_array());
+    DCHECK(has_simple_elements());
     storage_ = isolate_->global_handles()->Create(storage);
   }
 
   class FastElementsField : public BitField<bool, 0, 1> {};
   class ExceedsLimitField : public BitField<bool, 1, 1> {};
   class IsFixedArrayField : public BitField<bool, 2, 1> {};
+  class HasSimpleElementsField : public BitField<bool, 3, 1> {};
 
   bool fast_elements() const { return FastElementsField::decode(bit_field_); }
   void set_fast_elements(bool fast) {
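The visitor packs its four booleans into one integer through the BitField members above; each BitField<bool, position, 1> claims a bit range and supplies encode/decode. A self-contained sketch of that template, simplified relative to V8's (which also supports wider fields and other storage types):

```cpp
#include <cstdint>

// Simplified BitField: T occupies `size` bits starting at `position`.
template <typename T, int position, int size>
struct BitField {
  static constexpr uint32_t kMask = ((1u << size) - 1u) << position;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << position;
  }
  static constexpr T decode(uint32_t bits) {
    return static_cast<T>((bits & kMask) >> position);
  }
  // Replace this field's bits, leaving the others intact.
  static constexpr uint32_t update(uint32_t bits, T value) {
    return (bits & ~kMask) | encode(value);
  }
};

using FastElementsField = BitField<bool, 0, 1>;
using ExceedsLimitField = BitField<bool, 1, 1>;

static_assert(FastElementsField::decode(FastElementsField::encode(true) |
                                        ExceedsLimitField::encode(false)),
              "fields do not clobber each other");
```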
@@ -772,7 +783,6 @@
   visitor->increase_index_offset(length);
   return true;
 }
-
 /**
  * A helper function that visits "array" elements of a JSReceiver in numerical
  * order.
@@ -802,7 +812,8 @@
     return IterateElementsSlow(isolate, receiver, length, visitor);
   }
 
-  if (!HasOnlySimpleElements(isolate, *receiver)) {
+  if (!HasOnlySimpleElements(isolate, *receiver) ||
+      !visitor->has_simple_elements()) {
     return IterateElementsSlow(isolate, receiver, length, visitor);
   }
   Handle<JSObject> array = Handle<JSObject>::cast(receiver);
@@ -1071,7 +1082,7 @@
     // In case of failure, fall through.
   }
 
-  Handle<Object> storage;
+  Handle<HeapObject> storage;
   if (fast_case) {
     // The backing storage array must have non-existing elements to preserve
     // holes across concat operations.
@@ -1084,12 +1095,12 @@
     storage = SeededNumberDictionary::New(isolate, at_least_space_for);
   } else {
     DCHECK(species->IsConstructor());
-    Handle<Object> length(Smi::FromInt(0), isolate);
+    Handle<Object> length(Smi::kZero, isolate);
     Handle<Object> storage_object;
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
         isolate, storage_object,
         Execution::New(isolate, species, species, 1, &length));
-    storage = storage_object;
+    storage = Handle<HeapObject>::cast(storage_object);
   }
 
   ArrayConcatVisitor visitor(isolate, storage, fast_case);
@@ -1236,7 +1247,7 @@
   Label call_runtime(assembler), return_true(assembler),
       return_false(assembler);
 
-  assembler->GotoIf(assembler->WordIsSmi(object), &return_false);
+  assembler->GotoIf(assembler->TaggedIsSmi(object), &return_false);
   Node* instance_type = assembler->LoadInstanceType(object);
 
   assembler->GotoIf(assembler->Word32Equal(
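WordIsSmi is renamed TaggedIsSmi throughout this merge; both test the pointer-tagging invariant that a Smi has its low bit clear while heap object pointers carry tag 1. A sketch of the underlying check (the one-bit tag is the common configuration; treat the details as illustrative):

```cpp
#include <cstdint>

constexpr intptr_t kSmiTagMask = 1;  // low bit separates Smis from pointers
constexpr intptr_t kSmiTag = 0;      // Smis tag to 0, heap objects to 1

// What TaggedIsSmi checks: is this tagged word an immediate small integer?
inline bool TaggedIsSmi(intptr_t tagged) {
  return (tagged & kSmiTagMask) == kSmiTag;
}
```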
@@ -1296,7 +1307,7 @@
   {
     // Handle case where JSArray length is not an Smi in the runtime
     Node* len = assembler->LoadObjectField(array, JSArray::kLengthOffset);
-    assembler->GotoUnless(assembler->WordIsSmi(len), &call_runtime);
+    assembler->GotoUnless(assembler->TaggedIsSmi(len), &call_runtime);
 
     len_var.Bind(assembler->SmiToWord(len));
     assembler->Branch(assembler->WordEqual(len_var.value(), intptr_zero),
@@ -1309,7 +1320,7 @@
         init_k_zero(assembler), init_k_n(assembler);
     Node* tagged_n = assembler->ToInteger(context, start_from);
 
-    assembler->Branch(assembler->WordIsSmi(tagged_n), &init_k_smi,
+    assembler->Branch(assembler->TaggedIsSmi(tagged_n), &init_k_smi,
                       &init_k_heap_num);
 
     assembler->Bind(&init_k_smi);
@@ -1395,7 +1406,7 @@
         undef_loop(assembler, &index_var), not_smi(assembler),
         not_heap_num(assembler);
 
-    assembler->GotoUnless(assembler->WordIsSmi(search_element), &not_smi);
+    assembler->GotoUnless(assembler->TaggedIsSmi(search_element), &not_smi);
     search_num.Bind(assembler->SmiToFloat64(search_element));
     assembler->Goto(&heap_num_loop);
 
@@ -1464,7 +1475,7 @@
         Node* element_k = assembler->LoadFixedArrayElement(
             elements, index_var.value(), 0,
             CodeStubAssembler::INTPTR_PARAMETERS);
-        assembler->GotoUnless(assembler->WordIsSmi(element_k), &not_smi);
+        assembler->GotoUnless(assembler->TaggedIsSmi(element_k), &not_smi);
         assembler->Branch(
             assembler->Float64Equal(search_num.value(),
                                     assembler->SmiToFloat64(element_k)),
@@ -1474,8 +1485,9 @@
         assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k),
                                                   heap_number_map),
                           &continue_loop);
-        assembler->BranchIfFloat64Equal(
-            search_num.value(), assembler->LoadHeapNumberValue(element_k),
+        assembler->Branch(
+            assembler->Float64Equal(search_num.value(),
+                                    assembler->LoadHeapNumberValue(element_k)),
             &return_true, &continue_loop);
 
         assembler->Bind(&continue_loop);
@@ -1492,7 +1504,7 @@
         Node* element_k = assembler->LoadFixedArrayElement(
             elements, index_var.value(), 0,
             CodeStubAssembler::INTPTR_PARAMETERS);
-        assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
+        assembler->GotoIf(assembler->TaggedIsSmi(element_k), &continue_loop);
         assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k),
                                                   heap_number_map),
                           &continue_loop);
@@ -1514,7 +1526,7 @@
           &return_false);
       Node* element_k = assembler->LoadFixedArrayElement(
           elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
-      assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
+      assembler->GotoIf(assembler->TaggedIsSmi(element_k), &continue_loop);
       assembler->GotoUnless(assembler->IsStringInstanceType(
                                 assembler->LoadInstanceType(element_k)),
                             &continue_loop);
@@ -1546,7 +1558,7 @@
 
       Node* element_k = assembler->LoadFixedArrayElement(
           elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
-      assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
+      assembler->GotoIf(assembler->TaggedIsSmi(element_k), &continue_loop);
 
       Node* map_k = assembler->LoadMap(element_k);
       assembler->BranchIfSimd128Equal(search_element, map, element_k, map_k,
@@ -1564,7 +1576,8 @@
         hole_loop(assembler, &index_var), search_notnan(assembler);
     Variable search_num(assembler, MachineRepresentation::kFloat64);
 
-    assembler->GotoUnless(assembler->WordIsSmi(search_element), &search_notnan);
+    assembler->GotoUnless(assembler->TaggedIsSmi(search_element),
+                          &search_notnan);
     search_num.Bind(assembler->SmiToFloat64(search_element));
     assembler->Goto(&not_nan_loop);
 
@@ -1588,8 +1601,8 @@
       Node* element_k = assembler->LoadFixedDoubleArrayElement(
           elements, index_var.value(), MachineType::Float64(), 0,
           CodeStubAssembler::INTPTR_PARAMETERS);
-      assembler->BranchIfFloat64Equal(element_k, search_num.value(),
-                                      &return_true, &continue_loop);
+      assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
+                        &return_true, &continue_loop);
       assembler->Bind(&continue_loop);
       index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
       assembler->Goto(&not_nan_loop);
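BranchIfFloat64Equal is replaced here by composing Branch with Float64Equal; the comparison keeps IEEE semantics either way, which is why the search code maintains separate not_nan and NaN loops:

```cpp
// IEEE-754 equality: NaN never compares equal, not even to itself, so an
// equality-based search loop (like not_nan_loop above) must special-case NaN.
inline bool IsFloat64NaN(double x) { return x != x; }
```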
@@ -1618,7 +1631,8 @@
         hole_loop(assembler, &index_var), search_notnan(assembler);
     Variable search_num(assembler, MachineRepresentation::kFloat64);
 
-    assembler->GotoUnless(assembler->WordIsSmi(search_element), &search_notnan);
+    assembler->GotoUnless(assembler->TaggedIsSmi(search_element),
+                          &search_notnan);
     search_num.Bind(assembler->SmiToFloat64(search_element));
     assembler->Goto(&not_nan_loop);
 
@@ -1647,8 +1661,8 @@
           elements, index_var.value(), MachineType::Float64(), 0,
           CodeStubAssembler::INTPTR_PARAMETERS, &continue_loop);
 
-      assembler->BranchIfFloat64Equal(element_k, search_num.value(),
-                                      &return_true, &continue_loop);
+      assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
+                        &return_true, &continue_loop);
       assembler->Bind(&continue_loop);
       index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
       assembler->Goto(&not_nan_loop);
@@ -1738,7 +1752,7 @@
   {
     // Handle case where JSArray length is not an Smi in the runtime
     Node* len = assembler->LoadObjectField(array, JSArray::kLengthOffset);
-    assembler->GotoUnless(assembler->WordIsSmi(len), &call_runtime);
+    assembler->GotoUnless(assembler->TaggedIsSmi(len), &call_runtime);
 
     len_var.Bind(assembler->SmiToWord(len));
     assembler->Branch(assembler->WordEqual(len_var.value(), intptr_zero),
@@ -1751,7 +1765,7 @@
         init_k_zero(assembler), init_k_n(assembler);
     Node* tagged_n = assembler->ToInteger(context, start_from);
 
-    assembler->Branch(assembler->WordIsSmi(tagged_n), &init_k_smi,
+    assembler->Branch(assembler->TaggedIsSmi(tagged_n), &init_k_smi,
                       &init_k_heap_num);
 
     assembler->Bind(&init_k_smi);
@@ -1837,7 +1851,7 @@
         undef_loop(assembler, &index_var), not_smi(assembler),
         not_heap_num(assembler);
 
-    assembler->GotoUnless(assembler->WordIsSmi(search_element), &not_smi);
+    assembler->GotoUnless(assembler->TaggedIsSmi(search_element), &not_smi);
     search_num.Bind(assembler->SmiToFloat64(search_element));
     assembler->Goto(&heap_num_loop);
 
@@ -1903,7 +1917,7 @@
         Node* element_k = assembler->LoadFixedArrayElement(
             elements, index_var.value(), 0,
             CodeStubAssembler::INTPTR_PARAMETERS);
-        assembler->GotoUnless(assembler->WordIsSmi(element_k), &not_smi);
+        assembler->GotoUnless(assembler->TaggedIsSmi(element_k), &not_smi);
         assembler->Branch(
             assembler->Float64Equal(search_num.value(),
                                     assembler->SmiToFloat64(element_k)),
@@ -1913,8 +1927,9 @@
         assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k),
                                                   heap_number_map),
                           &continue_loop);
-        assembler->BranchIfFloat64Equal(
-            search_num.value(), assembler->LoadHeapNumberValue(element_k),
+        assembler->Branch(
+            assembler->Float64Equal(search_num.value(),
+                                    assembler->LoadHeapNumberValue(element_k)),
             &return_found, &continue_loop);
 
         assembler->Bind(&continue_loop);
@@ -1931,7 +1946,7 @@
           &return_not_found);
       Node* element_k = assembler->LoadFixedArrayElement(
           elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
-      assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
+      assembler->GotoIf(assembler->TaggedIsSmi(element_k), &continue_loop);
       assembler->GotoUnless(assembler->IsStringInstanceType(
                                 assembler->LoadInstanceType(element_k)),
                             &continue_loop);
@@ -1963,7 +1978,7 @@
 
       Node* element_k = assembler->LoadFixedArrayElement(
           elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
-      assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
+      assembler->GotoIf(assembler->TaggedIsSmi(element_k), &continue_loop);
 
       Node* map_k = assembler->LoadMap(element_k);
       assembler->BranchIfSimd128Equal(search_element, map, element_k, map_k,
@@ -1980,7 +1995,8 @@
     Label not_nan_loop(assembler, &index_var), search_notnan(assembler);
     Variable search_num(assembler, MachineRepresentation::kFloat64);
 
-    assembler->GotoUnless(assembler->WordIsSmi(search_element), &search_notnan);
+    assembler->GotoUnless(assembler->TaggedIsSmi(search_element),
+                          &search_notnan);
     search_num.Bind(assembler->SmiToFloat64(search_element));
     assembler->Goto(&not_nan_loop);
 
@@ -2004,8 +2020,8 @@
       Node* element_k = assembler->LoadFixedDoubleArrayElement(
           elements, index_var.value(), MachineType::Float64(), 0,
           CodeStubAssembler::INTPTR_PARAMETERS);
-      assembler->BranchIfFloat64Equal(element_k, search_num.value(),
-                                      &return_found, &continue_loop);
+      assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
+                        &return_found, &continue_loop);
       assembler->Bind(&continue_loop);
       index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
       assembler->Goto(&not_nan_loop);
@@ -2017,7 +2033,8 @@
     Label not_nan_loop(assembler, &index_var), search_notnan(assembler);
     Variable search_num(assembler, MachineRepresentation::kFloat64);
 
-    assembler->GotoUnless(assembler->WordIsSmi(search_element), &search_notnan);
+    assembler->GotoUnless(assembler->TaggedIsSmi(search_element),
+                          &search_notnan);
     search_num.Bind(assembler->SmiToFloat64(search_element));
     assembler->Goto(&not_nan_loop);
 
@@ -2044,8 +2061,8 @@
           elements, index_var.value(), MachineType::Float64(), 0,
           CodeStubAssembler::INTPTR_PARAMETERS, &continue_loop);
 
-      assembler->BranchIfFloat64Equal(element_k, search_num.value(),
-                                      &return_found, &continue_loop);
+      assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
+                        &return_found, &continue_loop);
       assembler->Bind(&continue_loop);
       index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
       assembler->Goto(&not_nan_loop);
@@ -2063,5 +2080,555 @@
                                            array, search_element, start_from));
 }
 
+namespace {
+
+template <IterationKind kIterationKind>
+void Generate_ArrayPrototypeIterationMethod(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* receiver = assembler->Parameter(0);
+  Node* context = assembler->Parameter(3);
+
+  Variable var_array(assembler, MachineRepresentation::kTagged);
+  Variable var_map(assembler, MachineRepresentation::kTagged);
+  Variable var_type(assembler, MachineRepresentation::kWord32);
+
+  Label if_isnotobject(assembler, Label::kDeferred);
+  Label create_array_iterator(assembler);
+
+  assembler->GotoIf(assembler->TaggedIsSmi(receiver), &if_isnotobject);
+  var_array.Bind(receiver);
+  var_map.Bind(assembler->LoadMap(receiver));
+  var_type.Bind(assembler->LoadMapInstanceType(var_map.value()));
+  assembler->Branch(assembler->IsJSReceiverInstanceType(var_type.value()),
+                    &create_array_iterator, &if_isnotobject);
+
+  assembler->Bind(&if_isnotobject);
+  {
+    Callable callable = CodeFactory::ToObject(assembler->isolate());
+    Node* result = assembler->CallStub(callable, context, receiver);
+    var_array.Bind(result);
+    var_map.Bind(assembler->LoadMap(result));
+    var_type.Bind(assembler->LoadMapInstanceType(var_map.value()));
+    assembler->Goto(&create_array_iterator);
+  }
+
+  assembler->Bind(&create_array_iterator);
+  assembler->Return(assembler->CreateArrayIterator(
+      var_array.value(), var_map.value(), var_type.value(), context,
+      kIterationKind));
+}
+
+}  // namespace
+
+void Builtins::Generate_ArrayPrototypeValues(CodeStubAssembler* assembler) {
+  Generate_ArrayPrototypeIterationMethod<IterationKind::kValues>(assembler);
+}
+
+void Builtins::Generate_ArrayPrototypeEntries(CodeStubAssembler* assembler) {
+  Generate_ArrayPrototypeIterationMethod<IterationKind::kEntries>(assembler);
+}
+
+void Builtins::Generate_ArrayPrototypeKeys(CodeStubAssembler* assembler) {
+  Generate_ArrayPrototypeIterationMethod<IterationKind::kKeys>(assembler);
+}
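The three iteration builtins above share one body, stamped out per IterationKind through a non-type template parameter, so the kind is a compile-time constant inside the generator rather than a runtime branch. Minimal sketch of the pattern:

```cpp
enum class IterationKind { kKeys, kValues, kEntries };

// One shared generator; the kind is baked in at compile time.
template <IterationKind kKind>
int GenerateIterationMethod() {
  return static_cast<int>(kKind);  // real code would emit kind-specific stubs
}

// Thin named entry points, mirroring Generate_ArrayPrototype{Keys,Values,Entries}.
int GenerateKeys() { return GenerateIterationMethod<IterationKind::kKeys>(); }
int GenerateValues() { return GenerateIterationMethod<IterationKind::kValues>(); }
int GenerateEntries() { return GenerateIterationMethod<IterationKind::kEntries>(); }
```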
+
+void Builtins::Generate_ArrayIteratorPrototypeNext(
+    CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* iterator = assembler->Parameter(0);
+  Node* context = assembler->Parameter(3);
+
+  Variable var_value(assembler, MachineRepresentation::kTagged);
+  Variable var_done(assembler, MachineRepresentation::kTagged);
+
+  // Required, or else `throw_bad_receiver` fails a DCHECK due to these
+  // variables not being bound along all paths, despite not being used.
+  var_done.Bind(assembler->TrueConstant());
+  var_value.Bind(assembler->UndefinedConstant());
+
+  Label throw_bad_receiver(assembler, Label::kDeferred);
+  Label set_done(assembler);
+  Label allocate_key_result(assembler);
+  Label allocate_entry_if_needed(assembler);
+  Label allocate_iterator_result(assembler);
+  Label generic_values(assembler);
+
+  // If O does not have all of the internal slots of an Array Iterator Instance
+  // (22.1.5.3), throw a TypeError exception
+  assembler->GotoIf(assembler->TaggedIsSmi(iterator), &throw_bad_receiver);
+  Node* instance_type = assembler->LoadInstanceType(iterator);
+  assembler->GotoIf(
+      assembler->Uint32LessThan(
+          assembler->Int32Constant(LAST_ARRAY_ITERATOR_TYPE -
+                                   FIRST_ARRAY_ITERATOR_TYPE),
+          assembler->Int32Sub(instance_type, assembler->Int32Constant(
+                                                 FIRST_ARRAY_ITERATOR_TYPE))),
+      &throw_bad_receiver);
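The receiver check above uses the classic unsigned range trick: subtracting FIRST_ARRAY_ITERATOR_TYPE and comparing unsigned against the range width folds the two-sided bounds test into a single comparison. Sketch with illustrative bounds:

```cpp
#include <cstdint>

// Illustrative bounds; the real values come from V8's instance-type enum.
constexpr uint32_t kFirstArrayIteratorType = 100;
constexpr uint32_t kLastArrayIteratorType = 120;

// One unsigned compare replaces (type >= FIRST && type <= LAST): any value
// below FIRST wraps around to a huge unsigned number and fails the <= test.
constexpr bool IsArrayIteratorType(uint32_t instance_type) {
  return instance_type - kFirstArrayIteratorType <=
         kLastArrayIteratorType - kFirstArrayIteratorType;
}

static_assert(IsArrayIteratorType(100) && IsArrayIteratorType(120) &&
                  !IsArrayIteratorType(99) && !IsArrayIteratorType(121),
              "bounds are inclusive");
```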
+
+  // Let a be O.[[IteratedObject]].
+  Node* array = assembler->LoadObjectField(
+      iterator, JSArrayIterator::kIteratedObjectOffset);
+
+  // Let index be O.[[ArrayIteratorNextIndex]].
+  Node* index =
+      assembler->LoadObjectField(iterator, JSArrayIterator::kNextIndexOffset);
+  Node* orig_map = assembler->LoadObjectField(
+      iterator, JSArrayIterator::kIteratedObjectMapOffset);
+  Node* array_map = assembler->LoadMap(array);
+
+  Label if_isfastarray(assembler), if_isnotfastarray(assembler);
+
+  assembler->Branch(assembler->WordEqual(orig_map, array_map), &if_isfastarray,
+                    &if_isnotfastarray);
+
+  assembler->Bind(&if_isfastarray);
+  {
+    CSA_ASSERT(assembler,
+               assembler->Word32Equal(assembler->LoadMapInstanceType(array_map),
+                                      assembler->Int32Constant(JS_ARRAY_TYPE)));
+
+    Node* length = assembler->LoadObjectField(array, JSArray::kLengthOffset);
+
+    CSA_ASSERT(assembler, assembler->TaggedIsSmi(length));
+    CSA_ASSERT(assembler, assembler->TaggedIsSmi(index));
+
+    assembler->GotoUnless(assembler->SmiBelow(index, length), &set_done);
+
+    Node* one = assembler->SmiConstant(Smi::FromInt(1));
+    assembler->StoreObjectFieldNoWriteBarrier(
+        iterator, JSArrayIterator::kNextIndexOffset,
+        assembler->IntPtrAdd(assembler->BitcastTaggedToWord(index),
+                             assembler->BitcastTaggedToWord(one)));
+
+    var_done.Bind(assembler->FalseConstant());
+    Node* elements = assembler->LoadElements(array);
+
+    static int32_t kInstanceType[] = {
+        JS_FAST_ARRAY_KEY_ITERATOR_TYPE,
+        JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+        JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+        JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+        JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+        JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+        JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+        JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE,
+        JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE,
+        JS_FAST_ARRAY_VALUE_ITERATOR_TYPE,
+        JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE,
+        JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE,
+        JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE,
+    };
+
+    Label packed_object_values(assembler), holey_object_values(assembler),
+        packed_double_values(assembler), holey_double_values(assembler);
+    Label* kInstanceTypeHandlers[] = {
+        &allocate_key_result,  &packed_object_values, &holey_object_values,
+        &packed_object_values, &holey_object_values,  &packed_double_values,
+        &holey_double_values,  &packed_object_values, &holey_object_values,
+        &packed_object_values, &holey_object_values,  &packed_double_values,
+        &holey_double_values};
+
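+    // Dispatch on the iterator's instance type; the handler table must stay
+    // in one-to-one correspondence with kInstanceType above.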
+    assembler->Switch(instance_type, &throw_bad_receiver, kInstanceType,
+                      kInstanceTypeHandlers, arraysize(kInstanceType));
+
+    assembler->Bind(&packed_object_values);
+    {
+      var_value.Bind(assembler->LoadFixedArrayElement(
+          elements, index, 0, CodeStubAssembler::SMI_PARAMETERS));
+      assembler->Goto(&allocate_entry_if_needed);
+    }
+
+    assembler->Bind(&packed_double_values);
+    {
+      Node* value = assembler->LoadFixedDoubleArrayElement(
+          elements, index, MachineType::Float64(), 0,
+          CodeStubAssembler::SMI_PARAMETERS);
+      var_value.Bind(assembler->AllocateHeapNumberWithValue(value));
+      assembler->Goto(&allocate_entry_if_needed);
+    }
+
+    assembler->Bind(&holey_object_values);
+    {
+      // Check the array_protector cell, and take the slow path if it's invalid.
+      Node* invalid =
+          assembler->SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
+      Node* cell = assembler->LoadRoot(Heap::kArrayProtectorRootIndex);
+      Node* cell_value =
+          assembler->LoadObjectField(cell, PropertyCell::kValueOffset);
+      assembler->GotoIf(assembler->WordEqual(cell_value, invalid),
+                        &generic_values);
+
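+      // While the protector is intact, no indexed properties have been added
+      // to the prototype chain, so a hole can simply be read as undefined.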
+      var_value.Bind(assembler->UndefinedConstant());
+      Node* value = assembler->LoadFixedArrayElement(
+          elements, index, 0, CodeStubAssembler::SMI_PARAMETERS);
+      assembler->GotoIf(
+          assembler->WordEqual(value, assembler->TheHoleConstant()),
+          &allocate_entry_if_needed);
+      var_value.Bind(value);
+      assembler->Goto(&allocate_entry_if_needed);
+    }
+
+    assembler->Bind(&holey_double_values);
+    {
+      // Check the array_protector cell, and take the slow path if it's invalid.
+      Node* invalid =
+          assembler->SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
+      Node* cell = assembler->LoadRoot(Heap::kArrayProtectorRootIndex);
+      Node* cell_value =
+          assembler->LoadObjectField(cell, PropertyCell::kValueOffset);
+      assembler->GotoIf(assembler->WordEqual(cell_value, invalid),
+                        &generic_values);
+
+      var_value.Bind(assembler->UndefinedConstant());
+      Node* value = assembler->LoadFixedDoubleArrayElement(
+          elements, index, MachineType::Float64(), 0,
+          CodeStubAssembler::SMI_PARAMETERS, &allocate_entry_if_needed);
+      var_value.Bind(assembler->AllocateHeapNumberWithValue(value));
+      assembler->Goto(&allocate_entry_if_needed);
+    }
+  }
+
+  assembler->Bind(&if_isnotfastarray);
+  {
+    Label if_istypedarray(assembler), if_isgeneric(assembler);
+
+    // If a is undefined, return CreateIterResultObject(undefined, true)
+    assembler->GotoIf(
+        assembler->WordEqual(array, assembler->UndefinedConstant()),
+        &allocate_iterator_result);
+
+    Node* array_type = assembler->LoadInstanceType(array);
+    assembler->Branch(
+        assembler->Word32Equal(array_type,
+                               assembler->Int32Constant(JS_TYPED_ARRAY_TYPE)),
+        &if_istypedarray, &if_isgeneric);
+
+    assembler->Bind(&if_isgeneric);
+    {
+      Label if_wasfastarray(assembler);
+
+      Node* length = nullptr;
+      {
+        Variable var_length(assembler, MachineRepresentation::kTagged);
+        Label if_isarray(assembler), if_isnotarray(assembler), done(assembler);
+        assembler->Branch(
+            assembler->Word32Equal(array_type,
+                                   assembler->Int32Constant(JS_ARRAY_TYPE)),
+            &if_isarray, &if_isnotarray);
+
+        assembler->Bind(&if_isarray);
+        {
+          var_length.Bind(
+              assembler->LoadObjectField(array, JSArray::kLengthOffset));
+
+          // Invalidate protector cell if needed
+          assembler->Branch(
+              assembler->WordNotEqual(orig_map, assembler->UndefinedConstant()),
+              &if_wasfastarray, &done);
+
+          assembler->Bind(&if_wasfastarray);
+          {
+            Label if_invalid(assembler, Label::kDeferred);
+            // A fast array iterator transitioned to a slow iterator during
+            // iteration. Invalidate the fast_array_iteration_protector cell
+            // prevent potential deopt loops.
+            assembler->StoreObjectFieldNoWriteBarrier(
+                iterator, JSArrayIterator::kIteratedObjectMapOffset,
+                assembler->UndefinedConstant());
+            assembler->GotoIf(
+                assembler->Uint32LessThanOrEqual(
+                    instance_type, assembler->Int32Constant(
+                                       JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE)),
+                &done);
+
+            Node* invalid = assembler->SmiConstant(
+                Smi::FromInt(Isolate::kProtectorInvalid));
+            Node* cell = assembler->LoadRoot(
+                Heap::kFastArrayIterationProtectorRootIndex);
+            assembler->StoreObjectFieldNoWriteBarrier(cell, Cell::kValueOffset,
+                                                      invalid);
+            assembler->Goto(&done);
+          }
+        }
+
+        assembler->Bind(&if_isnotarray);
+        {
+          Node* length_string = assembler->HeapConstant(
+              assembler->isolate()->factory()->length_string());
+          Callable get_property =
+              CodeFactory::GetProperty(assembler->isolate());
+          Node* length =
+              assembler->CallStub(get_property, context, array, length_string);
+          Callable to_length = CodeFactory::ToLength(assembler->isolate());
+          var_length.Bind(assembler->CallStub(to_length, context, length));
+          assembler->Goto(&done);
+        }
+
+        assembler->Bind(&done);
+        length = var_length.value();
+      }
+
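+      // {index} and {length} may be arbitrary Numbers on this path, so use a
+      // generic number comparison rather than a Smi one.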
+      assembler->GotoUnlessNumberLessThan(index, length, &set_done);
+
+      assembler->StoreObjectField(iterator, JSArrayIterator::kNextIndexOffset,
+                                  assembler->NumberInc(index));
+      var_done.Bind(assembler->FalseConstant());
+
+      assembler->Branch(
+          assembler->Uint32LessThanOrEqual(
+              instance_type,
+              assembler->Int32Constant(JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE)),
+          &allocate_key_result, &generic_values);
+
+      assembler->Bind(&generic_values);
+      {
+        Callable get_property = CodeFactory::GetProperty(assembler->isolate());
+        var_value.Bind(
+            assembler->CallStub(get_property, context, array, index));
+        assembler->Goto(&allocate_entry_if_needed);
+      }
+    }
+
+    assembler->Bind(&if_istypedarray);
+    {
+      Node* length = nullptr;
+      {
+        Variable var_length(assembler, MachineRepresentation::kTagged);
+        Label if_isdetached(assembler, Label::kDeferred),
+            if_isnotdetached(assembler), done(assembler);
+
+        Node* buffer =
+            assembler->LoadObjectField(array, JSTypedArray::kBufferOffset);
+        assembler->Branch(assembler->IsDetachedBuffer(buffer), &if_isdetached,
+                          &if_isnotdetached);
+
+        assembler->Bind(&if_isnotdetached);
+        {
+          var_length.Bind(
+              assembler->LoadObjectField(array, JSTypedArray::kLengthOffset));
+          assembler->Goto(&done);
+        }
+
+        assembler->Bind(&if_isdetached);
+        {
+          // TODO(caitp): If IsDetached(buffer) is true, throw a TypeError, per
+          // https://github.com/tc39/ecma262/issues/713
+          var_length.Bind(assembler->SmiConstant(Smi::kZero));
+          assembler->Goto(&done);
+        }
+
+        assembler->Bind(&done);
+        length = var_length.value();
+      }
+      CSA_ASSERT(assembler, assembler->TaggedIsSmi(length));
+      CSA_ASSERT(assembler, assembler->TaggedIsSmi(index));
+
+      assembler->GotoUnless(assembler->SmiBelow(index, length), &set_done);
+
+      Node* one = assembler->SmiConstant(Smi::FromInt(1));
+      assembler->StoreObjectFieldNoWriteBarrier(
+          iterator, JSArrayIterator::kNextIndexOffset,
+          assembler->IntPtrAdd(assembler->BitcastTaggedToWord(index),
+                               assembler->BitcastTaggedToWord(one)));
+      var_done.Bind(assembler->FalseConstant());
+
+      Node* elements = assembler->LoadElements(array);
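+      // For on-heap typed arrays {base_ptr} is the elements object itself and
+      // {external_ptr} a small offset; for off-heap ones {base_ptr} is Smi
+      // zero and {external_ptr} the absolute backing-store address. Their sum
+      // is the element data pointer in both cases.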
+      Node* base_ptr = assembler->LoadObjectField(
+          elements, FixedTypedArrayBase::kBasePointerOffset);
+      Node* external_ptr = assembler->LoadObjectField(
+          elements, FixedTypedArrayBase::kExternalPointerOffset);
+      Node* data_ptr = assembler->IntPtrAdd(base_ptr, external_ptr);
+
+      static int32_t kInstanceType[] = {
+          JS_TYPED_ARRAY_KEY_ITERATOR_TYPE,
+          JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+          JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+          JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+          JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+          JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+          JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+          JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+          JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+          JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+          JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE,
+          JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE,
+          JS_INT8_ARRAY_VALUE_ITERATOR_TYPE,
+          JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE,
+          JS_INT16_ARRAY_VALUE_ITERATOR_TYPE,
+          JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE,
+          JS_INT32_ARRAY_VALUE_ITERATOR_TYPE,
+          JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE,
+          JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE,
+      };
+
+      Label uint8_values(assembler), int8_values(assembler),
+          uint16_values(assembler), int16_values(assembler),
+          uint32_values(assembler), int32_values(assembler),
+          float32_values(assembler), float64_values(assembler);
+      Label* kInstanceTypeHandlers[] = {
+          &allocate_key_result, &uint8_values,  &uint8_values,
+          &int8_values,         &uint16_values, &int16_values,
+          &uint32_values,       &int32_values,  &float32_values,
+          &float64_values,      &uint8_values,  &uint8_values,
+          &int8_values,         &uint16_values, &int16_values,
+          &uint32_values,       &int32_values,  &float32_values,
+          &float64_values,
+      };
+
+      var_done.Bind(assembler->FalseConstant());
+      assembler->Switch(instance_type, &throw_bad_receiver, kInstanceType,
+                        kInstanceTypeHandlers, arraysize(kInstanceType));
+
+      assembler->Bind(&uint8_values);
+      {
+        Node* value_uint8 = assembler->LoadFixedTypedArrayElement(
+            data_ptr, index, UINT8_ELEMENTS, CodeStubAssembler::SMI_PARAMETERS);
+        var_value.Bind(assembler->SmiFromWord(value_uint8));
+        assembler->Goto(&allocate_entry_if_needed);
+      }
+
+      assembler->Bind(&int8_values);
+      {
+        Node* value_int8 = assembler->LoadFixedTypedArrayElement(
+            data_ptr, index, INT8_ELEMENTS, CodeStubAssembler::SMI_PARAMETERS);
+        var_value.Bind(assembler->SmiFromWord(value_int8));
+        assembler->Goto(&allocate_entry_if_needed);
+      }
+
+      assembler->Bind(&uint16_values);
+      {
+        Node* value_uint16 = assembler->LoadFixedTypedArrayElement(
+            data_ptr, index, UINT16_ELEMENTS,
+            CodeStubAssembler::SMI_PARAMETERS);
+        var_value.Bind(assembler->SmiFromWord(value_uint16));
+        assembler->Goto(&allocate_entry_if_needed);
+      }
+
+      assembler->Bind(&int16_values);
+      {
+        Node* value_int16 = assembler->LoadFixedTypedArrayElement(
+            data_ptr, index, INT16_ELEMENTS, CodeStubAssembler::SMI_PARAMETERS);
+        var_value.Bind(assembler->SmiFromWord(value_int16));
+        assembler->Goto(&allocate_entry_if_needed);
+      }
+
+      assembler->Bind(&uint32_values);
+      {
+        Node* value_uint32 = assembler->LoadFixedTypedArrayElement(
+            data_ptr, index, UINT32_ELEMENTS,
+            CodeStubAssembler::SMI_PARAMETERS);
+        var_value.Bind(assembler->ChangeUint32ToTagged(value_uint32));
+        assembler->Goto(&allocate_entry_if_needed);
+      }
+      assembler->Bind(&int32_values);
+      {
+        Node* value_int32 = assembler->LoadFixedTypedArrayElement(
+            data_ptr, index, INT32_ELEMENTS, CodeStubAssembler::SMI_PARAMETERS);
+        var_value.Bind(assembler->ChangeInt32ToTagged(value_int32));
+        assembler->Goto(&allocate_entry_if_needed);
+      }
+      assembler->Bind(&float32_values);
+      {
+        Node* value_float32 = assembler->LoadFixedTypedArrayElement(
+            data_ptr, index, FLOAT32_ELEMENTS,
+            CodeStubAssembler::SMI_PARAMETERS);
+        var_value.Bind(assembler->AllocateHeapNumberWithValue(
+            assembler->ChangeFloat32ToFloat64(value_float32)));
+        assembler->Goto(&allocate_entry_if_needed);
+      }
+      assembler->Bind(&float64_values);
+      {
+        Node* value_float64 = assembler->LoadFixedTypedArrayElement(
+            data_ptr, index, FLOAT64_ELEMENTS,
+            CodeStubAssembler::SMI_PARAMETERS);
+        var_value.Bind(assembler->AllocateHeapNumberWithValue(value_float64));
+        assembler->Goto(&allocate_entry_if_needed);
+      }
+    }
+  }
+
+  assembler->Bind(&set_done);
+  {
+    assembler->StoreObjectFieldNoWriteBarrier(
+        iterator, JSArrayIterator::kIteratedObjectOffset,
+        assembler->UndefinedConstant());
+    assembler->Goto(&allocate_iterator_result);
+  }
+
+  assembler->Bind(&allocate_key_result);
+  {
+    var_value.Bind(index);
+    var_done.Bind(assembler->FalseConstant());
+    assembler->Goto(&allocate_iterator_result);
+  }
+
+  assembler->Bind(&allocate_entry_if_needed);
+  {
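+    // Only key/value ("entries") iterators, whose instance types sort at or
+    // below LAST_ARRAY_KEY_VALUE_ITERATOR_TYPE, need the [key, value] pair;
+    // plain value iterators return var_value unchanged.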
+    assembler->GotoIf(
+        assembler->Int32GreaterThan(
+            instance_type,
+            assembler->Int32Constant(LAST_ARRAY_KEY_VALUE_ITERATOR_TYPE)),
+        &allocate_iterator_result);
+
+    Node* elements = assembler->AllocateFixedArray(FAST_ELEMENTS,
+                                                   assembler->Int32Constant(2));
+    assembler->StoreFixedArrayElement(elements, assembler->Int32Constant(0),
+                                      index, SKIP_WRITE_BARRIER);
+    assembler->StoreFixedArrayElement(elements, assembler->Int32Constant(1),
+                                      var_value.value(), SKIP_WRITE_BARRIER);
+
+    Node* entry = assembler->Allocate(JSArray::kSize);
+    Node* map = assembler->LoadContextElement(
+        assembler->LoadNativeContext(context),
+        Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX);
+
+    assembler->StoreMapNoWriteBarrier(entry, map);
+    assembler->StoreObjectFieldRoot(entry, JSArray::kPropertiesOffset,
+                                    Heap::kEmptyFixedArrayRootIndex);
+    assembler->StoreObjectFieldNoWriteBarrier(entry, JSArray::kElementsOffset,
+                                              elements);
+    assembler->StoreObjectFieldNoWriteBarrier(
+        entry, JSArray::kLengthOffset, assembler->SmiConstant(Smi::FromInt(2)));
+
+    var_value.Bind(entry);
+    assembler->Goto(&allocate_iterator_result);
+  }
+
+  assembler->Bind(&allocate_iterator_result);
+  {
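+    // Allocate the JSIteratorResult {value, done} inline instead of calling
+    // into the runtime.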
+    Node* result = assembler->Allocate(JSIteratorResult::kSize);
+    Node* map =
+        assembler->LoadContextElement(assembler->LoadNativeContext(context),
+                                      Context::ITERATOR_RESULT_MAP_INDEX);
+    assembler->StoreMapNoWriteBarrier(result, map);
+    assembler->StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOffset,
+                                    Heap::kEmptyFixedArrayRootIndex);
+    assembler->StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
+                                    Heap::kEmptyFixedArrayRootIndex);
+    assembler->StoreObjectFieldNoWriteBarrier(
+        result, JSIteratorResult::kValueOffset, var_value.value());
+    assembler->StoreObjectFieldNoWriteBarrier(
+        result, JSIteratorResult::kDoneOffset, var_done.value());
+    assembler->Return(result);
+  }
+
+  assembler->Bind(&throw_bad_receiver);
+  {
+    // The {receiver} is not a valid JSArrayIterator.
+    Node* result = assembler->CallRuntime(
+        Runtime::kThrowIncompatibleMethodReceiver, context,
+        assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
+            "Array Iterator.prototype.next", TENURED)),
+        iterator);
+    assembler->Return(result);
+  }
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/builtins/builtins-arraybuffer.cc b/src/builtins/builtins-arraybuffer.cc
index addf8ac..ad36758 100644
--- a/src/builtins/builtins-arraybuffer.cc
+++ b/src/builtins/builtins-arraybuffer.cc
@@ -14,7 +14,7 @@
 // ES6 section 24.1.2.1 ArrayBuffer ( length ) for the [[Call]] case.
 BUILTIN(ArrayBufferConstructor) {
   HandleScope scope(isolate);
-  Handle<JSFunction> target = args.target<JSFunction>();
+  Handle<JSFunction> target = args.target();
   DCHECK(*target == target->native_context()->array_buffer_fun() ||
          *target == target->native_context()->shared_array_buffer_fun());
   THROW_NEW_ERROR_RETURN_FAILURE(
@@ -25,7 +25,7 @@
 // ES6 section 24.1.2.1 ArrayBuffer ( length ) for the [[Construct]] case.
 BUILTIN(ArrayBufferConstructor_ConstructStub) {
   HandleScope scope(isolate);
-  Handle<JSFunction> target = args.target<JSFunction>();
+  Handle<JSFunction> target = args.target();
   Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
   Handle<Object> length = args.atOrUndefined(isolate, 1);
   DCHECK(*target == target->native_context()->array_buffer_fun() ||
diff --git a/src/builtins/builtins-boolean.cc b/src/builtins/builtins-boolean.cc
index 5f5bed1..e7ccf95 100644
--- a/src/builtins/builtins-boolean.cc
+++ b/src/builtins/builtins-boolean.cc
@@ -22,7 +22,7 @@
 BUILTIN(BooleanConstructor_ConstructStub) {
   HandleScope scope(isolate);
   Handle<Object> value = args.atOrUndefined(isolate, 1);
-  Handle<JSFunction> target = args.target<JSFunction>();
+  Handle<JSFunction> target = args.target();
   Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
   DCHECK(*target == target->native_context()->boolean_function());
   Handle<JSObject> result;
diff --git a/src/builtins/builtins-conversion.cc b/src/builtins/builtins-conversion.cc
index 7fbe4f8..0eaf79c 100644
--- a/src/builtins/builtins-conversion.cc
+++ b/src/builtins/builtins-conversion.cc
@@ -60,7 +60,7 @@
     // Verify that the {result} is actually a primitive.
     Label if_resultisprimitive(assembler),
         if_resultisnotprimitive(assembler, Label::kDeferred);
-    assembler->GotoIf(assembler->WordIsSmi(result), &if_resultisprimitive);
+    assembler->GotoIf(assembler->TaggedIsSmi(result), &if_resultisprimitive);
     Node* result_instance_type = assembler->LoadInstanceType(result);
     STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
     assembler->Branch(assembler->Int32LessThanOrEqual(
@@ -162,7 +162,7 @@
   Label is_number(assembler);
   Label runtime(assembler);
 
-  assembler->GotoIf(assembler->WordIsSmi(input), &is_number);
+  assembler->GotoIf(assembler->TaggedIsSmi(input), &is_number);
 
   Node* input_map = assembler->LoadMap(input);
   Node* input_instance_type = assembler->LoadMapInstanceType(input_map);
@@ -183,11 +183,7 @@
   }
 
   assembler->Bind(&is_number);
-  {
-    // TODO(tebbi): inline as soon as NumberToString is in the CodeStubAssembler
-    Callable callable = CodeFactory::NumberToString(assembler->isolate());
-    assembler->Return(assembler->CallStub(callable, context, input));
-  }
+  { assembler->Return(assembler->NumberToString(context, input)); }
 
   assembler->Bind(&not_heap_number);
   {
@@ -252,15 +248,10 @@
     // Check if the {method} is callable.
     Label if_methodiscallable(assembler),
         if_methodisnotcallable(assembler, Label::kDeferred);
-    assembler->GotoIf(assembler->WordIsSmi(method), &if_methodisnotcallable);
+    assembler->GotoIf(assembler->TaggedIsSmi(method), &if_methodisnotcallable);
     Node* method_map = assembler->LoadMap(method);
-    Node* method_bit_field = assembler->LoadMapBitField(method_map);
-    assembler->Branch(
-        assembler->Word32Equal(
-            assembler->Word32And(method_bit_field, assembler->Int32Constant(
-                                                       1 << Map::kIsCallable)),
-            assembler->Int32Constant(0)),
-        &if_methodisnotcallable, &if_methodiscallable);
+    assembler->Branch(assembler->IsCallableMap(method_map),
+                      &if_methodiscallable, &if_methodisnotcallable);
 
     assembler->Bind(&if_methodiscallable);
     {
@@ -270,7 +261,7 @@
       var_result.Bind(result);
 
       // Return the {result} if it is a primitive.
-      assembler->GotoIf(assembler->WordIsSmi(result), &return_result);
+      assembler->GotoIf(assembler->TaggedIsSmi(result), &return_result);
       Node* result_instance_type = assembler->LoadInstanceType(result);
       STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
       assembler->GotoIf(assembler->Int32LessThanOrEqual(
@@ -319,5 +310,168 @@
   assembler->Return(assembler->BooleanConstant(false));
 }
 
+void Builtins::Generate_ToLength(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* context = assembler->Parameter(1);
+
+  // We might need to loop once for ToNumber conversion.
+  Variable var_len(assembler, MachineRepresentation::kTagged);
+  Label loop(assembler, &var_len);
+  var_len.Bind(assembler->Parameter(0));
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    // Shared entry points.
+    Label return_len(assembler),
+        return_two53minus1(assembler, Label::kDeferred),
+        return_zero(assembler, Label::kDeferred);
+
+    // Load the current {len} value.
+    Node* len = var_len.value();
+
+    // Check if {len} is a positive Smi.
+    assembler->GotoIf(assembler->WordIsPositiveSmi(len), &return_len);
+
+    // Check if {len} is a (negative) Smi.
+    assembler->GotoIf(assembler->TaggedIsSmi(len), &return_zero);
+
+    // Check if {len} is a HeapNumber.
+    Label if_lenisheapnumber(assembler),
+        if_lenisnotheapnumber(assembler, Label::kDeferred);
+    assembler->Branch(assembler->IsHeapNumberMap(assembler->LoadMap(len)),
+                      &if_lenisheapnumber, &if_lenisnotheapnumber);
+
+    assembler->Bind(&if_lenisheapnumber);
+    {
+      // Load the floating-point value of {len}.
+      Node* len_value = assembler->LoadHeapNumberValue(len);
+
+      // Check if {len} is not greater than zero.
+      assembler->GotoUnless(assembler->Float64GreaterThan(
+                                len_value, assembler->Float64Constant(0.0)),
+                            &return_zero);
+
+      // Check if {len} is greater than or equal to 2^53-1.
+      assembler->GotoIf(
+          assembler->Float64GreaterThanOrEqual(
+              len_value, assembler->Float64Constant(kMaxSafeInteger)),
+          &return_two53minus1);
+
+      // Round the {len} towards -Infinity.
+      Node* value = assembler->Float64Floor(len_value);
+      Node* result = assembler->ChangeFloat64ToTagged(value);
+      assembler->Return(result);
+    }
+
+    assembler->Bind(&if_lenisnotheapnumber);
+    {
+      // Need to convert {len} to a Number first.
+      Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate());
+      var_len.Bind(assembler->CallStub(callable, context, len));
+      assembler->Goto(&loop);
+    }
+
+    assembler->Bind(&return_len);
+    assembler->Return(var_len.value());
+
+    assembler->Bind(&return_two53minus1);
+    assembler->Return(assembler->NumberConstant(kMaxSafeInteger));
+
+    assembler->Bind(&return_zero);
+    assembler->Return(assembler->SmiConstant(Smi::kZero));
+  }
+}
+
+void Builtins::Generate_ToInteger(CodeStubAssembler* assembler) {
+  typedef TypeConversionDescriptor Descriptor;
+
+  compiler::Node* input = assembler->Parameter(Descriptor::kArgument);
+  compiler::Node* context = assembler->Parameter(Descriptor::kContext);
+
+  assembler->Return(assembler->ToInteger(context, input));
+}
+
+// ES6 section 7.1.13 ToObject (argument)
+void Builtins::Generate_ToObject(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
+  typedef TypeConversionDescriptor Descriptor;
+
+  Label if_number(assembler, Label::kDeferred), if_notsmi(assembler),
+      if_jsreceiver(assembler), if_noconstructor(assembler, Label::kDeferred),
+      if_wrapjsvalue(assembler);
+
+  Node* object = assembler->Parameter(Descriptor::kArgument);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  Variable constructor_function_index_var(assembler,
+                                          MachineType::PointerRepresentation());
+
+  assembler->Branch(assembler->TaggedIsSmi(object), &if_number, &if_notsmi);
+
+  assembler->Bind(&if_notsmi);
+  Node* map = assembler->LoadMap(object);
+
+  assembler->GotoIf(assembler->IsHeapNumberMap(map), &if_number);
+
+  Node* instance_type = assembler->LoadMapInstanceType(map);
+  assembler->GotoIf(assembler->IsJSReceiverInstanceType(instance_type),
+                    &if_jsreceiver);
+
+  Node* constructor_function_index =
+      assembler->LoadMapConstructorFunctionIndex(map);
+  assembler->GotoIf(assembler->WordEqual(constructor_function_index,
+                                         assembler->IntPtrConstant(
+                                             Map::kNoConstructorFunctionIndex)),
+                    &if_noconstructor);
+  constructor_function_index_var.Bind(constructor_function_index);
+  assembler->Goto(&if_wrapjsvalue);
+
+  assembler->Bind(&if_number);
+  constructor_function_index_var.Bind(
+      assembler->IntPtrConstant(Context::NUMBER_FUNCTION_INDEX));
+  assembler->Goto(&if_wrapjsvalue);
+
+  assembler->Bind(&if_wrapjsvalue);
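+  // Wrap the primitive in a fresh JSValue, using the initial map of the
+  // matching wrapper constructor from the native context.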
+  Node* native_context = assembler->LoadNativeContext(context);
+  Node* constructor = assembler->LoadFixedArrayElement(
+      native_context, constructor_function_index_var.value(), 0,
+      CodeStubAssembler::INTPTR_PARAMETERS);
+  Node* initial_map = assembler->LoadObjectField(
+      constructor, JSFunction::kPrototypeOrInitialMapOffset);
+  Node* js_value = assembler->Allocate(JSValue::kSize);
+  assembler->StoreMapNoWriteBarrier(js_value, initial_map);
+  assembler->StoreObjectFieldRoot(js_value, JSValue::kPropertiesOffset,
+                                  Heap::kEmptyFixedArrayRootIndex);
+  assembler->StoreObjectFieldRoot(js_value, JSObject::kElementsOffset,
+                                  Heap::kEmptyFixedArrayRootIndex);
+  assembler->StoreObjectField(js_value, JSValue::kValueOffset, object);
+  assembler->Return(js_value);
+
+  assembler->Bind(&if_noconstructor);
+  assembler->TailCallRuntime(
+      Runtime::kThrowUndefinedOrNullToObject, context,
+      assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
+          "ToObject", TENURED)));
+
+  assembler->Bind(&if_jsreceiver);
+  assembler->Return(object);
+}
+
+// ES6 section 12.5.5 typeof operator
+void Builtins::Generate_Typeof(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+  typedef TypeofDescriptor Descriptor;
+
+  Node* object = assembler->Parameter(Descriptor::kObject);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  assembler->Return(assembler->Typeof(object, context));
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/builtins/builtins-dataview.cc b/src/builtins/builtins-dataview.cc
index 3d14e31..45a5fd9 100644
--- a/src/builtins/builtins-dataview.cc
+++ b/src/builtins/builtins-dataview.cc
@@ -23,7 +23,7 @@
 // ES6 section 24.2.2 The DataView Constructor for the [[Construct]] case.
 BUILTIN(DataViewConstructor_ConstructStub) {
   HandleScope scope(isolate);
-  Handle<JSFunction> target = args.target<JSFunction>();
+  Handle<JSFunction> target = args.target();
   Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
   Handle<Object> buffer = args.atOrUndefined(isolate, 1);
   Handle<Object> byte_offset = args.atOrUndefined(isolate, 2);
@@ -88,7 +88,7 @@
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
                                      JSObject::New(target, new_target));
   for (int i = 0; i < ArrayBufferView::kInternalFieldCount; ++i) {
-    Handle<JSDataView>::cast(result)->SetInternalField(i, Smi::FromInt(0));
+    Handle<JSDataView>::cast(result)->SetInternalField(i, Smi::kZero);
   }
 
   // 12. Set O's [[ViewedArrayBuffer]] internal slot to buffer.
diff --git a/src/builtins/builtins-date.cc b/src/builtins/builtins-date.cc
index 205c8c9..949620b 100644
--- a/src/builtins/builtins-date.cc
+++ b/src/builtins/builtins-date.cc
@@ -203,7 +203,7 @@
 BUILTIN(DateConstructor_ConstructStub) {
   HandleScope scope(isolate);
   int const argc = args.length() - 1;
-  Handle<JSFunction> target = args.target<JSFunction>();
+  Handle<JSFunction> target = args.target();
   Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
   double time_val;
   if (argc == 0) {
@@ -919,7 +919,7 @@
 
   Label receiver_not_date(assembler, Label::kDeferred);
 
-  assembler->GotoIf(assembler->WordIsSmi(receiver), &receiver_not_date);
+  assembler->GotoIf(assembler->TaggedIsSmi(receiver), &receiver_not_date);
   Node* receiver_instance_type = assembler->LoadInstanceType(receiver);
   assembler->GotoIf(
       assembler->Word32NotEqual(receiver_instance_type,
diff --git a/src/builtins/builtins-error.cc b/src/builtins/builtins-error.cc
index c2a7b99..24ae56b 100644
--- a/src/builtins/builtins-error.cc
+++ b/src/builtins/builtins-error.cc
@@ -28,7 +28,7 @@
   }
 
   RETURN_RESULT_OR_FAILURE(
-      isolate, ErrorUtils::Construct(isolate, args.target<JSFunction>(),
+      isolate, ErrorUtils::Construct(isolate, args.target(),
                                      Handle<Object>::cast(args.new_target()),
                                      args.atOrUndefined(isolate, 1), mode,
                                      caller, false));
@@ -55,7 +55,7 @@
 
   Handle<Object> stack_trace =
       isolate->CaptureSimpleStackTrace(object, mode, caller);
-  if (!stack_trace->IsJSArray()) return *isolate->factory()->undefined_value();
+  if (!stack_trace->IsJSArray()) return isolate->heap()->undefined_value();
 
   Handle<Object> formatted_stack_trace;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
diff --git a/src/builtins/builtins-function.cc b/src/builtins/builtins-function.cc
index 0a631bf..9a8ee79 100644
--- a/src/builtins/builtins-function.cc
+++ b/src/builtins/builtins-function.cc
@@ -21,7 +21,7 @@
   DCHECK_LE(1, args.length());
   int const argc = args.length() - 1;
 
-  Handle<JSFunction> target = args.target<JSFunction>();
+  Handle<JSFunction> target = args.target();
   Handle<JSObject> target_global_proxy(target->global_proxy(), isolate);
 
   if (!Builtins::AllowDynamicFunction(isolate, target, target_global_proxy)) {
@@ -198,7 +198,7 @@
   if (!target->IsJSFunction() ||
       length_lookup.state() != LookupIterator::ACCESSOR ||
       !length_lookup.GetAccessors()->IsAccessorInfo()) {
-    Handle<Object> length(Smi::FromInt(0), isolate);
+    Handle<Object> length(Smi::kZero, isolate);
     Maybe<PropertyAttributes> attributes =
         JSReceiver::GetPropertyAttributes(&length_lookup);
     if (!attributes.IsJust()) return isolate->heap()->exception();
diff --git a/src/builtins/builtins-generator.cc b/src/builtins/builtins-generator.cc
index 93b2e48..fe1f2d2 100644
--- a/src/builtins/builtins-generator.cc
+++ b/src/builtins/builtins-generator.cc
@@ -26,7 +26,8 @@
 
   // Check if the {receiver} is actually a JSGeneratorObject.
   Label if_receiverisincompatible(assembler, Label::kDeferred);
-  assembler->GotoIf(assembler->WordIsSmi(receiver), &if_receiverisincompatible);
+  assembler->GotoIf(assembler->TaggedIsSmi(receiver),
+                    &if_receiverisincompatible);
   Node* receiver_instance_type = assembler->LoadInstanceType(receiver);
   assembler->GotoUnless(assembler->Word32Equal(
                             receiver_instance_type,
diff --git a/src/builtins/builtins-global.cc b/src/builtins/builtins-global.cc
index 2205788..1fa0967 100644
--- a/src/builtins/builtins-global.cc
+++ b/src/builtins/builtins-global.cc
@@ -83,7 +83,7 @@
 BUILTIN(GlobalEval) {
   HandleScope scope(isolate);
   Handle<Object> x = args.atOrUndefined(isolate, 1);
-  Handle<JSFunction> target = args.target<JSFunction>();
+  Handle<JSFunction> target = args.target();
   Handle<JSObject> target_global_proxy(target->global_proxy(), isolate);
   if (!x->IsString()) return *x;
   if (!Builtins::AllowDynamicFunction(isolate, target, target_global_proxy)) {
@@ -121,7 +121,7 @@
     Node* num = var_num.value();
 
     // Check if {num} is a Smi or a HeapObject.
-    assembler->GotoIf(assembler->WordIsSmi(num), &return_true);
+    assembler->GotoIf(assembler->TaggedIsSmi(num), &return_true);
 
     // Check if {num} is a HeapNumber.
     Label if_numisheapnumber(assembler),
@@ -176,7 +176,7 @@
     Node* num = var_num.value();
 
     // Check if {num} is a Smi or a HeapObject.
-    assembler->GotoIf(assembler->WordIsSmi(num), &return_false);
+    assembler->GotoIf(assembler->TaggedIsSmi(num), &return_false);
 
     // Check if {num} is a HeapNumber.
     Label if_numisheapnumber(assembler),
diff --git a/src/builtins/builtins-handler.cc b/src/builtins/builtins-handler.cc
index ebbc978..88597f8 100644
--- a/src/builtins/builtins-handler.cc
+++ b/src/builtins/builtins-handler.cc
@@ -2,18 +2,15 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/builtins/builtins.h"
 #include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
 #include "src/ic/handler-compiler.h"
 #include "src/ic/ic.h"
+#include "src/ic/keyed-store-generic.h"
 
 namespace v8 {
 namespace internal {
 
-void Builtins::Generate_KeyedLoadIC_Megamorphic(MacroAssembler* masm) {
-  KeyedLoadIC::GenerateMegamorphic(masm);
-}
-
 void Builtins::Generate_KeyedLoadIC_Megamorphic_TF(
     CodeStubAssembler* assembler) {
   typedef compiler::Node Node;
@@ -44,6 +41,32 @@
   KeyedStoreIC::GenerateMegamorphic(masm, STRICT);
 }
 
+void KeyedStoreICMegamorphic(CodeStubAssembler* assembler, LanguageMode mode) {
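+  // Shared body for the sloppy and strict megamorphic keyed-store stubs;
+  // both dispatch to the CodeStubAssembler-based KeyedStoreGenericGenerator.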
+  typedef compiler::Node Node;
+  typedef StoreWithVectorDescriptor Descriptor;
+
+  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+  Node* name = assembler->Parameter(Descriptor::kName);
+  Node* value = assembler->Parameter(Descriptor::kValue);
+  Node* slot = assembler->Parameter(Descriptor::kSlot);
+  Node* vector = assembler->Parameter(Descriptor::kVector);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
+                                         vector);
+  KeyedStoreGenericGenerator::Generate(assembler, &p, mode);
+}
+
+void Builtins::Generate_KeyedStoreIC_Megamorphic_TF(
+    CodeStubAssembler* assembler) {
+  KeyedStoreICMegamorphic(assembler, SLOPPY);
+}
+
+void Builtins::Generate_KeyedStoreIC_Megamorphic_Strict_TF(
+    CodeStubAssembler* assembler) {
+  KeyedStoreICMegamorphic(assembler, STRICT);
+}
+
 void Builtins::Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
   KeyedStoreIC::GenerateMiss(masm);
 }
diff --git a/src/builtins/builtins-iterator.cc b/src/builtins/builtins-iterator.cc
index 7b91e36..7f74c20 100644
--- a/src/builtins/builtins-iterator.cc
+++ b/src/builtins/builtins-iterator.cc
@@ -2,8 +2,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/builtins/builtins.h"
 #include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/frames-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -13,5 +14,55 @@
   assembler->Return(assembler->Parameter(0));
 }
 
+BUILTIN(ModuleNamespaceIterator) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  Handle<Object> receiver = args.at<Object>(0);
+
+  if (!receiver->IsJSModuleNamespace()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+                              isolate->factory()->iterator_symbol(), receiver));
+  }
+  auto ns = Handle<JSModuleNamespace>::cast(receiver);
+
+  Handle<FixedArray> names =
+      KeyAccumulator::GetKeys(ns, KeyCollectionMode::kOwnOnly, SKIP_SYMBOLS)
+          .ToHandleChecked();
+  return *isolate->factory()->NewJSFixedArrayIterator(names);
+}
+
+BUILTIN(FixedArrayIteratorNext) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  Handle<Object> receiver = args.at<Object>(0);
+
+  // It is an error if this function is called on anything other than the
+  // particular iterator object for which the function was created.
+  if (!receiver->IsJSFixedArrayIterator() ||
+      Handle<JSFixedArrayIterator>::cast(receiver)->initial_next() !=
+          *args.target()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+                              isolate->factory()->next_string(), receiver));
+  }
+
+  auto iterator = Handle<JSFixedArrayIterator>::cast(receiver);
+  Handle<Object> value;
+  bool done;
+
+  int index = iterator->index();
+  if (index < iterator->array()->length()) {
+    value = handle(iterator->array()->get(index), isolate);
+    done = false;
+    iterator->set_index(index + 1);
+  } else {
+    value = isolate->factory()->undefined_value();
+    done = true;
+  }
+
+  return *isolate->factory()->NewJSIteratorResult(value, done);
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/builtins/builtins-math.cc b/src/builtins/builtins-math.cc
index e8d429e..30f12ba 100644
--- a/src/builtins/builtins-math.cc
+++ b/src/builtins/builtins-math.cc
@@ -15,99 +15,94 @@
 
 // ES6 section - 20.2.2.1 Math.abs ( x )
 void Builtins::Generate_MathAbs(CodeStubAssembler* assembler) {
-  using compiler::Node;
-  Node* x = assembler->Parameter(1);
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
   Node* context = assembler->Parameter(4);
-  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
-  Node* value = assembler->Float64Abs(x_value);
-  Node* result = assembler->ChangeFloat64ToTagged(value);
-  assembler->Return(result);
-}
 
-// ES6 section 20.2.2.2 Math.acos ( x )
-void Builtins::Generate_MathAcos(CodeStubAssembler* assembler) {
-  using compiler::Node;
+  // We might need to loop once for ToNumber conversion.
+  Variable var_x(assembler, MachineRepresentation::kTagged);
+  Label loop(assembler, &var_x);
+  var_x.Bind(assembler->Parameter(1));
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    // Load the current {x} value.
+    Node* x = var_x.value();
 
-  Node* x = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
-  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
-  Node* value = assembler->Float64Acos(x_value);
-  Node* result = assembler->ChangeFloat64ToTagged(value);
-  assembler->Return(result);
-}
+    // Check if {x} is a Smi or a HeapObject.
+    Label if_xissmi(assembler), if_xisnotsmi(assembler);
+    assembler->Branch(assembler->TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);
 
-// ES6 section 20.2.2.3 Math.acosh ( x )
-void Builtins::Generate_MathAcosh(CodeStubAssembler* assembler) {
-  using compiler::Node;
+    assembler->Bind(&if_xissmi);
+    {
+      // Check if {x} is already positive.
+      Label if_xispositive(assembler), if_xisnotpositive(assembler);
+      assembler->BranchIfSmiLessThanOrEqual(
+          assembler->SmiConstant(Smi::FromInt(0)), x, &if_xispositive,
+          &if_xisnotpositive);
 
-  Node* x = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
-  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
-  Node* value = assembler->Float64Acosh(x_value);
-  Node* result = assembler->ChangeFloat64ToTagged(value);
-  assembler->Return(result);
-}
+      assembler->Bind(&if_xispositive);
+      {
+        // Just return the input {x}.
+        assembler->Return(x);
+      }
 
-// ES6 section 20.2.2.4 Math.asin ( x )
-void Builtins::Generate_MathAsin(CodeStubAssembler* assembler) {
-  using compiler::Node;
+      assembler->Bind(&if_xisnotpositive);
+      {
+        // Try to negate the {x} value.
+        Node* pair = assembler->IntPtrSubWithOverflow(
+            assembler->IntPtrConstant(0), assembler->BitcastTaggedToWord(x));
+        Node* overflow = assembler->Projection(1, pair);
+        Label if_overflow(assembler, Label::kDeferred),
+            if_notoverflow(assembler);
+        assembler->Branch(overflow, &if_overflow, &if_notoverflow);
 
-  Node* x = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
-  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
-  Node* value = assembler->Float64Asin(x_value);
-  Node* result = assembler->ChangeFloat64ToTagged(value);
-  assembler->Return(result);
-}
+        assembler->Bind(&if_notoverflow);
+        {
+          // There is a Smi representation for negated {x}.
+          Node* result = assembler->Projection(0, pair);
+          result = assembler->BitcastWordToTagged(result);
+          assembler->Return(result);
+        }
 
-// ES6 section 20.2.2.5 Math.asinh ( x )
-void Builtins::Generate_MathAsinh(CodeStubAssembler* assembler) {
-  using compiler::Node;
+        assembler->Bind(&if_overflow);
+        {
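+          // Negation overflows only for Smi::kMinValue, whose absolute value
+          // is not a Smi; materialize it as a Number instead.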
+          Node* result = assembler->NumberConstant(0.0 - Smi::kMinValue);
+          assembler->Return(result);
+        }
+      }
+    }
 
-  Node* x = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
-  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
-  Node* value = assembler->Float64Asinh(x_value);
-  Node* result = assembler->ChangeFloat64ToTagged(value);
-  assembler->Return(result);
-}
+    assembler->Bind(&if_xisnotsmi);
+    {
+      // Check if {x} is a HeapNumber.
+      Label if_xisheapnumber(assembler),
+          if_xisnotheapnumber(assembler, Label::kDeferred);
+      assembler->Branch(
+          assembler->WordEqual(assembler->LoadMap(x),
+                               assembler->HeapNumberMapConstant()),
+          &if_xisheapnumber, &if_xisnotheapnumber);
 
-// ES6 section 20.2.2.6 Math.atan ( x )
-void Builtins::Generate_MathAtan(CodeStubAssembler* assembler) {
-  using compiler::Node;
+      assembler->Bind(&if_xisheapnumber);
+      {
+        Node* x_value = assembler->LoadHeapNumberValue(x);
+        Node* value = assembler->Float64Abs(x_value);
+        Node* result = assembler->AllocateHeapNumberWithValue(value);
+        assembler->Return(result);
+      }
 
-  Node* x = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
-  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
-  Node* value = assembler->Float64Atan(x_value);
-  Node* result = assembler->ChangeFloat64ToTagged(value);
-  assembler->Return(result);
-}
-
-// ES6 section 20.2.2.7 Math.atanh ( x )
-void Builtins::Generate_MathAtanh(CodeStubAssembler* assembler) {
-  using compiler::Node;
-
-  Node* x = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
-  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
-  Node* value = assembler->Float64Atanh(x_value);
-  Node* result = assembler->ChangeFloat64ToTagged(value);
-  assembler->Return(result);
-}
-
-// ES6 section 20.2.2.8 Math.atan2 ( y, x )
-void Builtins::Generate_MathAtan2(CodeStubAssembler* assembler) {
-  using compiler::Node;
-
-  Node* y = assembler->Parameter(1);
-  Node* x = assembler->Parameter(2);
-  Node* context = assembler->Parameter(5);
-  Node* y_value = assembler->TruncateTaggedToFloat64(context, y);
-  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
-  Node* value = assembler->Float64Atan2(y_value, x_value);
-  Node* result = assembler->ChangeFloat64ToTagged(value);
-  assembler->Return(result);
+      assembler->Bind(&if_xisnotheapnumber);
+      {
+        // Need to convert {x} to a Number first.
+        Callable callable =
+            CodeFactory::NonNumberToNumber(assembler->isolate());
+        var_x.Bind(assembler->CallStub(callable, context, x));
+        assembler->Goto(&loop);
+      }
+    }
+  }
 }
 
 namespace {
@@ -133,7 +128,7 @@
 
     // Check if {x} is a Smi or a HeapObject.
     Label if_xissmi(assembler), if_xisnotsmi(assembler);
-    assembler->Branch(assembler->WordIsSmi(x), &if_xissmi, &if_xisnotsmi);
+    assembler->Branch(assembler->TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);
 
     assembler->Bind(&if_xissmi);
     {
@@ -171,8 +166,65 @@
   }
 }
 
+void Generate_MathUnaryOperation(
+    CodeStubAssembler* assembler,
+    compiler::Node* (CodeStubAssembler::*float64op)(compiler::Node*)) {
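+  // Common body for the simple unary Math builtins: truncate the argument to
+  // a float64, apply |float64op|, and box the result in a new HeapNumber.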
+  typedef compiler::Node Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = (assembler->*float64op)(x_value);
+  Node* result = assembler->AllocateHeapNumberWithValue(value);
+  assembler->Return(result);
+}
+
 }  // namespace
 
+// ES6 section 20.2.2.2 Math.acos ( x )
+void Builtins::Generate_MathAcos(CodeStubAssembler* assembler) {
+  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Acos);
+}
+
+// ES6 section 20.2.2.3 Math.acosh ( x )
+void Builtins::Generate_MathAcosh(CodeStubAssembler* assembler) {
+  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Acosh);
+}
+
+// ES6 section 20.2.2.4 Math.asin ( x )
+void Builtins::Generate_MathAsin(CodeStubAssembler* assembler) {
+  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Asin);
+}
+
+// ES6 section 20.2.2.5 Math.asinh ( x )
+void Builtins::Generate_MathAsinh(CodeStubAssembler* assembler) {
+  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Asinh);
+}
+
+// ES6 section 20.2.2.6 Math.atan ( x )
+void Builtins::Generate_MathAtan(CodeStubAssembler* assembler) {
+  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Atan);
+}
+
+// ES6 section 20.2.2.7 Math.atanh ( x )
+void Builtins::Generate_MathAtanh(CodeStubAssembler* assembler) {
+  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Atanh);
+}
+
+// ES6 section 20.2.2.8 Math.atan2 ( y, x )
+void Builtins::Generate_MathAtan2(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* y = assembler->Parameter(1);
+  Node* x = assembler->Parameter(2);
+  Node* context = assembler->Parameter(5);
+  Node* y_value = assembler->TruncateTaggedToFloat64(context, y);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Atan2(y_value, x_value);
+  Node* result = assembler->AllocateHeapNumberWithValue(value);
+  assembler->Return(result);
+}
+
 // ES6 section 20.2.2.10 Math.ceil ( x )
 void Builtins::Generate_MathCeil(CodeStubAssembler* assembler) {
   Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Ceil);
@@ -180,14 +232,7 @@
 
 // ES6 section 20.2.2.9 Math.cbrt ( x )
 void Builtins::Generate_MathCbrt(CodeStubAssembler* assembler) {
-  using compiler::Node;
-
-  Node* x = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
-  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
-  Node* value = assembler->Float64Cbrt(x_value);
-  Node* result = assembler->ChangeFloat64ToTagged(value);
-  assembler->Return(result);
+  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Cbrt);
 }
 
 // ES6 section 20.2.2.11 Math.clz32 ( x )
@@ -214,7 +259,7 @@
 
     // Check if {x} is a Smi or a HeapObject.
     Label if_xissmi(assembler), if_xisnotsmi(assembler);
-    assembler->Branch(assembler->WordIsSmi(x), &if_xissmi, &if_xisnotsmi);
+    assembler->Branch(assembler->TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);
 
     assembler->Bind(&if_xissmi);
     {
@@ -260,38 +305,22 @@
 
 // ES6 section 20.2.2.12 Math.cos ( x )
 void Builtins::Generate_MathCos(CodeStubAssembler* assembler) {
-  using compiler::Node;
-
-  Node* x = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
-  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
-  Node* value = assembler->Float64Cos(x_value);
-  Node* result = assembler->ChangeFloat64ToTagged(value);
-  assembler->Return(result);
+  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Cos);
 }
 
 // ES6 section 20.2.2.13 Math.cosh ( x )
 void Builtins::Generate_MathCosh(CodeStubAssembler* assembler) {
-  using compiler::Node;
-
-  Node* x = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
-  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
-  Node* value = assembler->Float64Cosh(x_value);
-  Node* result = assembler->ChangeFloat64ToTagged(value);
-  assembler->Return(result);
+  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Cosh);
 }
 
 // ES6 section 20.2.2.14 Math.exp ( x )
 void Builtins::Generate_MathExp(CodeStubAssembler* assembler) {
-  using compiler::Node;
+  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Exp);
+}
 
-  Node* x = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
-  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
-  Node* value = assembler->Float64Exp(x_value);
-  Node* result = assembler->ChangeFloat64ToTagged(value);
-  assembler->Return(result);
+// ES6 section 20.2.2.15 Math.expm1 ( x )
+void Builtins::Generate_MathExpm1(CodeStubAssembler* assembler) {
+  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Expm1);
 }
 
 // ES6 section 20.2.2.16 Math.floor ( x )
@@ -308,7 +337,7 @@
   Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
   Node* value32 = assembler->TruncateFloat64ToFloat32(x_value);
   Node* value = assembler->ChangeFloat32ToFloat64(value32);
-  Node* result = assembler->ChangeFloat64ToTagged(value);
+  Node* result = assembler->AllocateHeapNumberWithValue(value);
   assembler->Return(result);
 }
 
@@ -316,7 +345,7 @@
 BUILTIN(MathHypot) {
   HandleScope scope(isolate);
   int const length = args.length() - 1;
-  if (length == 0) return Smi::FromInt(0);
+  if (length == 0) return Smi::kZero;
   DCHECK_LT(0, length);
   double max = 0;
   bool one_arg_is_nan = false;
@@ -341,11 +370,11 @@
   }
 
   if (one_arg_is_nan) {
-    return *isolate->factory()->nan_value();
+    return isolate->heap()->nan_value();
   }
 
   if (max == 0) {
-    return Smi::FromInt(0);
+    return Smi::kZero;
   }
   DCHECK_GT(max, 0);
 
@@ -380,62 +409,22 @@
 
 // ES6 section 20.2.2.20 Math.log ( x )
 void Builtins::Generate_MathLog(CodeStubAssembler* assembler) {
-  using compiler::Node;
-
-  Node* x = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
-  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
-  Node* value = assembler->Float64Log(x_value);
-  Node* result = assembler->ChangeFloat64ToTagged(value);
-  assembler->Return(result);
+  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Log);
 }
 
 // ES6 section 20.2.2.21 Math.log1p ( x )
 void Builtins::Generate_MathLog1p(CodeStubAssembler* assembler) {
-  using compiler::Node;
-
-  Node* x = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
-  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
-  Node* value = assembler->Float64Log1p(x_value);
-  Node* result = assembler->ChangeFloat64ToTagged(value);
-  assembler->Return(result);
+  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Log1p);
 }
 
 // ES6 section 20.2.2.22 Math.log10 ( x )
 void Builtins::Generate_MathLog10(CodeStubAssembler* assembler) {
-  using compiler::Node;
-
-  Node* x = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
-  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
-  Node* value = assembler->Float64Log10(x_value);
-  Node* result = assembler->ChangeFloat64ToTagged(value);
-  assembler->Return(result);
+  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Log10);
 }
 
 // ES6 section 20.2.2.23 Math.log2 ( x )
 void Builtins::Generate_MathLog2(CodeStubAssembler* assembler) {
-  using compiler::Node;
-
-  Node* x = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
-  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
-  Node* value = assembler->Float64Log2(x_value);
-  Node* result = assembler->ChangeFloat64ToTagged(value);
-  assembler->Return(result);
-}
-
-// ES6 section 20.2.2.15 Math.expm1 ( x )
-void Builtins::Generate_MathExpm1(CodeStubAssembler* assembler) {
-  using compiler::Node;
-
-  Node* x = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
-  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
-  Node* value = assembler->Float64Expm1(x_value);
-  Node* result = assembler->ChangeFloat64ToTagged(value);
-  assembler->Return(result);
+  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Log2);
 }
 
 // ES6 section 20.2.2.26 Math.pow ( x, y )
@@ -452,6 +441,46 @@
   assembler->Return(result);
 }
 
+// ES6 section 20.2.2.27 Math.random ( )
+void Builtins::Generate_MathRandom(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* context = assembler->Parameter(3);
+  Node* native_context = assembler->LoadNativeContext(context);
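+  // Math.random() draws from a per-native-context cache of precomputed
+  // doubles; the index counts down and the runtime refills the cache when it
+  // reaches zero.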
+
+  // Load cache index.
+  CodeStubAssembler::Variable smi_index(assembler,
+                                        MachineRepresentation::kTagged);
+  smi_index.Bind(assembler->LoadContextElement(
+      native_context, Context::MATH_RANDOM_INDEX_INDEX));
+
+  // Cached random numbers are exhausted if index is 0. Go to slow path.
+  CodeStubAssembler::Label if_cached(assembler);
+  assembler->GotoIf(assembler->SmiAbove(smi_index.value(),
+                                        assembler->SmiConstant(Smi::kZero)),
+                    &if_cached);
+
+  // Cache exhausted; repopulate it. The runtime call returns the new index.
+  smi_index.Bind(
+      assembler->CallRuntime(Runtime::kGenerateRandomNumbers, context));
+  assembler->Goto(&if_cached);
+
+  // Compute next index by decrement.
+  assembler->Bind(&if_cached);
+  Node* new_smi_index = assembler->SmiSub(
+      smi_index.value(), assembler->SmiConstant(Smi::FromInt(1)));
+  assembler->StoreContextElement(
+      native_context, Context::MATH_RANDOM_INDEX_INDEX, new_smi_index);
+
+  // Load and return next cached random number.
+  Node* array = assembler->LoadContextElement(native_context,
+                                              Context::MATH_RANDOM_CACHE_INDEX);
+  Node* random = assembler->LoadFixedDoubleArrayElement(
+      array, new_smi_index, MachineType::Float64(), 0,
+      CodeStubAssembler::SMI_PARAMETERS);
+  assembler->Return(assembler->AllocateHeapNumberWithValue(random));
+}
+
 // ES6 section 20.2.2.28 Math.round ( x )
 void Builtins::Generate_MathRound(CodeStubAssembler* assembler) {
   Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Round);
@@ -486,62 +515,27 @@
 
 // ES6 section 20.2.2.30 Math.sin ( x )
 void Builtins::Generate_MathSin(CodeStubAssembler* assembler) {
-  using compiler::Node;
-
-  Node* x = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
-  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
-  Node* value = assembler->Float64Sin(x_value);
-  Node* result = assembler->ChangeFloat64ToTagged(value);
-  assembler->Return(result);
+  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Sin);
 }
 
 // ES6 section 20.2.2.31 Math.sinh ( x )
 void Builtins::Generate_MathSinh(CodeStubAssembler* assembler) {
-  using compiler::Node;
-
-  Node* x = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
-  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
-  Node* value = assembler->Float64Sinh(x_value);
-  Node* result = assembler->ChangeFloat64ToTagged(value);
-  assembler->Return(result);
+  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Sinh);
 }
 
 // ES6 section 20.2.2.32 Math.sqrt ( x )
 void Builtins::Generate_MathSqrt(CodeStubAssembler* assembler) {
-  using compiler::Node;
-
-  Node* x = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
-  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
-  Node* value = assembler->Float64Sqrt(x_value);
-  Node* result = assembler->ChangeFloat64ToTagged(value);
-  assembler->Return(result);
+  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Sqrt);
 }
 
 // ES6 section 20.2.2.33 Math.tan ( x )
 void Builtins::Generate_MathTan(CodeStubAssembler* assembler) {
-  using compiler::Node;
-
-  Node* x = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
-  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
-  Node* value = assembler->Float64Tan(x_value);
-  Node* result = assembler->ChangeFloat64ToTagged(value);
-  assembler->Return(result);
+  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Tan);
 }
 
 // ES6 section 20.2.2.34 Math.tanh ( x )
 void Builtins::Generate_MathTanh(CodeStubAssembler* assembler) {
-  using compiler::Node;
-
-  Node* x = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
-  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
-  Node* value = assembler->Float64Tanh(x_value);
-  Node* result = assembler->ChangeFloat64ToTagged(value);
-  assembler->Return(result);
+  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Tanh);
 }
 
 // ES6 section 20.2.2.35 Math.trunc ( x )
diff --git a/src/builtins/builtins-number.cc b/src/builtins/builtins-number.cc
index 1762844..3e2bc55 100644
--- a/src/builtins/builtins-number.cc
+++ b/src/builtins/builtins-number.cc
@@ -2,8 +2,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/builtins/builtins.h"
 #include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-factory.h"
 
 namespace v8 {
 namespace internal {
@@ -21,7 +22,7 @@
   Label return_true(assembler), return_false(assembler);
 
   // Check if {number} is a Smi.
-  assembler->GotoIf(assembler->WordIsSmi(number), &return_true);
+  assembler->GotoIf(assembler->TaggedIsSmi(number), &return_true);
 
   // Check if {number} is a HeapNumber.
   assembler->GotoUnless(
@@ -52,7 +53,7 @@
   Label return_true(assembler), return_false(assembler);
 
   // Check if {number} is a Smi.
-  assembler->GotoIf(assembler->WordIsSmi(number), &return_true);
+  assembler->GotoIf(assembler->TaggedIsSmi(number), &return_true);
 
   // Check if {number} is a HeapNumber.
   assembler->GotoUnless(
@@ -67,9 +68,10 @@
   Node* integer = assembler->Float64Trunc(number_value);
 
   // Check if {number}'s value matches the integer (ruling out the infinities).
-  assembler->BranchIfFloat64Equal(assembler->Float64Sub(number_value, integer),
-                                  assembler->Float64Constant(0.0), &return_true,
-                                  &return_false);
+  assembler->Branch(
+      assembler->Float64Equal(assembler->Float64Sub(number_value, integer),
+                              assembler->Float64Constant(0.0)),
+      &return_true, &return_false);
 
   assembler->Bind(&return_true);
   assembler->Return(assembler->BooleanConstant(true));
@@ -88,7 +90,7 @@
   Label return_true(assembler), return_false(assembler);
 
   // Check if {number} is a Smi.
-  assembler->GotoIf(assembler->WordIsSmi(number), &return_false);
+  assembler->GotoIf(assembler->TaggedIsSmi(number), &return_false);
 
   // Check if {number} is a HeapNumber.
   assembler->GotoUnless(
@@ -117,7 +119,7 @@
   Label return_true(assembler), return_false(assembler);
 
   // Check if {number} is a Smi.
-  assembler->GotoIf(assembler->WordIsSmi(number), &return_true);
+  assembler->GotoIf(assembler->TaggedIsSmi(number), &return_true);
 
   // Check if {number} is a HeapNumber.
   assembler->GotoUnless(
@@ -138,9 +140,10 @@
       &return_false);
 
   // Check if the {integer} value is in safe integer range.
-  assembler->BranchIfFloat64LessThanOrEqual(
-      assembler->Float64Abs(integer),
-      assembler->Float64Constant(kMaxSafeInteger), &return_true, &return_false);
+  assembler->Branch(assembler->Float64LessThanOrEqual(
+                        assembler->Float64Abs(integer),
+                        assembler->Float64Constant(kMaxSafeInteger)),
+                    &return_true, &return_false);
 
   assembler->Bind(&return_true);
   assembler->Return(assembler->BooleanConstant(true));
@@ -149,6 +152,209 @@
   assembler->Return(assembler->BooleanConstant(false));
 }
 
+// ES6 section 20.1.2.12 Number.parseFloat ( string )
+void Builtins::Generate_NumberParseFloat(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* context = assembler->Parameter(4);
+
+  // We might need to loop once for ToString conversion.
+  Variable var_input(assembler, MachineRepresentation::kTagged);
+  Label loop(assembler, &var_input);
+  var_input.Bind(assembler->Parameter(1));
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    // Load the current {input} value.
+    Node* input = var_input.value();
+
+    // Check if the {input} is a HeapObject or a Smi.
+    Label if_inputissmi(assembler), if_inputisnotsmi(assembler);
+    assembler->Branch(assembler->TaggedIsSmi(input), &if_inputissmi,
+                      &if_inputisnotsmi);
+
+    assembler->Bind(&if_inputissmi);
+    {
+      // The {input} is already a Number, no need to do anything.
+      assembler->Return(input);
+    }
+
+    assembler->Bind(&if_inputisnotsmi);
+    {
+      // The {input} is a HeapObject, check if it's already a String.
+      Label if_inputisstring(assembler), if_inputisnotstring(assembler);
+      Node* input_map = assembler->LoadMap(input);
+      Node* input_instance_type = assembler->LoadMapInstanceType(input_map);
+      assembler->Branch(assembler->IsStringInstanceType(input_instance_type),
+                        &if_inputisstring, &if_inputisnotstring);
+
+      assembler->Bind(&if_inputisstring);
+      {
+        // The {input} is already a String, check if {input} contains
+        // a cached array index.
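+        // (Strings that are valid array indices, e.g. "42", cache the parsed
+        // index in their hash field, so the value can be returned without
+        // reparsing the string.)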
+        Label if_inputcached(assembler), if_inputnotcached(assembler);
+        Node* input_hash = assembler->LoadNameHashField(input);
+        Node* input_bit = assembler->Word32And(
+            input_hash,
+            assembler->Int32Constant(String::kContainsCachedArrayIndexMask));
+        assembler->Branch(
+            assembler->Word32Equal(input_bit, assembler->Int32Constant(0)),
+            &if_inputcached, &if_inputnotcached);
+
+        assembler->Bind(&if_inputcached);
+        {
+          // Just return the {input}'s cached array index.
+          Node* input_array_index =
+              assembler->DecodeWordFromWord32<String::ArrayIndexValueBits>(
+                  input_hash);
+          assembler->Return(assembler->SmiTag(input_array_index));
+        }
+
+        assembler->Bind(&if_inputnotcached);
+        {
+          // Need to fall back to the runtime to convert {input} to double.
+          assembler->Return(assembler->CallRuntime(Runtime::kStringParseFloat,
+                                                   context, input));
+        }
+      }
+
+      assembler->Bind(&if_inputisnotstring);
+      {
+        // The {input} is neither a String nor a Smi, check for HeapNumber.
+        Label if_inputisnumber(assembler),
+            if_inputisnotnumber(assembler, Label::kDeferred);
+        assembler->Branch(
+            assembler->WordEqual(input_map, assembler->HeapNumberMapConstant()),
+            &if_inputisnumber, &if_inputisnotnumber);
+
+        assembler->Bind(&if_inputisnumber);
+        {
+          // The {input} is already a Number, take care of -0.
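+          // (ToString(-0) is "0" and parseFloat("0") is +0, so -0 must map
+          // to +0 here.)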
+          Label if_inputiszero(assembler), if_inputisnotzero(assembler);
+          Node* input_value = assembler->LoadHeapNumberValue(input);
+          assembler->Branch(assembler->Float64Equal(
+                                input_value, assembler->Float64Constant(0.0)),
+                            &if_inputiszero, &if_inputisnotzero);
+
+          assembler->Bind(&if_inputiszero);
+          assembler->Return(assembler->SmiConstant(0));
+
+          assembler->Bind(&if_inputisnotzero);
+          assembler->Return(input);
+        }
+
+        assembler->Bind(&if_inputisnotnumber);
+        {
+          // Need to convert the {input} to String first.
+          // TODO(bmeurer): This could be more efficient if necessary.
+          Callable callable = CodeFactory::ToString(assembler->isolate());
+          var_input.Bind(assembler->CallStub(callable, context, input));
+          assembler->Goto(&loop);
+        }
+      }
+    }
+  }
+}
+
+// ES6 section 20.1.2.13 Number.parseInt ( string, radix )
+void Builtins::Generate_NumberParseInt(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Node* input = assembler->Parameter(1);
+  Node* radix = assembler->Parameter(2);
+  Node* context = assembler->Parameter(5);
+
+  // Check if {radix} is treated as 10 (i.e. undefined, 0 or 10).
+  Label if_radix10(assembler), if_generic(assembler, Label::kDeferred);
+  assembler->GotoIf(assembler->WordEqual(radix, assembler->UndefinedConstant()),
+                    &if_radix10);
+  assembler->GotoIf(
+      assembler->WordEqual(radix, assembler->SmiConstant(Smi::FromInt(10))),
+      &if_radix10);
+  assembler->GotoIf(
+      assembler->WordEqual(radix, assembler->SmiConstant(Smi::FromInt(0))),
+      &if_radix10);
+  assembler->Goto(&if_generic);
+
+  assembler->Bind(&if_radix10);
+  {
+    // Check if we can avoid the ToString conversion on {input}.
+    Label if_inputissmi(assembler), if_inputisheapnumber(assembler),
+        if_inputisstring(assembler);
+    assembler->GotoIf(assembler->TaggedIsSmi(input), &if_inputissmi);
+    Node* input_map = assembler->LoadMap(input);
+    assembler->GotoIf(
+        assembler->WordEqual(input_map, assembler->HeapNumberMapConstant()),
+        &if_inputisheapnumber);
+    Node* input_instance_type = assembler->LoadMapInstanceType(input_map);
+    assembler->Branch(assembler->IsStringInstanceType(input_instance_type),
+                      &if_inputisstring, &if_generic);
+
+    assembler->Bind(&if_inputissmi);
+    {
+      // Just return the {input}.
+      assembler->Return(input);
+    }
+
+    assembler->Bind(&if_inputisheapnumber);
+    {
+      // Check if the {input} value is in Signed32 range.
+      Label if_inputissigned32(assembler);
+      Node* input_value = assembler->LoadHeapNumberValue(input);
+      Node* input_value32 = assembler->TruncateFloat64ToWord32(input_value);
+      assembler->GotoIf(
+          assembler->Float64Equal(
+              input_value, assembler->ChangeInt32ToFloat64(input_value32)),
+          &if_inputissigned32);
+
+      // Check if the absolute {input} value lies in the open interval
+      // (0.01, 1e9).
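+      // (Presumably the lower bound guards against doubles that stringify in
+      // exponential notation, e.g. String(1e-7) is "1e-7" so a spec-compliant
+      // parseInt yields 1 rather than 0, while the upper bound keeps the
+      // truncated value inside int32 range.)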
+      Node* input_value_abs = assembler->Float64Abs(input_value);
+
+      assembler->GotoUnless(
+          assembler->Float64LessThan(input_value_abs,
+                                     assembler->Float64Constant(1e9)),
+          &if_generic);
+      assembler->Branch(assembler->Float64LessThan(
+                            assembler->Float64Constant(0.01), input_value_abs),
+                        &if_inputissigned32, &if_generic);
+
+      // Tag the truncated int32 value and return the result.
+      assembler->Bind(&if_inputissigned32);
+      Node* result = assembler->ChangeInt32ToTagged(input_value32);
+      assembler->Return(result);
+    }
+
+    assembler->Bind(&if_inputisstring);
+    {
+      // Check if the String {input} has a cached array index.
+      Node* input_hash = assembler->LoadNameHashField(input);
+      Node* input_bit = assembler->Word32And(
+          input_hash,
+          assembler->Int32Constant(String::kContainsCachedArrayIndexMask));
+      assembler->GotoIf(
+          assembler->Word32NotEqual(input_bit, assembler->Int32Constant(0)),
+          &if_generic);
+
+      // Return the cached array index as result.
+      Node* input_index =
+          assembler->DecodeWordFromWord32<String::ArrayIndexValueBits>(
+              input_hash);
+      Node* result = assembler->SmiTag(input_index);
+      assembler->Return(result);
+    }
+  }
+
+  assembler->Bind(&if_generic);
+  {
+    Node* result =
+        assembler->CallRuntime(Runtime::kStringParseInt, context, input, radix);
+    assembler->Return(result);
+  }
+}
+
 // ES6 section 20.1.3.2 Number.prototype.toExponential ( fractionDigits )
 BUILTIN(NumberPrototypeToExponential) {
   HandleScope scope(isolate);
@@ -369,5 +575,1244 @@
   assembler->Return(result);
 }
 
+// static
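+// ES6 section 12.7.3 The Addition Operator ( + )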
+void Builtins::Generate_Add(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* left = assembler->Parameter(0);
+  Node* right = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  // Shared entry for floating point addition.
+  Label do_fadd(assembler);
+  Variable var_fadd_lhs(assembler, MachineRepresentation::kFloat64),
+      var_fadd_rhs(assembler, MachineRepresentation::kFloat64);
+
+  // We might need to loop several times due to ToPrimitive, ToString and/or
+  // ToNumber conversions.
+  Variable var_lhs(assembler, MachineRepresentation::kTagged),
+      var_rhs(assembler, MachineRepresentation::kTagged),
+      var_result(assembler, MachineRepresentation::kTagged);
+  Variable* loop_vars[2] = {&var_lhs, &var_rhs};
+  Label loop(assembler, 2, loop_vars), end(assembler),
+      string_add_convert_left(assembler, Label::kDeferred),
+      string_add_convert_right(assembler, Label::kDeferred);
+  var_lhs.Bind(left);
+  var_rhs.Bind(right);
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    // Load the current {lhs} and {rhs} values.
+    Node* lhs = var_lhs.value();
+    Node* rhs = var_rhs.value();
+
+    // Check if the {lhs} is a Smi or a HeapObject.
+    Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
+    assembler->Branch(assembler->TaggedIsSmi(lhs), &if_lhsissmi,
+                      &if_lhsisnotsmi);
+
+    assembler->Bind(&if_lhsissmi);
+    {
+      // Check if the {rhs} is also a Smi.
+      Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+      assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
+                        &if_rhsisnotsmi);
+
+      assembler->Bind(&if_rhsissmi);
+      {
+        // Try fast Smi addition first.
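+        // (Smi payloads occupy the upper bits of the word with a zero tag
+        // bit, so adding the raw words adds the payloads directly and the
+        // overflow projection flags results outside Smi range.)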
+        Node* pair = assembler->IntPtrAddWithOverflow(
+            assembler->BitcastTaggedToWord(lhs),
+            assembler->BitcastTaggedToWord(rhs));
+        Node* overflow = assembler->Projection(1, pair);
+
+        // Check if the Smi addition overflowed.
+        Label if_overflow(assembler), if_notoverflow(assembler);
+        assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+
+        assembler->Bind(&if_overflow);
+        {
+          var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
+          var_fadd_rhs.Bind(assembler->SmiToFloat64(rhs));
+          assembler->Goto(&do_fadd);
+        }
+
+        assembler->Bind(&if_notoverflow);
+        var_result.Bind(assembler->BitcastWordToTaggedSigned(
+            assembler->Projection(0, pair)));
+        assembler->Goto(&end);
+      }
+
+      assembler->Bind(&if_rhsisnotsmi);
+      {
+        // Load the map of {rhs}.
+        Node* rhs_map = assembler->LoadMap(rhs);
+
+        // Check if the {rhs} is a HeapNumber.
+        Label if_rhsisnumber(assembler),
+            if_rhsisnotnumber(assembler, Label::kDeferred);
+        assembler->Branch(assembler->IsHeapNumberMap(rhs_map), &if_rhsisnumber,
+                          &if_rhsisnotnumber);
+
+        assembler->Bind(&if_rhsisnumber);
+        {
+          var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
+          var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+          assembler->Goto(&do_fadd);
+        }
+
+        assembler->Bind(&if_rhsisnotnumber);
+        {
+          // Load the instance type of {rhs}.
+          Node* rhs_instance_type = assembler->LoadMapInstanceType(rhs_map);
+
+          // Check if the {rhs} is a String.
+          Label if_rhsisstring(assembler, Label::kDeferred),
+              if_rhsisnotstring(assembler, Label::kDeferred);
+          assembler->Branch(assembler->IsStringInstanceType(rhs_instance_type),
+                            &if_rhsisstring, &if_rhsisnotstring);
+
+          assembler->Bind(&if_rhsisstring);
+          {
+            var_lhs.Bind(lhs);
+            var_rhs.Bind(rhs);
+            assembler->Goto(&string_add_convert_left);
+          }
+
+          assembler->Bind(&if_rhsisnotstring);
+          {
+            // Check if {rhs} is a JSReceiver.
+            Label if_rhsisreceiver(assembler, Label::kDeferred),
+                if_rhsisnotreceiver(assembler, Label::kDeferred);
+            assembler->Branch(
+                assembler->IsJSReceiverInstanceType(rhs_instance_type),
+                &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+            assembler->Bind(&if_rhsisreceiver);
+            {
+              // Convert {rhs} to a primitive first passing no hint.
+              Callable callable =
+                  CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
+              var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+              assembler->Goto(&loop);
+            }
+
+            assembler->Bind(&if_rhsisnotreceiver);
+            {
+              // Convert {rhs} to a Number first.
+              Callable callable =
+                  CodeFactory::NonNumberToNumber(assembler->isolate());
+              var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+              assembler->Goto(&loop);
+            }
+          }
+        }
+      }
+    }
+
+    assembler->Bind(&if_lhsisnotsmi);
+    {
+      // Load the map and instance type of {lhs}.
+      Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
+
+      // Check if {lhs} is a String.
+      Label if_lhsisstring(assembler), if_lhsisnotstring(assembler);
+      assembler->Branch(assembler->IsStringInstanceType(lhs_instance_type),
+                        &if_lhsisstring, &if_lhsisnotstring);
+
+      assembler->Bind(&if_lhsisstring);
+      {
+        var_lhs.Bind(lhs);
+        var_rhs.Bind(rhs);
+        assembler->Goto(&string_add_convert_right);
+      }
+
+      assembler->Bind(&if_lhsisnotstring);
+      {
+        // Check if {rhs} is a Smi.
+        Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+        assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
+                          &if_rhsisnotsmi);
+
+        assembler->Bind(&if_rhsissmi);
+        {
+          // Check if {lhs} is a Number.
+          Label if_lhsisnumber(assembler),
+              if_lhsisnotnumber(assembler, Label::kDeferred);
+          assembler->Branch(assembler->Word32Equal(
+                                lhs_instance_type,
+                                assembler->Int32Constant(HEAP_NUMBER_TYPE)),
+                            &if_lhsisnumber, &if_lhsisnotnumber);
+
+          assembler->Bind(&if_lhsisnumber);
+          {
+            // The {lhs} is a HeapNumber, the {rhs} is a Smi, just add them.
+            var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+            var_fadd_rhs.Bind(assembler->SmiToFloat64(rhs));
+            assembler->Goto(&do_fadd);
+          }
+
+          assembler->Bind(&if_lhsisnotnumber);
+          {
+            // The {lhs} is neither a Number nor a String, and the {rhs} is a
+            // Smi.
+            Label if_lhsisreceiver(assembler, Label::kDeferred),
+                if_lhsisnotreceiver(assembler, Label::kDeferred);
+            assembler->Branch(
+                assembler->IsJSReceiverInstanceType(lhs_instance_type),
+                &if_lhsisreceiver, &if_lhsisnotreceiver);
+
+            assembler->Bind(&if_lhsisreceiver);
+            {
+              // Convert {lhs} to a primitive first passing no hint.
+              Callable callable =
+                  CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
+              var_lhs.Bind(assembler->CallStub(callable, context, lhs));
+              assembler->Goto(&loop);
+            }
+
+            assembler->Bind(&if_lhsisnotreceiver);
+            {
+              // Convert {lhs} to a Number first.
+              Callable callable =
+                  CodeFactory::NonNumberToNumber(assembler->isolate());
+              var_lhs.Bind(assembler->CallStub(callable, context, lhs));
+              assembler->Goto(&loop);
+            }
+          }
+        }
+
+        assembler->Bind(&if_rhsisnotsmi);
+        {
+          // Load the instance type of {rhs}.
+          Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+
+          // Check if {rhs} is a String.
+          Label if_rhsisstring(assembler), if_rhsisnotstring(assembler);
+          assembler->Branch(assembler->IsStringInstanceType(rhs_instance_type),
+                            &if_rhsisstring, &if_rhsisnotstring);
+
+          assembler->Bind(&if_rhsisstring);
+          {
+            var_lhs.Bind(lhs);
+            var_rhs.Bind(rhs);
+            assembler->Goto(&string_add_convert_left);
+          }
+
+          assembler->Bind(&if_rhsisnotstring);
+          {
+            // Check if {lhs} is a HeapNumber.
+            Label if_lhsisnumber(assembler), if_lhsisnotnumber(assembler);
+            assembler->Branch(assembler->Word32Equal(
+                                  lhs_instance_type,
+                                  assembler->Int32Constant(HEAP_NUMBER_TYPE)),
+                              &if_lhsisnumber, &if_lhsisnotnumber);
+
+            assembler->Bind(&if_lhsisnumber);
+            {
+              // Check if {rhs} is also a HeapNumber.
+              Label if_rhsisnumber(assembler),
+                  if_rhsisnotnumber(assembler, Label::kDeferred);
+              assembler->Branch(assembler->Word32Equal(
+                                    rhs_instance_type,
+                                    assembler->Int32Constant(HEAP_NUMBER_TYPE)),
+                                &if_rhsisnumber, &if_rhsisnotnumber);
+
+              assembler->Bind(&if_rhsisnumber);
+              {
+                // Perform a floating point addition.
+                var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+                var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+                assembler->Goto(&do_fadd);
+              }
+
+              assembler->Bind(&if_rhsisnotnumber);
+              {
+                // Check if {rhs} is a JSReceiver.
+                Label if_rhsisreceiver(assembler, Label::kDeferred),
+                    if_rhsisnotreceiver(assembler, Label::kDeferred);
+                assembler->Branch(
+                    assembler->IsJSReceiverInstanceType(rhs_instance_type),
+                    &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+                assembler->Bind(&if_rhsisreceiver);
+                {
+                  // Convert {rhs} to a primitive first passing no hint.
+                  Callable callable = CodeFactory::NonPrimitiveToPrimitive(
+                      assembler->isolate());
+                  var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+                  assembler->Goto(&loop);
+                }
+
+                assembler->Bind(&if_rhsisnotreceiver);
+                {
+                  // Convert {rhs} to a Number first.
+                  Callable callable =
+                      CodeFactory::NonNumberToNumber(assembler->isolate());
+                  var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+                  assembler->Goto(&loop);
+                }
+              }
+            }
+
+            assembler->Bind(&if_lhsisnotnumber);
+            {
+              // Check if {lhs} is a JSReceiver.
+              Label if_lhsisreceiver(assembler, Label::kDeferred),
+                  if_lhsisnotreceiver(assembler);
+              assembler->Branch(
+                  assembler->IsJSReceiverInstanceType(lhs_instance_type),
+                  &if_lhsisreceiver, &if_lhsisnotreceiver);
+
+              assembler->Bind(&if_lhsisreceiver);
+              {
+                // Convert {lhs} to a primitive first passing no hint.
+                Callable callable =
+                    CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
+                var_lhs.Bind(assembler->CallStub(callable, context, lhs));
+                assembler->Goto(&loop);
+              }
+
+              assembler->Bind(&if_lhsisnotreceiver);
+              {
+                // Check if {rhs} is a JSReceiver.
+                Label if_rhsisreceiver(assembler, Label::kDeferred),
+                    if_rhsisnotreceiver(assembler, Label::kDeferred);
+                assembler->Branch(
+                    assembler->IsJSReceiverInstanceType(rhs_instance_type),
+                    &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+                assembler->Bind(&if_rhsisreceiver);
+                {
+                  // Convert {rhs} to a primitive first passing no hint.
+                  Callable callable = CodeFactory::NonPrimitiveToPrimitive(
+                      assembler->isolate());
+                  var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+                  assembler->Goto(&loop);
+                }
+
+                assembler->Bind(&if_rhsisnotreceiver);
+                {
+                  // Convert {lhs} to a Number first.
+                  Callable callable =
+                      CodeFactory::NonNumberToNumber(assembler->isolate());
+                  var_lhs.Bind(assembler->CallStub(callable, context, lhs));
+                  assembler->Goto(&loop);
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+  assembler->Bind(&string_add_convert_left);
+  {
+    // Convert {lhs}, which is not a String, to a String and concatenate the
+    // resulting string with the String {rhs}.
+    Callable callable = CodeFactory::StringAdd(
+        assembler->isolate(), STRING_ADD_CONVERT_LEFT, NOT_TENURED);
+    var_result.Bind(assembler->CallStub(callable, context, var_lhs.value(),
+                                        var_rhs.value()));
+    assembler->Goto(&end);
+  }
+
+  assembler->Bind(&string_add_convert_right);
+  {
+    // Convert {rhs} to a String (if it isn't one already) and concatenate
+    // the String {lhs} with the result.
+    Callable callable = CodeFactory::StringAdd(
+        assembler->isolate(), STRING_ADD_CONVERT_RIGHT, NOT_TENURED);
+    var_result.Bind(assembler->CallStub(callable, context, var_lhs.value(),
+                                        var_rhs.value()));
+    assembler->Goto(&end);
+  }
+
+  assembler->Bind(&do_fadd);
+  {
+    Node* lhs_value = var_fadd_lhs.value();
+    Node* rhs_value = var_fadd_rhs.value();
+    Node* value = assembler->Float64Add(lhs_value, rhs_value);
+    Node* result = assembler->AllocateHeapNumberWithValue(value);
+    var_result.Bind(result);
+    assembler->Goto(&end);
+  }
+  assembler->Bind(&end);
+  assembler->Return(var_result.value());
+}
+
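+// ES6 section 12.7.4 The Subtraction Operator ( - )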
+void Builtins::Generate_Subtract(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* left = assembler->Parameter(0);
+  Node* right = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  // Shared entry for floating point subtraction.
+  Label do_fsub(assembler), end(assembler);
+  Variable var_fsub_lhs(assembler, MachineRepresentation::kFloat64),
+      var_fsub_rhs(assembler, MachineRepresentation::kFloat64);
+
+  // We might need to loop several times due to ToPrimitive and/or ToNumber
+  // conversions.
+  Variable var_lhs(assembler, MachineRepresentation::kTagged),
+      var_rhs(assembler, MachineRepresentation::kTagged),
+      var_result(assembler, MachineRepresentation::kTagged);
+  Variable* loop_vars[2] = {&var_lhs, &var_rhs};
+  Label loop(assembler, 2, loop_vars);
+  var_lhs.Bind(left);
+  var_rhs.Bind(right);
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    // Load the current {lhs} and {rhs} values.
+    Node* lhs = var_lhs.value();
+    Node* rhs = var_rhs.value();
+
+    // Check if the {lhs} is a Smi or a HeapObject.
+    Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
+    assembler->Branch(assembler->TaggedIsSmi(lhs), &if_lhsissmi,
+                      &if_lhsisnotsmi);
+
+    assembler->Bind(&if_lhsissmi);
+    {
+      // Check if the {rhs} is also a Smi.
+      Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+      assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
+                        &if_rhsisnotsmi);
+
+      assembler->Bind(&if_rhsissmi);
+      {
+        // Try a fast Smi subtraction first.
+        Node* pair = assembler->IntPtrSubWithOverflow(
+            assembler->BitcastTaggedToWord(lhs),
+            assembler->BitcastTaggedToWord(rhs));
+        Node* overflow = assembler->Projection(1, pair);
+
+        // Check if the Smi subtraction overflowed.
+        Label if_overflow(assembler), if_notoverflow(assembler);
+        assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+
+        assembler->Bind(&if_overflow);
+        {
+          // The result doesn't fit into Smi range.
+          var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
+          var_fsub_rhs.Bind(assembler->SmiToFloat64(rhs));
+          assembler->Goto(&do_fsub);
+        }
+
+        assembler->Bind(&if_notoverflow);
+        var_result.Bind(assembler->BitcastWordToTaggedSigned(
+            assembler->Projection(0, pair)));
+        assembler->Goto(&end);
+      }
+
+      assembler->Bind(&if_rhsisnotsmi);
+      {
+        // Load the map of the {rhs}.
+        Node* rhs_map = assembler->LoadMap(rhs);
+
+        // Check if {rhs} is a HeapNumber.
+        Label if_rhsisnumber(assembler),
+            if_rhsisnotnumber(assembler, Label::kDeferred);
+        assembler->Branch(assembler->IsHeapNumberMap(rhs_map), &if_rhsisnumber,
+                          &if_rhsisnotnumber);
+
+        assembler->Bind(&if_rhsisnumber);
+        {
+          // Perform a floating point subtraction.
+          var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
+          var_fsub_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+          assembler->Goto(&do_fsub);
+        }
+
+        assembler->Bind(&if_rhsisnotnumber);
+        {
+          // Convert the {rhs} to a Number first.
+          Callable callable =
+              CodeFactory::NonNumberToNumber(assembler->isolate());
+          var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+          assembler->Goto(&loop);
+        }
+      }
+    }
+
+    assembler->Bind(&if_lhsisnotsmi);
+    {
+      // Load the map of the {lhs}.
+      Node* lhs_map = assembler->LoadMap(lhs);
+
+      // Check if the {lhs} is a HeapNumber.
+      Label if_lhsisnumber(assembler),
+          if_lhsisnotnumber(assembler, Label::kDeferred);
+      Node* number_map = assembler->HeapNumberMapConstant();
+      assembler->Branch(assembler->WordEqual(lhs_map, number_map),
+                        &if_lhsisnumber, &if_lhsisnotnumber);
+
+      assembler->Bind(&if_lhsisnumber);
+      {
+        // Check if the {rhs} is a Smi.
+        Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+        assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
+                          &if_rhsisnotsmi);
+
+        assembler->Bind(&if_rhsissmi);
+        {
+          // Perform a floating point subtraction.
+          var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+          var_fsub_rhs.Bind(assembler->SmiToFloat64(rhs));
+          assembler->Goto(&do_fsub);
+        }
+
+        assembler->Bind(&if_rhsisnotsmi);
+        {
+          // Load the map of the {rhs}.
+          Node* rhs_map = assembler->LoadMap(rhs);
+
+          // Check if the {rhs} is a HeapNumber.
+          Label if_rhsisnumber(assembler),
+              if_rhsisnotnumber(assembler, Label::kDeferred);
+          assembler->Branch(assembler->WordEqual(rhs_map, number_map),
+                            &if_rhsisnumber, &if_rhsisnotnumber);
+
+          assembler->Bind(&if_rhsisnumber);
+          {
+            // Perform a floating point subtraction.
+            var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+            var_fsub_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+            assembler->Goto(&do_fsub);
+          }
+
+          assembler->Bind(&if_rhsisnotnumber);
+          {
+            // Convert the {rhs} to a Number first.
+            Callable callable =
+                CodeFactory::NonNumberToNumber(assembler->isolate());
+            var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+            assembler->Goto(&loop);
+          }
+        }
+      }
+
+      assembler->Bind(&if_lhsisnotnumber);
+      {
+        // Convert the {lhs} to a Number first.
+        Callable callable =
+            CodeFactory::NonNumberToNumber(assembler->isolate());
+        var_lhs.Bind(assembler->CallStub(callable, context, lhs));
+        assembler->Goto(&loop);
+      }
+    }
+  }
+
+  assembler->Bind(&do_fsub);
+  {
+    Node* lhs_value = var_fsub_lhs.value();
+    Node* rhs_value = var_fsub_rhs.value();
+    Node* value = assembler->Float64Sub(lhs_value, rhs_value);
+    var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
+    assembler->Goto(&end);
+  }
+  assembler->Bind(&end);
+  assembler->Return(var_result.value());
+}
+
+void Builtins::Generate_Multiply(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* left = assembler->Parameter(0);
+  Node* right = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  // Shared entry point for floating point multiplication.
+  Label do_fmul(assembler), return_result(assembler);
+  Variable var_lhs_float64(assembler, MachineRepresentation::kFloat64),
+      var_rhs_float64(assembler, MachineRepresentation::kFloat64);
+
+  Node* number_map = assembler->HeapNumberMapConstant();
+
+  // We might need to loop one or two times due to ToNumber conversions.
+  Variable var_lhs(assembler, MachineRepresentation::kTagged),
+      var_rhs(assembler, MachineRepresentation::kTagged),
+      var_result(assembler, MachineRepresentation::kTagged);
+  Variable* loop_variables[] = {&var_lhs, &var_rhs};
+  Label loop(assembler, 2, loop_variables);
+  var_lhs.Bind(left);
+  var_rhs.Bind(right);
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    Node* lhs = var_lhs.value();
+    Node* rhs = var_rhs.value();
+
+    Label lhs_is_smi(assembler), lhs_is_not_smi(assembler);
+    assembler->Branch(assembler->TaggedIsSmi(lhs), &lhs_is_smi,
+                      &lhs_is_not_smi);
+
+    assembler->Bind(&lhs_is_smi);
+    {
+      Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
+      assembler->Branch(assembler->TaggedIsSmi(rhs), &rhs_is_smi,
+                        &rhs_is_not_smi);
+
+      assembler->Bind(&rhs_is_smi);
+      {
+        // Both {lhs} and {rhs} are Smis. The result is not necessarily a Smi,
+        // since the multiplication can overflow.
+        var_result.Bind(assembler->SmiMul(lhs, rhs));
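+        // (SmiMul inlines the overflow and -0 checks, e.g. for -1 * 0, and
+        // falls back to a HeapNumber result when needed.)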
+        assembler->Goto(&return_result);
+      }
+
+      assembler->Bind(&rhs_is_not_smi);
+      {
+        Node* rhs_map = assembler->LoadMap(rhs);
+
+        // Check if {rhs} is a HeapNumber.
+        Label rhs_is_number(assembler),
+            rhs_is_not_number(assembler, Label::kDeferred);
+        assembler->Branch(assembler->WordEqual(rhs_map, number_map),
+                          &rhs_is_number, &rhs_is_not_number);
+
+        assembler->Bind(&rhs_is_number);
+        {
+          // Convert {lhs} to a double and multiply it with the value of {rhs}.
+          var_lhs_float64.Bind(assembler->SmiToFloat64(lhs));
+          var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
+          assembler->Goto(&do_fmul);
+        }
+
+        assembler->Bind(&rhs_is_not_number);
+        {
+          // Multiplication is commutative, swap {lhs} with {rhs} and loop.
+          var_lhs.Bind(rhs);
+          var_rhs.Bind(lhs);
+          assembler->Goto(&loop);
+        }
+      }
+    }
+
+    assembler->Bind(&lhs_is_not_smi);
+    {
+      Node* lhs_map = assembler->LoadMap(lhs);
+
+      // Check if {lhs} is a HeapNumber.
+      Label lhs_is_number(assembler),
+          lhs_is_not_number(assembler, Label::kDeferred);
+      assembler->Branch(assembler->WordEqual(lhs_map, number_map),
+                        &lhs_is_number, &lhs_is_not_number);
+
+      assembler->Bind(&lhs_is_number);
+      {
+        // Check if {rhs} is a Smi.
+        Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
+        assembler->Branch(assembler->TaggedIsSmi(rhs), &rhs_is_smi,
+                          &rhs_is_not_smi);
+
+        assembler->Bind(&rhs_is_smi);
+        {
+          // Convert {rhs} to a double and multiply it with the value of {lhs}.
+          var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
+          var_rhs_float64.Bind(assembler->SmiToFloat64(rhs));
+          assembler->Goto(&do_fmul);
+        }
+
+        assembler->Bind(&rhs_is_not_smi);
+        {
+          Node* rhs_map = assembler->LoadMap(rhs);
+
+          // Check if {rhs} is a HeapNumber.
+          Label rhs_is_number(assembler),
+              rhs_is_not_number(assembler, Label::kDeferred);
+          assembler->Branch(assembler->WordEqual(rhs_map, number_map),
+                            &rhs_is_number, &rhs_is_not_number);
+
+          assembler->Bind(&rhs_is_number);
+          {
+            // Both {lhs} and {rhs} are HeapNumbers. Load their values and
+            // multiply them.
+            var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
+            var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
+            assembler->Goto(&do_fmul);
+          }
+
+          assembler->Bind(&rhs_is_not_number);
+          {
+            // Multiplication is commutative, swap {lhs} with {rhs} and loop.
+            var_lhs.Bind(rhs);
+            var_rhs.Bind(lhs);
+            assembler->Goto(&loop);
+          }
+        }
+      }
+
+      assembler->Bind(&lhs_is_not_number);
+      {
+        // Convert {lhs} to a Number and loop.
+        Callable callable =
+            CodeFactory::NonNumberToNumber(assembler->isolate());
+        var_lhs.Bind(assembler->CallStub(callable, context, lhs));
+        assembler->Goto(&loop);
+      }
+    }
+  }
+
+  assembler->Bind(&do_fmul);
+  {
+    Node* value =
+        assembler->Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
+    Node* result = assembler->AllocateHeapNumberWithValue(value);
+    var_result.Bind(result);
+    assembler->Goto(&return_result);
+  }
+
+  assembler->Bind(&return_result);
+  assembler->Return(var_result.value());
+}
+
+void Builtins::Generate_Divide(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* left = assembler->Parameter(0);
+  Node* right = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  // Shared entry point for floating point division.
+  Label do_fdiv(assembler), end(assembler);
+  Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
+      var_divisor_float64(assembler, MachineRepresentation::kFloat64);
+
+  Node* number_map = assembler->HeapNumberMapConstant();
+
+  // We might need to loop one or two times due to ToNumber conversions.
+  Variable var_dividend(assembler, MachineRepresentation::kTagged),
+      var_divisor(assembler, MachineRepresentation::kTagged),
+      var_result(assembler, MachineRepresentation::kTagged);
+  Variable* loop_variables[] = {&var_dividend, &var_divisor};
+  Label loop(assembler, 2, loop_variables);
+  var_dividend.Bind(left);
+  var_divisor.Bind(right);
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    Node* dividend = var_dividend.value();
+    Node* divisor = var_divisor.value();
+
+    Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
+    assembler->Branch(assembler->TaggedIsSmi(dividend), &dividend_is_smi,
+                      &dividend_is_not_smi);
+
+    assembler->Bind(&dividend_is_smi);
+    {
+      Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+      assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
+                        &divisor_is_not_smi);
+
+      assembler->Bind(&divisor_is_smi);
+      {
+        Label bailout(assembler);
+
+        // Do floating point division if {divisor} is zero.
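+        // (x / 0 is +/-Infinity and 0 / 0 is NaN, none of which are Smis.)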
+        assembler->GotoIf(
+            assembler->WordEqual(divisor, assembler->IntPtrConstant(0)),
+            &bailout);
+
+        // Do floating point division if {dividend} is zero and {divisor} is
+        // negative, since the result is -0 and cannot be represented as a
+        // Smi.
+        Label dividend_is_zero(assembler), dividend_is_not_zero(assembler);
+        assembler->Branch(
+            assembler->WordEqual(dividend, assembler->IntPtrConstant(0)),
+            &dividend_is_zero, &dividend_is_not_zero);
+
+        assembler->Bind(&dividend_is_zero);
+        {
+          assembler->GotoIf(
+              assembler->IntPtrLessThan(divisor, assembler->IntPtrConstant(0)),
+              &bailout);
+          assembler->Goto(&dividend_is_not_zero);
+        }
+        assembler->Bind(&dividend_is_not_zero);
+
+        Node* untagged_divisor = assembler->SmiUntag(divisor);
+        Node* untagged_dividend = assembler->SmiUntag(dividend);
+
+        // Do floating point division if {dividend} is kMinInt (or kMinInt >> 1
+        // if the Smi size is 31) and {divisor} is -1, since the negated value
+        // does not fit into Smi range.
+        Label divisor_is_minus_one(assembler),
+            divisor_is_not_minus_one(assembler);
+        assembler->Branch(assembler->Word32Equal(untagged_divisor,
+                                                 assembler->Int32Constant(-1)),
+                          &divisor_is_minus_one, &divisor_is_not_minus_one);
+
+        assembler->Bind(&divisor_is_minus_one);
+        {
+          assembler->GotoIf(
+              assembler->Word32Equal(
+                  untagged_dividend,
+                  assembler->Int32Constant(
+                      kSmiValueSize == 32 ? kMinInt : (kMinInt >> 1))),
+              &bailout);
+          assembler->Goto(&divisor_is_not_minus_one);
+        }
+        assembler->Bind(&divisor_is_not_minus_one);
+
+        // TODO(epertoso): consider adding a machine instruction that returns
+        // both the result and the remainder.
+        Node* untagged_result =
+            assembler->Int32Div(untagged_dividend, untagged_divisor);
+        Node* truncated =
+            assembler->Int32Mul(untagged_result, untagged_divisor);
+        // Do floating point division if the remainder is not 0.
+        assembler->GotoIf(
+            assembler->Word32NotEqual(untagged_dividend, truncated), &bailout);
+        var_result.Bind(assembler->SmiTag(untagged_result));
+        assembler->Goto(&end);
+
+        // Bailout: convert {dividend} and {divisor} to double and do double
+        // division.
+        assembler->Bind(&bailout);
+        {
+          var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
+          var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
+          assembler->Goto(&do_fdiv);
+        }
+      }
+
+      assembler->Bind(&divisor_is_not_smi);
+      {
+        Node* divisor_map = assembler->LoadMap(divisor);
+
+        // Check if {divisor} is a HeapNumber.
+        Label divisor_is_number(assembler),
+            divisor_is_not_number(assembler, Label::kDeferred);
+        assembler->Branch(assembler->WordEqual(divisor_map, number_map),
+                          &divisor_is_number, &divisor_is_not_number);
+
+        assembler->Bind(&divisor_is_number);
+        {
+          // Convert {dividend} to a double and divide it with the value of
+          // {divisor}.
+          var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
+          var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+          assembler->Goto(&do_fdiv);
+        }
+
+        assembler->Bind(&divisor_is_not_number);
+        {
+          // Convert {divisor} to a number and loop.
+          Callable callable =
+              CodeFactory::NonNumberToNumber(assembler->isolate());
+          var_divisor.Bind(assembler->CallStub(callable, context, divisor));
+          assembler->Goto(&loop);
+        }
+      }
+    }
+
+    assembler->Bind(&dividend_is_not_smi);
+    {
+      Node* dividend_map = assembler->LoadMap(dividend);
+
+      // Check if {dividend} is a HeapNumber.
+      Label dividend_is_number(assembler),
+          dividend_is_not_number(assembler, Label::kDeferred);
+      assembler->Branch(assembler->WordEqual(dividend_map, number_map),
+                        &dividend_is_number, &dividend_is_not_number);
+
+      assembler->Bind(&dividend_is_number);
+      {
+        // Check if {divisor} is a Smi.
+        Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+        assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
+                          &divisor_is_not_smi);
+
+        assembler->Bind(&divisor_is_smi);
+        {
+          // Convert {divisor} to a double and use it for a floating point
+          // division.
+          var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+          var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
+          assembler->Goto(&do_fdiv);
+        }
+
+        assembler->Bind(&divisor_is_not_smi);
+        {
+          Node* divisor_map = assembler->LoadMap(divisor);
+
+          // Check if {divisor} is a HeapNumber.
+          Label divisor_is_number(assembler),
+              divisor_is_not_number(assembler, Label::kDeferred);
+          assembler->Branch(assembler->WordEqual(divisor_map, number_map),
+                            &divisor_is_number, &divisor_is_not_number);
+
+          assembler->Bind(&divisor_is_number);
+          {
+            // Both {dividend} and {divisor} are HeapNumbers. Load their values
+            // and divide them.
+            var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+            var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+            assembler->Goto(&do_fdiv);
+          }
+
+          assembler->Bind(&divisor_is_not_number);
+          {
+            // Convert {divisor} to a number and loop.
+            Callable callable =
+                CodeFactory::NonNumberToNumber(assembler->isolate());
+            var_divisor.Bind(assembler->CallStub(callable, context, divisor));
+            assembler->Goto(&loop);
+          }
+        }
+      }
+
+      assembler->Bind(&dividend_is_not_number);
+      {
+        // Convert {dividend} to a Number and loop.
+        Callable callable =
+            CodeFactory::NonNumberToNumber(assembler->isolate());
+        var_dividend.Bind(assembler->CallStub(callable, context, dividend));
+        assembler->Goto(&loop);
+      }
+    }
+  }
+
+  assembler->Bind(&do_fdiv);
+  {
+    Node* value = assembler->Float64Div(var_dividend_float64.value(),
+                                        var_divisor_float64.value());
+    var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
+    assembler->Goto(&end);
+  }
+  assembler->Bind(&end);
+  assembler->Return(var_result.value());
+}
+
+void Builtins::Generate_Modulus(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* left = assembler->Parameter(0);
+  Node* right = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  Variable var_result(assembler, MachineRepresentation::kTagged);
+  Label return_result(assembler, &var_result);
+
+  // Shared entry point for floating point modulus.
+  Label do_fmod(assembler);
+  Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
+      var_divisor_float64(assembler, MachineRepresentation::kFloat64);
+
+  Node* number_map = assembler->HeapNumberMapConstant();
+
+  // We might need to loop one or two times due to ToNumber conversions.
+  Variable var_dividend(assembler, MachineRepresentation::kTagged),
+      var_divisor(assembler, MachineRepresentation::kTagged);
+  Variable* loop_variables[] = {&var_dividend, &var_divisor};
+  Label loop(assembler, 2, loop_variables);
+  var_dividend.Bind(left);
+  var_divisor.Bind(right);
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    Node* dividend = var_dividend.value();
+    Node* divisor = var_divisor.value();
+
+    Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
+    assembler->Branch(assembler->TaggedIsSmi(dividend), &dividend_is_smi,
+                      &dividend_is_not_smi);
+
+    assembler->Bind(&dividend_is_smi);
+    {
+      Label dividend_is_not_zero(assembler);
+      Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+      assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
+                        &divisor_is_not_smi);
+
+      assembler->Bind(&divisor_is_smi);
+      {
+        // Compute the modulus of two Smis.
+        var_result.Bind(assembler->SmiMod(dividend, divisor));
+        assembler->Goto(&return_result);
+      }
+
+      assembler->Bind(&divisor_is_not_smi);
+      {
+        Node* divisor_map = assembler->LoadMap(divisor);
+
+        // Check if {divisor} is a HeapNumber.
+        Label divisor_is_number(assembler),
+            divisor_is_not_number(assembler, Label::kDeferred);
+        assembler->Branch(assembler->WordEqual(divisor_map, number_map),
+                          &divisor_is_number, &divisor_is_not_number);
+
+        assembler->Bind(&divisor_is_number);
+        {
+          // Convert {dividend} to a double and compute its modulus with the
+          // value of {divisor}.
+          var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
+          var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+          assembler->Goto(&do_fmod);
+        }
+
+        assembler->Bind(&divisor_is_not_number);
+        {
+          // Convert {divisor} to a number and loop.
+          Callable callable =
+              CodeFactory::NonNumberToNumber(assembler->isolate());
+          var_divisor.Bind(assembler->CallStub(callable, context, divisor));
+          assembler->Goto(&loop);
+        }
+      }
+    }
+
+    assembler->Bind(&dividend_is_not_smi);
+    {
+      Node* dividend_map = assembler->LoadMap(dividend);
+
+      // Check if {dividend} is a HeapNumber.
+      Label dividend_is_number(assembler),
+          dividend_is_not_number(assembler, Label::kDeferred);
+      assembler->Branch(assembler->WordEqual(dividend_map, number_map),
+                        &dividend_is_number, &dividend_is_not_number);
+
+      assembler->Bind(&dividend_is_number);
+      {
+        // Check if {divisor} is a Smi.
+        Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+        assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
+                          &divisor_is_not_smi);
+
+        assembler->Bind(&divisor_is_smi);
+        {
+          // Convert {divisor} to a double and compute {dividend}'s modulus with
+          // it.
+          var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+          var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
+          assembler->Goto(&do_fmod);
+        }
+
+        assembler->Bind(&divisor_is_not_smi);
+        {
+          Node* divisor_map = assembler->LoadMap(divisor);
+
+          // Check if {divisor} is a HeapNumber.
+          Label divisor_is_number(assembler),
+              divisor_is_not_number(assembler, Label::kDeferred);
+          assembler->Branch(assembler->WordEqual(divisor_map, number_map),
+                            &divisor_is_number, &divisor_is_not_number);
+
+          assembler->Bind(&divisor_is_number);
+          {
+            // Both {dividend} and {divisor} are HeapNumbers. Load their values
+            // and compute their modulus.
+            var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+            var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+            assembler->Goto(&do_fmod);
+          }
+
+          assembler->Bind(&divisor_is_not_number);
+          {
+            // Convert {divisor} to a number and loop.
+            Callable callable =
+                CodeFactory::NonNumberToNumber(assembler->isolate());
+            var_divisor.Bind(assembler->CallStub(callable, context, divisor));
+            assembler->Goto(&loop);
+          }
+        }
+      }
+
+      assembler->Bind(&dividend_is_not_number);
+      {
+        // Convert {dividend} to a Number and loop.
+        Callable callable =
+            CodeFactory::NonNumberToNumber(assembler->isolate());
+        var_dividend.Bind(assembler->CallStub(callable, context, dividend));
+        assembler->Goto(&loop);
+      }
+    }
+  }
+
+  assembler->Bind(&do_fmod);
+  {
+    Node* value = assembler->Float64Mod(var_dividend_float64.value(),
+                                        var_divisor_float64.value());
+    var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
+    assembler->Goto(&return_result);
+  }
+
+  assembler->Bind(&return_result);
+  assembler->Return(var_result.value());
+}
+
+void Builtins::Generate_ShiftLeft(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* left = assembler->Parameter(0);
+  Node* right = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
+  Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
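+  // Per JavaScript shift semantics, only the least significant five bits of
+  // the right operand are used as the shift count (count & 0x1f).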
+  Node* shift_count =
+      assembler->Word32And(rhs_value, assembler->Int32Constant(0x1f));
+  Node* value = assembler->Word32Shl(lhs_value, shift_count);
+  Node* result = assembler->ChangeInt32ToTagged(value);
+  assembler->Return(result);
+}
+
+void Builtins::Generate_ShiftRight(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* left = assembler->Parameter(0);
+  Node* right = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
+  Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
+  Node* shift_count =
+      assembler->Word32And(rhs_value, assembler->Int32Constant(0x1f));
+  Node* value = assembler->Word32Sar(lhs_value, shift_count);
+  Node* result = assembler->ChangeInt32ToTagged(value);
+  assembler->Return(result);
+}
+
+void Builtins::Generate_ShiftRightLogical(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* left = assembler->Parameter(0);
+  Node* right = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
+  Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
+  Node* shift_count =
+      assembler->Word32And(rhs_value, assembler->Int32Constant(0x1f));
+  Node* value = assembler->Word32Shr(lhs_value, shift_count);
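+  // >>> produces an unsigned result, e.g. (-1 >>> 0) === 4294967295, hence
+  // the Uint32-to-tagged conversion below.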
+  Node* result = assembler->ChangeUint32ToTagged(value);
+  assembler->Return(result);
+}
+
+void Builtins::Generate_BitwiseAnd(CodeStubAssembler* assembler) {
+  compiler::Node* left = assembler->Parameter(0);
+  compiler::Node* right = assembler->Parameter(1);
+  compiler::Node* context = assembler->Parameter(2);
+
+  using compiler::Node;
+
+  Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
+  Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
+  Node* value = assembler->Word32And(lhs_value, rhs_value);
+  Node* result = assembler->ChangeInt32ToTagged(value);
+  assembler->Return(result);
+}
+
+void Builtins::Generate_BitwiseOr(CodeStubAssembler* assembler) {
+  compiler::Node* left = assembler->Parameter(0);
+  compiler::Node* right = assembler->Parameter(1);
+  compiler::Node* context = assembler->Parameter(2);
+
+  using compiler::Node;
+
+  Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
+  Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
+  Node* value = assembler->Word32Or(lhs_value, rhs_value);
+  Node* result = assembler->ChangeInt32ToTagged(value);
+  assembler->Return(result);
+}
+
+void Builtins::Generate_BitwiseXor(CodeStubAssembler* assembler) {
+  compiler::Node* left = assembler->Parameter(0);
+  compiler::Node* right = assembler->Parameter(1);
+  compiler::Node* context = assembler->Parameter(2);
+
+  using compiler::Node;
+
+  Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
+  Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
+  Node* value = assembler->Word32Xor(lhs_value, rhs_value);
+  Node* result = assembler->ChangeInt32ToTagged(value);
+  assembler->Return(result);
+}
+
+void Builtins::Generate_LessThan(CodeStubAssembler* assembler) {
+  compiler::Node* lhs = assembler->Parameter(0);
+  compiler::Node* rhs = assembler->Parameter(1);
+  compiler::Node* context = assembler->Parameter(2);
+
+  assembler->Return(assembler->RelationalComparison(
+      CodeStubAssembler::kLessThan, lhs, rhs, context));
+}
+
+void Builtins::Generate_LessThanOrEqual(CodeStubAssembler* assembler) {
+  compiler::Node* lhs = assembler->Parameter(0);
+  compiler::Node* rhs = assembler->Parameter(1);
+  compiler::Node* context = assembler->Parameter(2);
+
+  assembler->Return(assembler->RelationalComparison(
+      CodeStubAssembler::kLessThanOrEqual, lhs, rhs, context));
+}
+
+void Builtins::Generate_GreaterThan(CodeStubAssembler* assembler) {
+  compiler::Node* lhs = assembler->Parameter(0);
+  compiler::Node* rhs = assembler->Parameter(1);
+  compiler::Node* context = assembler->Parameter(2);
+
+  assembler->Return(assembler->RelationalComparison(
+      CodeStubAssembler::kGreaterThan, lhs, rhs, context));
+}
+
+void Builtins::Generate_GreaterThanOrEqual(CodeStubAssembler* assembler) {
+  compiler::Node* lhs = assembler->Parameter(0);
+  compiler::Node* rhs = assembler->Parameter(1);
+  compiler::Node* context = assembler->Parameter(2);
+
+  assembler->Return(assembler->RelationalComparison(
+      CodeStubAssembler::kGreaterThanOrEqual, lhs, rhs, context));
+}
+
+void Builtins::Generate_Equal(CodeStubAssembler* assembler) {
+  compiler::Node* lhs = assembler->Parameter(0);
+  compiler::Node* rhs = assembler->Parameter(1);
+  compiler::Node* context = assembler->Parameter(2);
+
+  assembler->Return(assembler->Equal(CodeStubAssembler::kDontNegateResult, lhs,
+                                     rhs, context));
+}
+
+void Builtins::Generate_NotEqual(CodeStubAssembler* assembler) {
+  compiler::Node* lhs = assembler->Parameter(0);
+  compiler::Node* rhs = assembler->Parameter(1);
+  compiler::Node* context = assembler->Parameter(2);
+
+  assembler->Return(
+      assembler->Equal(CodeStubAssembler::kNegateResult, lhs, rhs, context));
+}
+
+void Builtins::Generate_StrictEqual(CodeStubAssembler* assembler) {
+  compiler::Node* lhs = assembler->Parameter(0);
+  compiler::Node* rhs = assembler->Parameter(1);
+  compiler::Node* context = assembler->Parameter(2);
+
+  assembler->Return(assembler->StrictEqual(CodeStubAssembler::kDontNegateResult,
+                                           lhs, rhs, context));
+}
+
+void Builtins::Generate_StrictNotEqual(CodeStubAssembler* assembler) {
+  compiler::Node* lhs = assembler->Parameter(0);
+  compiler::Node* rhs = assembler->Parameter(1);
+  compiler::Node* context = assembler->Parameter(2);
+
+  assembler->Return(assembler->StrictEqual(CodeStubAssembler::kNegateResult,
+                                           lhs, rhs, context));
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/builtins/builtins-object.cc b/src/builtins/builtins-object.cc
index 78df2d6..abb5c47 100644
--- a/src/builtins/builtins-object.cc
+++ b/src/builtins/builtins-object.cc
@@ -28,7 +28,7 @@
 
   // Smi receivers do not have own properties.
   Label if_objectisnotsmi(assembler);
-  assembler->Branch(assembler->WordIsSmi(object), &return_false,
+  assembler->Branch(assembler->TaggedIsSmi(object), &return_false,
                     &if_objectisnotsmi);
   assembler->Bind(&if_objectisnotsmi);
 
@@ -46,6 +46,10 @@
                                &return_false, &call_runtime);
 
   assembler->Bind(&keyisindex);
+  // Handle negative keys in the runtime.
+  assembler->GotoIf(assembler->IntPtrLessThan(var_index.value(),
+                                              assembler->IntPtrConstant(0)),
+                    &call_runtime);
   assembler->TryLookupElement(object, map, instance_type, var_index.value(),
                               &return_true, &return_false, &call_runtime);
 
@@ -224,7 +228,7 @@
   typedef CodeStubAssembler::Label Label;
 
   Label if_notsmi(assembler);
-  assembler->Branch(assembler->WordIsSmi(object), if_notstring, &if_notsmi);
+  assembler->Branch(assembler->TaggedIsSmi(object), if_notstring, &if_notsmi);
 
   assembler->Bind(&if_notsmi);
   {
@@ -296,13 +300,13 @@
   Node* context = assembler->Parameter(3);
 
   assembler->GotoIf(
-      assembler->Word32Equal(receiver, assembler->UndefinedConstant()),
+      assembler->WordEqual(receiver, assembler->UndefinedConstant()),
       &return_undefined);
 
-  assembler->GotoIf(assembler->Word32Equal(receiver, assembler->NullConstant()),
+  assembler->GotoIf(assembler->WordEqual(receiver, assembler->NullConstant()),
                     &return_null);
 
-  assembler->GotoIf(assembler->WordIsSmi(receiver), &return_number);
+  assembler->GotoIf(assembler->TaggedIsSmi(receiver), &return_number);
 
   Node* receiver_instance_type = assembler->LoadInstanceType(receiver);
   ReturnIfPrimitive(assembler, receiver_instance_type, &return_string,
@@ -427,7 +431,7 @@
     assembler->Bind(&return_jsvalue);
     {
       Node* value = assembler->LoadJSValueValue(receiver);
-      assembler->GotoIf(assembler->WordIsSmi(value), &return_number);
+      assembler->GotoIf(assembler->TaggedIsSmi(value), &return_number);
 
       ReturnIfPrimitive(assembler, assembler->LoadInstanceType(value),
                         &return_string, &return_boolean, &return_number);
@@ -443,13 +447,8 @@
       Node* map = assembler->LoadMap(receiver);
 
       // Return object if the proxy {receiver} is not callable.
-      assembler->Branch(
-          assembler->Word32Equal(
-              assembler->Word32And(
-                  assembler->LoadMapBitField(map),
-                  assembler->Int32Constant(1 << Map::kIsCallable)),
-              assembler->Int32Constant(0)),
-          &return_object, &return_function);
+      assembler->Branch(assembler->IsCallableMap(map), &return_function,
+                        &return_object);
     }
 
     // Default
@@ -459,57 +458,95 @@
   }
 }
 
-// ES6 section 19.1.2.2 Object.create ( O [ , Properties ] )
-// TODO(verwaest): Support the common cases with precached map directly in
-// an Object.create stub.
-BUILTIN(ObjectCreate) {
-  HandleScope scope(isolate);
-  Handle<Object> prototype = args.atOrUndefined(isolate, 1);
-  if (!prototype->IsNull(isolate) && !prototype->IsJSReceiver()) {
-    THROW_NEW_ERROR_RETURN_FAILURE(
-        isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, prototype));
+void Builtins::Generate_ObjectCreate(CodeStubAssembler* a) {
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* prototype = a->Parameter(1);
+  Node* properties = a->Parameter(2);
+  Node* context = a->Parameter(3 + 2);
+
+  Label call_runtime(a, Label::kDeferred), prototype_valid(a), no_properties(a);
+  {
+    a->Comment("Argument 1 check: prototype");
+    a->GotoIf(a->WordEqual(prototype, a->NullConstant()), &prototype_valid);
+    a->BranchIfJSReceiver(prototype, &prototype_valid, &call_runtime);
   }
 
-  // Generate the map with the specified {prototype} based on the Object
-  // function's initial map from the current native context.
-  // TODO(bmeurer): Use a dedicated cache for Object.create; think about
-  // slack tracking for Object.create.
-  Handle<Map> map(isolate->native_context()->object_function()->initial_map(),
-                  isolate);
-  if (map->prototype() != *prototype) {
-    if (prototype->IsNull(isolate)) {
-      map = isolate->object_with_null_prototype_map();
-    } else if (prototype->IsJSObject()) {
-      Handle<JSObject> js_prototype = Handle<JSObject>::cast(prototype);
-      if (!js_prototype->map()->is_prototype_map()) {
-        JSObject::OptimizeAsPrototype(js_prototype, FAST_PROTOTYPE);
-      }
-      Handle<PrototypeInfo> info =
-          Map::GetOrCreatePrototypeInfo(js_prototype, isolate);
-      // TODO(verwaest): Use inobject slack tracking for this map.
-      if (info->HasObjectCreateMap()) {
-        map = handle(info->ObjectCreateMap(), isolate);
-      } else {
-        map = Map::CopyInitialMap(map);
-        Map::SetPrototype(map, prototype, FAST_PROTOTYPE);
-        PrototypeInfo::SetObjectCreateMap(info, map);
-      }
-    } else {
-      map = Map::TransitionToPrototype(map, prototype, REGULAR_PROTOTYPE);
+  a->Bind(&prototype_valid);
+  {
+    a->Comment("Argument 2 check: properties");
+    // Check that {properties} is a simple object.
+    a->GotoIf(a->TaggedIsSmi(properties), &call_runtime);
+    // Undefined implies no properties.
+    a->GotoIf(a->WordEqual(properties, a->UndefinedConstant()), &no_properties);
+    Node* properties_map = a->LoadMap(properties);
+    a->GotoIf(a->IsSpecialReceiverMap(properties_map), &call_runtime);
+    // Stay on the fast path only if there are no elements.
+    a->GotoUnless(a->WordEqual(a->LoadElements(properties),
+                               a->LoadRoot(Heap::kEmptyFixedArrayRootIndex)),
+                  &call_runtime);
+    // Handle dictionary objects or fast objects with properties in runtime.
+    Node* bit_field3 = a->LoadMapBitField3(properties_map);
+    a->GotoIf(a->IsSetWord32<Map::DictionaryMap>(bit_field3), &call_runtime);
+    a->Branch(a->IsSetWord32<Map::NumberOfOwnDescriptorsBits>(bit_field3),
+              &call_runtime, &no_properties);
+  }
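+  // Only calls without own property descriptors to define (roughly
+  // Object.create(proto) and Object.create(proto, {})) stay on this fast
+  // path; everything else falls back to the runtime below.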
+
+  // Create a new object with the given prototype.
+  a->Bind(&no_properties);
+  {
+    Variable map(a, MachineRepresentation::kTagged);
+    Variable properties(a, MachineRepresentation::kTagged);
+    Label non_null_proto(a), instantiate_map(a), good(a);
+
+    a->Branch(a->WordEqual(prototype, a->NullConstant()), &good,
+              &non_null_proto);
+
+    a->Bind(&good);
+    {
+      map.Bind(a->LoadContextElement(
+          context, Context::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP));
+      properties.Bind(
+          a->AllocateNameDictionary(NameDictionary::kInitialCapacity));
+      a->Goto(&instantiate_map);
+    }
+
+    a->Bind(&non_null_proto);
+    {
+      properties.Bind(a->EmptyFixedArrayConstant());
+      Node* object_function =
+          a->LoadContextElement(context, Context::OBJECT_FUNCTION_INDEX);
+      Node* object_function_map = a->LoadObjectField(
+          object_function, JSFunction::kPrototypeOrInitialMapOffset);
+      map.Bind(object_function_map);
+      a->GotoIf(a->WordEqual(prototype, a->LoadMapPrototype(map.value())),
+                &instantiate_map);
+      // Try loading the prototype info.
+      Node* prototype_info =
+          a->LoadMapPrototypeInfo(a->LoadMap(prototype), &call_runtime);
+      a->Comment("Load ObjectCreateMap from PrototypeInfo");
+      Node* weak_cell =
+          a->LoadObjectField(prototype_info, PrototypeInfo::kObjectCreateMap);
+      a->GotoIf(a->WordEqual(weak_cell, a->UndefinedConstant()), &call_runtime);
+      map.Bind(a->LoadWeakCellValue(weak_cell, &call_runtime));
+      a->Goto(&instantiate_map);
+    }
+
+    a->Bind(&instantiate_map);
+    {
+      Node* instance =
+          a->AllocateJSObjectFromMap(map.value(), properties.value());
+      a->Return(instance);
     }
   }
 
-  // Actually allocate the object.
-  Handle<JSObject> object = isolate->factory()->NewJSObjectFromMap(map);
-
-  // Define the properties if properties was specified and is not undefined.
-  Handle<Object> properties = args.atOrUndefined(isolate, 2);
-  if (!properties->IsUndefined(isolate)) {
-    RETURN_FAILURE_ON_EXCEPTION(
-        isolate, JSReceiver::DefineProperties(isolate, object, properties));
+  a->Bind(&call_runtime);
+  {
+    a->Return(
+        a->CallRuntime(Runtime::kObjectCreate, context, prototype, properties));
   }
-
-  return *object;
 }
 
 // ES6 section 19.1.2.3 Object.defineProperties
@@ -688,6 +725,85 @@
                            JSReceiver::GetPrototype(isolate, receiver));
 }
 
+// ES6 section 19.1.2.21 Object.setPrototypeOf ( O, proto )
+BUILTIN(ObjectSetPrototypeOf) {
+  HandleScope scope(isolate);
+
+  // 1. Let O be ? RequireObjectCoercible(O).
+  Handle<Object> object = args.atOrUndefined(isolate, 1);
+  if (object->IsNull(isolate) || object->IsUndefined(isolate)) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  "Object.setPrototypeOf")));
+  }
+
+  // 2. If Type(proto) is neither Object nor Null, throw a TypeError exception.
+  Handle<Object> proto = args.atOrUndefined(isolate, 2);
+  if (!proto->IsNull(isolate) && !proto->IsJSReceiver()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, proto));
+  }
+
+  // 3. If Type(O) is not Object, return O.
+  if (!object->IsJSReceiver()) return *object;
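+  // (e.g. Object.setPrototypeOf(42, null) simply returns 42)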
+  Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
+
+  // 4. Let status be ? O.[[SetPrototypeOf]](proto).
+  // 5. If status is false, throw a TypeError exception.
+  MAYBE_RETURN(
+      JSReceiver::SetPrototype(receiver, proto, true, Object::THROW_ON_ERROR),
+      isolate->heap()->exception());
+
+  // 6. Return O.
+  return *receiver;
+}
+
+// ES6 section B.2.2.1.1 get Object.prototype.__proto__
+BUILTIN(ObjectPrototypeGetProto) {
+  HandleScope scope(isolate);
+  // 1. Let O be ? ToObject(this value).
+  Handle<JSReceiver> receiver;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, receiver, Object::ToObject(isolate, args.receiver()));
+
+  // 2. Return ? O.[[GetPrototypeOf]]().
+  RETURN_RESULT_OR_FAILURE(isolate,
+                           JSReceiver::GetPrototype(isolate, receiver));
+}
+
+// ES6 section B.2.2.1.2 set Object.prototype.__proto__
+BUILTIN(ObjectPrototypeSetProto) {
+  HandleScope scope(isolate);
+  // 1. Let O be ? RequireObjectCoercible(this value).
+  Handle<Object> object = args.receiver();
+  if (object->IsNull(isolate) || object->IsUndefined(isolate)) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  "set Object.prototype.__proto__")));
+  }
+
+  // 2. If Type(proto) is neither Object nor Null, return undefined.
+  Handle<Object> proto = args.at<Object>(1);
+  if (!proto->IsNull(isolate) && !proto->IsJSReceiver()) {
+    return isolate->heap()->undefined_value();
+  }
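+  // Note the contrast with Object.setPrototypeOf: an invalid {proto} is
+  // silently ignored here, e.g. o.__proto__ = 42 leaves o's prototype intact.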
+
+  // 3. If Type(O) is not Object, return undefined.
+  if (!object->IsJSReceiver()) return isolate->heap()->undefined_value();
+  Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
+
+  // 4. Let status be ? O.[[SetPrototypeOf]](proto).
+  // 5. If status is false, throw a TypeError exception.
+  MAYBE_RETURN(
+      JSReceiver::SetPrototype(receiver, proto, true, Object::THROW_ON_ERROR),
+      isolate->heap()->exception());
+
+  // 6. Return undefined.
+  return isolate->heap()->undefined_value();
+}
+
 // ES6 section 19.1.2.6 Object.getOwnPropertyDescriptor ( O, P )
 BUILTIN(ObjectGetOwnPropertyDescriptor) {
   HandleScope scope(isolate);
@@ -906,6 +1022,39 @@
   return *object;
 }
 
+void Builtins::Generate_HasProperty(CodeStubAssembler* assembler) {
+  typedef HasPropertyDescriptor Descriptor;
+  typedef compiler::Node Node;
+
+  Node* key = assembler->Parameter(Descriptor::kKey);
+  Node* object = assembler->Parameter(Descriptor::kObject);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  assembler->Return(
+      assembler->HasProperty(object, key, context, Runtime::kHasProperty));
+}
+
+void Builtins::Generate_ForInFilter(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+  typedef ForInFilterDescriptor Descriptor;
+
+  Node* key = assembler->Parameter(Descriptor::kKey);
+  Node* object = assembler->Parameter(Descriptor::kObject);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  assembler->Return(assembler->ForInFilter(key, object, context));
+}
+
+void Builtins::Generate_InstanceOf(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+  typedef CompareDescriptor Descriptor;
+  Node* object = assembler->Parameter(Descriptor::kLeft);
+  Node* callable = assembler->Parameter(Descriptor::kRight);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  assembler->Return(assembler->InstanceOf(object, callable, context));
+}
+
 // ES6 section 7.3.19 OrdinaryHasInstance ( C, O )
 void Builtins::Generate_OrdinaryHasInstance(CodeStubAssembler* assembler) {
   typedef compiler::Node Node;
diff --git a/src/builtins/builtins-promise.cc b/src/builtins/builtins-promise.cc
new file mode 100644
index 0000000..9f5d7c8
--- /dev/null
+++ b/src/builtins/builtins-promise.cc
@@ -0,0 +1,84 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+
+#include "src/promise-utils.h"
+
+namespace v8 {
+namespace internal {
+
+// ES#sec-promise-resolve-functions
+// Promise Resolve Functions
+BUILTIN(PromiseResolveClosure) {
+  HandleScope scope(isolate);
+
+  Handle<Context> context(isolate->context(), isolate);
+
+  if (PromiseUtils::HasAlreadyVisited(context)) {
+    return isolate->heap()->undefined_value();
+  }
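+  // The resolve and reject closures created for a promise share this state,
+  // so only the first call to either of them has an effect; later calls
+  // return undefined without doing anything.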
+
+  PromiseUtils::SetAlreadyVisited(context);
+  Handle<JSObject> promise = handle(PromiseUtils::GetPromise(context), isolate);
+  Handle<Object> value = args.atOrUndefined(isolate, 1);
+
+  Handle<Object> argv[] = {promise, value};
+  RETURN_FAILURE_ON_EXCEPTION(
+      isolate, Execution::Call(isolate, isolate->promise_resolve(),
+                               isolate->factory()->undefined_value(),
+                               arraysize(argv), argv));
+  return isolate->heap()->undefined_value();
+}
+
+// ES#sec-promise-reject-functions
+// Promise Reject Functions
+BUILTIN(PromiseRejectClosure) {
+  HandleScope scope(isolate);
+
+  Handle<Context> context(isolate->context(), isolate);
+
+  if (PromiseUtils::HasAlreadyVisited(context)) {
+    return isolate->heap()->undefined_value();
+  }
+
+  PromiseUtils::SetAlreadyVisited(context);
+  Handle<Object> value = args.atOrUndefined(isolate, 1);
+  Handle<JSObject> promise = handle(PromiseUtils::GetPromise(context), isolate);
+  Handle<Object> debug_event =
+      handle(PromiseUtils::GetDebugEvent(context), isolate);
+  Handle<Object> argv[] = {promise, value, debug_event};
+  RETURN_FAILURE_ON_EXCEPTION(
+      isolate, Execution::Call(isolate, isolate->promise_internal_reject(),
+                               isolate->factory()->undefined_value(),
+                               arraysize(argv), argv));
+  return isolate->heap()->undefined_value();
+}
+
+// ES#sec-createresolvingfunctions
+// CreateResolvingFunctions ( promise )
+BUILTIN(CreateResolvingFunctions) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(3, args.length());
+
+  Handle<JSObject> promise = args.at<JSObject>(1);
+  Handle<Object> debug_event = args.at<Object>(2);
+  Handle<JSFunction> resolve, reject;
+
+  PromiseUtils::CreateResolvingFunctions(isolate, promise, debug_event,
+                                         &resolve, &reject);
+
+  Handle<FixedArray> result = isolate->factory()->NewFixedArray(2);
+  result->set(0, *resolve);
+  result->set(1, *reject);
+
+  return *isolate->factory()->NewJSArrayWithElements(result, FAST_ELEMENTS, 2,
+                                                     NOT_TENURED);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-regexp.cc b/src/builtins/builtins-regexp.cc
index 371221f..5f8d18b 100644
--- a/src/builtins/builtins-regexp.cc
+++ b/src/builtins/builtins-regexp.cc
@@ -7,6 +7,8 @@
 
 #include "src/code-factory.h"
 #include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp-utils.h"
+#include "src/string-builder.h"
 
 namespace v8 {
 namespace internal {
@@ -16,27 +18,6 @@
 
 namespace {
 
-// ES#sec-isregexp IsRegExp ( argument )
-Maybe<bool> IsRegExp(Isolate* isolate, Handle<Object> object) {
-  if (!object->IsJSReceiver()) return Just(false);
-
-  Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
-
-  if (isolate->regexp_function()->initial_map() == receiver->map()) {
-    // Fast-path for unmodified JSRegExp instances.
-    return Just(true);
-  }
-
-  Handle<Object> match;
-  ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-      isolate, match,
-      JSObject::GetProperty(receiver, isolate->factory()->match_symbol()),
-      Nothing<bool>());
-
-  if (!match->IsUndefined(isolate)) return Just(match->BooleanValue());
-  return Just(object->IsJSRegExp());
-}
-
 Handle<String> PatternFlags(Isolate* isolate, Handle<JSRegExp> regexp) {
   static const int kMaxFlagsLength = 5 + 1;  // 5 flags and '\0';
   char flags_string[kMaxFlagsLength];
@@ -58,10 +39,10 @@
 
 // ES#sec-regexpinitialize
 // Runtime Semantics: RegExpInitialize ( obj, pattern, flags )
-MaybeHandle<JSRegExp> RegExpInitialize(Isolate* isolate,
-                                       Handle<JSRegExp> regexp,
-                                       Handle<Object> pattern,
-                                       Handle<Object> flags) {
+MUST_USE_RESULT MaybeHandle<JSRegExp> RegExpInitialize(Isolate* isolate,
+                                                       Handle<JSRegExp> regexp,
+                                                       Handle<Object> pattern,
+                                                       Handle<Object> flags) {
   Handle<String> pattern_string;
   if (pattern->IsUndefined(isolate)) {
     pattern_string = isolate->factory()->empty_string();
@@ -79,9 +60,7 @@
   }
 
   // TODO(jgruber): We could avoid the flags back and forth conversions.
-  RETURN_RESULT(isolate,
-                JSRegExp::Initialize(regexp, pattern_string, flags_string),
-                JSRegExp);
+  return JSRegExp::Initialize(regexp, pattern_string, flags_string);
 }
 
 }  // namespace
@@ -99,7 +78,8 @@
 
   bool pattern_is_regexp;
   {
-    Maybe<bool> maybe_pattern_is_regexp = IsRegExp(isolate, pattern);
+    Maybe<bool> maybe_pattern_is_regexp =
+        RegExpUtils::IsRegExp(isolate, pattern);
     if (maybe_pattern_is_regexp.IsNothing()) {
       DCHECK(isolate->has_pending_exception());
       return isolate->heap()->exception();
@@ -158,36 +138,77 @@
                            RegExpInitialize(isolate, regexp, pattern, flags));
 }
 
+BUILTIN(RegExpPrototypeCompile) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSRegExp, regexp, "RegExp.prototype.compile");
+
+  Handle<Object> pattern = args.atOrUndefined(isolate, 1);
+  Handle<Object> flags = args.atOrUndefined(isolate, 2);
+
+  if (pattern->IsJSRegExp()) {
+    Handle<JSRegExp> pattern_regexp = Handle<JSRegExp>::cast(pattern);
+
+    if (!flags->IsUndefined(isolate)) {
+      THROW_NEW_ERROR_RETURN_FAILURE(
+          isolate, NewTypeError(MessageTemplate::kRegExpFlags));
+    }
+
+    flags = PatternFlags(isolate, pattern_regexp);
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, pattern,
+        Object::GetProperty(pattern, isolate->factory()->source_string()));
+  }
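+  // e.g. re.compile(/ab/i) re-initializes re from the other regexp's source
+  // and flags, whereas re.compile(/ab/i, "g") throws per the check above.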
+
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, regexp, RegExpInitialize(isolate, regexp, pattern, flags));
+
+  // Return undefined for compatibility with JSC.
+  // See http://crbug.com/585775 for web compat details.
+
+  return isolate->heap()->undefined_value();
+}
+
 namespace {
 
+compiler::Node* FastLoadLastIndex(CodeStubAssembler* a, compiler::Node* context,
+                                  compiler::Node* regexp) {
+  // Load the in-object field.
+  static const int field_offset =
+      JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
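+  // On unmodified instances, lastIndex is stored as an in-object property
+  // directly after the JSRegExp header, so no property lookup is needed.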
+  return a->LoadObjectField(regexp, field_offset);
+}
+
+compiler::Node* SlowLoadLastIndex(CodeStubAssembler* a, compiler::Node* context,
+                                  compiler::Node* regexp) {
+  // Load through the GetProperty stub.
+  typedef compiler::Node Node;
+
+  Node* const name =
+      a->HeapConstant(a->isolate()->factory()->lastIndex_string());
+  Callable getproperty_callable = CodeFactory::GetProperty(a->isolate());
+  return a->CallStub(getproperty_callable, context, regexp, name);
+}
+
 compiler::Node* LoadLastIndex(CodeStubAssembler* a, compiler::Node* context,
                               compiler::Node* has_initialmap,
                               compiler::Node* regexp) {
   typedef CodeStubAssembler::Variable Variable;
   typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
 
   Variable var_value(a, MachineRepresentation::kTagged);
 
-  Label out(a), if_unmodified(a), if_modified(a, Label::kDeferred);
+  Label out(a), if_unmodified(a), if_modified(a);
   a->Branch(has_initialmap, &if_unmodified, &if_modified);
 
   a->Bind(&if_unmodified);
   {
-    // Load the in-object field.
-    static const int field_offset =
-        JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
-    var_value.Bind(a->LoadObjectField(regexp, field_offset));
+    var_value.Bind(FastLoadLastIndex(a, context, regexp));
     a->Goto(&out);
   }
 
   a->Bind(&if_modified);
   {
-    // Load through the GetProperty stub.
-    Node* const name =
-        a->HeapConstant(a->isolate()->factory()->last_index_string());
-    Callable getproperty_callable = CodeFactory::GetProperty(a->isolate());
-    var_value.Bind(a->CallStub(getproperty_callable, context, regexp, name));
+    var_value.Bind(SlowLoadLastIndex(a, context, regexp));
     a->Goto(&out);
   }
 
@@ -195,33 +216,46 @@
   return var_value.value();
 }
 
+// The fast path of StoreLastIndex, used when {regexp} is guaranteed to be an
+// unmodified JSRegExp instance.
+void FastStoreLastIndex(CodeStubAssembler* a, compiler::Node* context,
+                        compiler::Node* regexp, compiler::Node* value) {
+  // Store the in-object field.
+  static const int field_offset =
+      JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
+  a->StoreObjectField(regexp, field_offset, value);
+}
+
+void SlowStoreLastIndex(CodeStubAssembler* a, compiler::Node* context,
+                        compiler::Node* regexp, compiler::Node* value) {
+  // Store through runtime.
+  // TODO(ishell): Use SetPropertyStub here once available.
+  typedef compiler::Node Node;
+
+  Node* const name =
+      a->HeapConstant(a->isolate()->factory()->lastIndex_string());
+  Node* const language_mode = a->SmiConstant(Smi::FromInt(STRICT));
+  a->CallRuntime(Runtime::kSetProperty, context, regexp, name, value,
+                 language_mode);
+}
+
 void StoreLastIndex(CodeStubAssembler* a, compiler::Node* context,
                     compiler::Node* has_initialmap, compiler::Node* regexp,
                     compiler::Node* value) {
   typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
 
-  Label out(a), if_unmodified(a), if_modified(a, Label::kDeferred);
+  Label out(a), if_unmodified(a), if_modified(a);
   a->Branch(has_initialmap, &if_unmodified, &if_modified);
 
   a->Bind(&if_unmodified);
   {
-    // Store the in-object field.
-    static const int field_offset =
-        JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
-    a->StoreObjectField(regexp, field_offset, value);
+    FastStoreLastIndex(a, context, regexp, value);
     a->Goto(&out);
   }
 
   a->Bind(&if_modified);
   {
-    // Store through runtime.
-    // TODO(ishell): Use SetPropertyStub here once available.
-    Node* const name =
-        a->HeapConstant(a->isolate()->factory()->last_index_string());
-    Node* const language_mode = a->SmiConstant(Smi::FromInt(STRICT));
-    a->CallRuntime(Runtime::kSetProperty, context, regexp, name, value,
-                   language_mode);
+    SlowStoreLastIndex(a, context, regexp, value);
     a->Goto(&out);
   }
 
@@ -231,7 +265,7 @@
 compiler::Node* ConstructNewResultFromMatchInfo(Isolate* isolate,
                                                 CodeStubAssembler* a,
                                                 compiler::Node* context,
-                                                compiler::Node* match_elements,
+                                                compiler::Node* match_info,
                                                 compiler::Node* string) {
   typedef CodeStubAssembler::Variable Variable;
   typedef CodeStubAssembler::Label Label;
@@ -241,13 +275,14 @@
 
   CodeStubAssembler::ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
   Node* const num_indices = a->SmiUntag(a->LoadFixedArrayElement(
-      match_elements, a->IntPtrConstant(RegExpImpl::kLastCaptureCount), 0,
+      match_info, a->IntPtrConstant(RegExpMatchInfo::kNumberOfCapturesIndex), 0,
       mode));
   Node* const num_results = a->SmiTag(a->WordShr(num_indices, 1));
   Node* const start = a->LoadFixedArrayElement(
-      match_elements, a->IntPtrConstant(RegExpImpl::kFirstCapture), 0, mode);
+      match_info, a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex), 0,
+      mode);
   Node* const end = a->LoadFixedArrayElement(
-      match_elements, a->IntPtrConstant(RegExpImpl::kFirstCapture + 1), 0,
+      match_info, a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 1), 0,
       mode);
 
   // Calculate the substring of the first match before creating the result array
@@ -264,13 +299,14 @@
   a->GotoIf(a->SmiEqual(num_results, a->SmiConstant(Smi::FromInt(1))), &out);
 
   // Store all remaining captures.
-  Node* const limit =
-      a->IntPtrAdd(a->IntPtrConstant(RegExpImpl::kFirstCapture), num_indices);
+  Node* const limit = a->IntPtrAdd(
+      a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex), num_indices);
 
   Variable var_from_cursor(a, MachineType::PointerRepresentation());
   Variable var_to_cursor(a, MachineType::PointerRepresentation());
 
-  var_from_cursor.Bind(a->IntPtrConstant(RegExpImpl::kFirstCapture + 2));
+  var_from_cursor.Bind(
+      a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 2));
   var_to_cursor.Bind(a->IntPtrConstant(1));
 
   Variable* vars[] = {&var_from_cursor, &var_to_cursor};
@@ -281,15 +317,14 @@
   {
     Node* const from_cursor = var_from_cursor.value();
     Node* const to_cursor = var_to_cursor.value();
-    Node* const start = a->LoadFixedArrayElement(match_elements, from_cursor);
+    Node* const start = a->LoadFixedArrayElement(match_info, from_cursor);
 
     Label next_iter(a);
     a->GotoIf(a->SmiEqual(start, a->SmiConstant(Smi::FromInt(-1))), &next_iter);
 
     Node* const from_cursor_plus1 =
         a->IntPtrAdd(from_cursor, a->IntPtrConstant(1));
-    Node* const end =
-        a->LoadFixedArrayElement(match_elements, from_cursor_plus1);
+    Node* const end = a->LoadFixedArrayElement(match_info, from_cursor_plus1);
 
     Node* const capture = a->SubString(context, string, start, end);
     a->StoreFixedArrayElement(result_elements, to_cursor, capture);
@@ -305,29 +340,29 @@
   return result;
 }
 
-}  // namespace
-
 // ES#sec-regexp.prototype.exec
 // RegExp.prototype.exec ( string )
-void Builtins::Generate_RegExpPrototypeExec(CodeStubAssembler* a) {
+compiler::Node* RegExpPrototypeExecInternal(CodeStubAssembler* a,
+                                            compiler::Node* context,
+                                            compiler::Node* maybe_receiver,
+                                            compiler::Node* maybe_string) {
   typedef CodeStubAssembler::Variable Variable;
   typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
 
   Isolate* const isolate = a->isolate();
 
-  Node* const receiver = a->Parameter(0);
-  Node* const maybe_string = a->Parameter(1);
-  Node* const context = a->Parameter(4);
-
   Node* const null = a->NullConstant();
   Node* const int_zero = a->IntPtrConstant(0);
-  Node* const smi_zero = a->SmiConstant(Smi::FromInt(0));
+  Node* const smi_zero = a->SmiConstant(Smi::kZero);
 
-  // Ensure {receiver} is a JSRegExp.
+  Variable var_result(a, MachineRepresentation::kTagged);
+  Label out(a);
+
+  // Ensure {maybe_receiver} is a JSRegExp.
   Node* const regexp_map = a->ThrowIfNotInstanceType(
-      context, receiver, JS_REGEXP_TYPE, "RegExp.prototype.exec");
-  Node* const regexp = receiver;
+      context, maybe_receiver, JS_REGEXP_TYPE, "RegExp.prototype.exec");
+  Node* const regexp = maybe_receiver;
 
   // Check whether the regexp instance is unmodified.
   Node* const native_context = a->LoadNativeContext(context);
@@ -369,14 +404,15 @@
       var_lastindex.Bind(lastindex);
 
       Label if_isoob(a, Label::kDeferred);
-      a->GotoUnless(a->WordIsSmi(lastindex), &if_isoob);
+      a->GotoUnless(a->TaggedIsSmi(lastindex), &if_isoob);
       a->GotoUnless(a->SmiLessThanOrEqual(lastindex, string_length), &if_isoob);
       a->Goto(&run_exec);
 
       a->Bind(&if_isoob);
       {
         StoreLastIndex(a, context, has_initialmap, regexp, smi_zero);
-        a->Return(null);
+        var_result.Bind(null);
+        a->Goto(&out);
       }
     }
 
@@ -400,7 +436,7 @@
     match_indices = a->CallStub(exec_callable, context, regexp, string,
                                 var_lastindex.value(), last_match_info);
 
-    // {match_indices} is either null or the RegExpLastMatchInfo array.
+    // {match_indices} is either null or the RegExpMatchInfo array.
     // Return early if exec failed, possibly updating last index.
     a->GotoUnless(a->WordEqual(match_indices, null), &successful_match);
 
@@ -411,19 +447,19 @@
     a->Goto(&return_null);
 
     a->Bind(&return_null);
-    a->Return(null);
+    var_result.Bind(null);
+    a->Goto(&out);
   }
 
   Label construct_result(a);
   a->Bind(&successful_match);
   {
-    Node* const match_elements = a->LoadElements(match_indices);
-
     a->GotoUnless(should_update_last_index, &construct_result);
 
     // Update the new last index from {match_indices}.
     Node* const new_lastindex = a->LoadFixedArrayElement(
-        match_elements, a->IntPtrConstant(RegExpImpl::kFirstCapture + 1));
+        match_indices,
+        a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 1));
 
     StoreLastIndex(a, context, has_initialmap, regexp, new_lastindex);
     a->Goto(&construct_result);
@@ -431,9 +467,1637 @@
     a->Bind(&construct_result);
     {
       Node* result = ConstructNewResultFromMatchInfo(isolate, a, context,
-                                                     match_elements, string);
+                                                     match_indices, string);
+      var_result.Bind(result);
+      a->Goto(&out);
+    }
+  }
+
+  a->Bind(&out);
+  return var_result.value();
+}
+
+}  // namespace
+
+// ES#sec-regexp.prototype.exec
+// RegExp.prototype.exec ( string )
+void Builtins::Generate_RegExpPrototypeExec(CodeStubAssembler* a) {
+  typedef compiler::Node Node;
+
+  Node* const maybe_receiver = a->Parameter(0);
+  Node* const maybe_string = a->Parameter(1);
+  Node* const context = a->Parameter(4);
+
+  Node* const result =
+      RegExpPrototypeExecInternal(a, context, maybe_receiver, maybe_string);
+  a->Return(result);
+}
+
+namespace {
+
+compiler::Node* ThrowIfNotJSReceiver(CodeStubAssembler* a, Isolate* isolate,
+                                     compiler::Node* context,
+                                     compiler::Node* value,
+                                     MessageTemplate::Template msg_template,
+                                     char const* method_name) {
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Label out(a), throw_exception(a, Label::kDeferred);
+  Variable var_value_map(a, MachineRepresentation::kTagged);
+
+  a->GotoIf(a->TaggedIsSmi(value), &throw_exception);
+
+  // Load the instance type of the {value}.
+  var_value_map.Bind(a->LoadMap(value));
+  Node* const value_instance_type =
+      a->LoadMapInstanceType(var_value_map.value());
+
+  a->Branch(a->IsJSReceiverInstanceType(value_instance_type), &out,
+            &throw_exception);
+
+  // The {value} is not a compatible receiver for this method.
+  a->Bind(&throw_exception);
+  {
+    Node* const message_id = a->SmiConstant(Smi::FromInt(msg_template));
+    Node* const method_name_str = a->HeapConstant(
+        isolate->factory()->NewStringFromAsciiChecked(method_name, TENURED));
+
+    Callable callable = CodeFactory::ToString(isolate);
+    Node* const value_str = a->CallStub(callable, context, value);
+
+    a->CallRuntime(Runtime::kThrowTypeError, context, message_id,
+                   method_name_str, value_str);
+    var_value_map.Bind(a->UndefinedConstant());
+    a->Goto(&out);  // Never reached.
+  }
+
+  a->Bind(&out);
+  return var_value_map.value();
+}
+
+compiler::Node* IsInitialRegExpMap(CodeStubAssembler* a,
+                                   compiler::Node* context,
+                                   compiler::Node* map) {
+  typedef compiler::Node Node;
+
+  Node* const native_context = a->LoadNativeContext(context);
+  Node* const regexp_fun =
+      a->LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
+  Node* const initial_map =
+      a->LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
+  Node* const has_initialmap = a->WordEqual(map, initial_map);
+
+  return has_initialmap;
+}
+
+// RegExp fast path implementations rely on unmodified JSRegExp instances.
+// We use fairly coarse granularity for this check: both the regexp itself
+// (i.e. its map has not changed) and its prototype must be unmodified.
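+// Anything that changes either map (e.g. a subclassed regexp, or a regexp or
+// prototype with added properties) therefore takes the modified path.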
+void BranchIfFastPath(CodeStubAssembler* a, compiler::Node* context,
+                      compiler::Node* map,
+                      CodeStubAssembler::Label* if_isunmodified,
+                      CodeStubAssembler::Label* if_ismodified) {
+  typedef compiler::Node Node;
+
+  Node* const native_context = a->LoadNativeContext(context);
+  Node* const regexp_fun =
+      a->LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
+  Node* const initial_map =
+      a->LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
+  Node* const has_initialmap = a->WordEqual(map, initial_map);
+
+  a->GotoUnless(has_initialmap, if_ismodified);
+
+  Node* const initial_proto_initial_map = a->LoadContextElement(
+      native_context, Context::REGEXP_PROTOTYPE_MAP_INDEX);
+  Node* const proto_map = a->LoadMap(a->LoadMapPrototype(map));
+  Node* const proto_has_initialmap =
+      a->WordEqual(proto_map, initial_proto_initial_map);
+
+  // TODO(ishell): Update this check once map changes for constant field
+  // tracking are landing.
+
+  a->Branch(proto_has_initialmap, if_isunmodified, if_ismodified);
+}
+
+}  // namespace
+
+void Builtins::Generate_RegExpPrototypeFlagsGetter(CodeStubAssembler* a) {
+  typedef CodeStubAssembler::Variable Variable;
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Node* const receiver = a->Parameter(0);
+  Node* const context = a->Parameter(3);
+
+  Isolate* isolate = a->isolate();
+  Node* const int_zero = a->IntPtrConstant(0);
+  Node* const int_one = a->IntPtrConstant(1);
+
+  Node* const map = ThrowIfNotJSReceiver(a, isolate, context, receiver,
+                                         MessageTemplate::kRegExpNonObject,
+                                         "RegExp.prototype.flags");
+
+  Variable var_length(a, MachineType::PointerRepresentation());
+  Variable var_flags(a, MachineType::PointerRepresentation());
+
+  // First, count the number of characters we will need and check which flags
+  // are set.
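+  // Flag characters are emitted in the fixed order g, i, m, u, y, so e.g.
+  // (/a/gimuy).flags is "gimuy".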
+
+  var_length.Bind(int_zero);
+
+  Label if_isunmodifiedjsregexp(a),
+      if_isnotunmodifiedjsregexp(a, Label::kDeferred);
+  a->Branch(IsInitialRegExpMap(a, context, map), &if_isunmodifiedjsregexp,
+            &if_isnotunmodifiedjsregexp);
+
+  Label construct_string(a);
+  a->Bind(&if_isunmodifiedjsregexp);
+  {
+    // Refer to JSRegExp's flag property on the fast-path.
+    Node* const flags_smi =
+        a->LoadObjectField(receiver, JSRegExp::kFlagsOffset);
+    Node* const flags_intptr = a->SmiUntag(flags_smi);
+    var_flags.Bind(flags_intptr);
+
+    Label label_global(a), label_ignorecase(a), label_multiline(a),
+        label_unicode(a), label_sticky(a);
+
+#define CASE_FOR_FLAG(FLAG, LABEL, NEXT_LABEL)                        \
+  do {                                                                \
+    a->Bind(&LABEL);                                                  \
+    Node* const mask = a->IntPtrConstant(FLAG);                       \
+    a->GotoIf(a->WordEqual(a->WordAnd(flags_intptr, mask), int_zero), \
+              &NEXT_LABEL);                                           \
+    var_length.Bind(a->IntPtrAdd(var_length.value(), int_one));       \
+    a->Goto(&NEXT_LABEL);                                             \
+  } while (false)
+
+    a->Goto(&label_global);
+    CASE_FOR_FLAG(JSRegExp::kGlobal, label_global, label_ignorecase);
+    CASE_FOR_FLAG(JSRegExp::kIgnoreCase, label_ignorecase, label_multiline);
+    CASE_FOR_FLAG(JSRegExp::kMultiline, label_multiline, label_unicode);
+    CASE_FOR_FLAG(JSRegExp::kUnicode, label_unicode, label_sticky);
+    CASE_FOR_FLAG(JSRegExp::kSticky, label_sticky, construct_string);
+#undef CASE_FOR_FLAG
+  }
+
+  a->Bind(&if_isnotunmodifiedjsregexp);
+  {
+    // Fall back to GetProperty stub on the slow-path.
+    var_flags.Bind(int_zero);
+
+    Callable getproperty_callable = CodeFactory::GetProperty(a->isolate());
+    Label label_global(a), label_ignorecase(a), label_multiline(a),
+        label_unicode(a), label_sticky(a);
+
+#define CASE_FOR_FLAG(NAME, FLAG, LABEL, NEXT_LABEL)                          \
+  do {                                                                        \
+    a->Bind(&LABEL);                                                          \
+    Node* const name =                                                        \
+        a->HeapConstant(isolate->factory()->NewStringFromAsciiChecked(NAME)); \
+    Node* const flag =                                                        \
+        a->CallStub(getproperty_callable, context, receiver, name);           \
+    Label if_isflagset(a);                                                    \
+    a->BranchIfToBooleanIsTrue(flag, &if_isflagset, &NEXT_LABEL);             \
+    a->Bind(&if_isflagset);                                                   \
+    var_length.Bind(a->IntPtrAdd(var_length.value(), int_one));               \
+    var_flags.Bind(a->WordOr(var_flags.value(), a->IntPtrConstant(FLAG)));    \
+    a->Goto(&NEXT_LABEL);                                                     \
+  } while (false)
+
+    a->Goto(&label_global);
+    CASE_FOR_FLAG("global", JSRegExp::kGlobal, label_global, label_ignorecase);
+    CASE_FOR_FLAG("ignoreCase", JSRegExp::kIgnoreCase, label_ignorecase,
+                  label_multiline);
+    CASE_FOR_FLAG("multiline", JSRegExp::kMultiline, label_multiline,
+                  label_unicode);
+    CASE_FOR_FLAG("unicode", JSRegExp::kUnicode, label_unicode, label_sticky);
+    CASE_FOR_FLAG("sticky", JSRegExp::kSticky, label_sticky, construct_string);
+#undef CASE_FOR_FLAG
+  }
+
+  // Allocate a string of the required length and fill it with the corresponding
+  // char for each set flag.
+
+  a->Bind(&construct_string);
+  {
+    Node* const result =
+        a->AllocateSeqOneByteString(context, var_length.value());
+    Node* const flags_intptr = var_flags.value();
+
+    Variable var_offset(a, MachineType::PointerRepresentation());
+    var_offset.Bind(
+        a->IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+
+    Label label_global(a), label_ignorecase(a), label_multiline(a),
+        label_unicode(a), label_sticky(a), out(a);
+
+#define CASE_FOR_FLAG(FLAG, CHAR, LABEL, NEXT_LABEL)                  \
+  do {                                                                \
+    a->Bind(&LABEL);                                                  \
+    Node* const mask = a->IntPtrConstant(FLAG);                       \
+    a->GotoIf(a->WordEqual(a->WordAnd(flags_intptr, mask), int_zero), \
+              &NEXT_LABEL);                                           \
+    Node* const value = a->IntPtrConstant(CHAR);                      \
+    a->StoreNoWriteBarrier(MachineRepresentation::kWord8, result,     \
+                           var_offset.value(), value);                \
+    var_offset.Bind(a->IntPtrAdd(var_offset.value(), int_one));       \
+    a->Goto(&NEXT_LABEL);                                             \
+  } while (false)
+
+    a->Goto(&label_global);
+    CASE_FOR_FLAG(JSRegExp::kGlobal, 'g', label_global, label_ignorecase);
+    CASE_FOR_FLAG(JSRegExp::kIgnoreCase, 'i', label_ignorecase,
+                  label_multiline);
+    CASE_FOR_FLAG(JSRegExp::kMultiline, 'm', label_multiline, label_unicode);
+    CASE_FOR_FLAG(JSRegExp::kUnicode, 'u', label_unicode, label_sticky);
+    CASE_FOR_FLAG(JSRegExp::kSticky, 'y', label_sticky, out);
+#undef CASE_FOR_FLAG
+
+    a->Bind(&out);
+    a->Return(result);
+  }
+}
+
+// ES6 21.2.5.10.
+BUILTIN(RegExpPrototypeSourceGetter) {
+  HandleScope scope(isolate);
+
+  Handle<Object> recv = args.receiver();
+  if (!recv->IsJSRegExp()) {
+    Handle<JSFunction> regexp_fun = isolate->regexp_function();
+    if (*recv == regexp_fun->prototype()) {
+      isolate->CountUsage(v8::Isolate::kRegExpPrototypeSourceGetter);
+      return *isolate->factory()->NewStringFromAsciiChecked("(?:)");
+    }
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kRegExpNonRegExp,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  "RegExp.prototype.source")));
+  }
+
+  Handle<JSRegExp> regexp = Handle<JSRegExp>::cast(recv);
+  return regexp->source();
+}
+
+BUILTIN(RegExpPrototypeToString) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSReceiver, recv, "RegExp.prototype.toString");
+
+  if (*recv == isolate->regexp_function()->prototype()) {
+    isolate->CountUsage(v8::Isolate::kRegExpPrototypeToString);
+  }
+
+  IncrementalStringBuilder builder(isolate);
+
+  builder.AppendCharacter('/');
+  {
+    Handle<Object> source;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, source,
+        JSReceiver::GetProperty(recv, isolate->factory()->source_string()));
+    Handle<String> source_str;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, source_str,
+                                       Object::ToString(isolate, source));
+    builder.AppendString(source_str);
+  }
+
+  builder.AppendCharacter('/');
+  {
+    Handle<Object> flags;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, flags,
+        JSReceiver::GetProperty(recv, isolate->factory()->flags_string()));
+    Handle<String> flags_str;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, flags_str,
+                                       Object::ToString(isolate, flags));
+    builder.AppendString(flags_str);
+  }
+
+  RETURN_RESULT_OR_FAILURE(isolate, builder.Finish());
+}
+
+// ES6 21.2.4.2.
+BUILTIN(RegExpPrototypeSpeciesGetter) {
+  HandleScope scope(isolate);
+  return *args.receiver();
+}
+
+namespace {
+
+// Fast-path implementation for flag checks on an unmodified JSRegExp instance.
+compiler::Node* FastFlagGetter(CodeStubAssembler* a,
+                               compiler::Node* const regexp,
+                               JSRegExp::Flag flag) {
+  typedef compiler::Node Node;
+
+  Node* const smi_zero = a->SmiConstant(Smi::kZero);
+  Node* const flags = a->LoadObjectField(regexp, JSRegExp::kFlagsOffset);
+  Node* const mask = a->SmiConstant(Smi::FromInt(flag));
+  Node* const is_flag_set = a->WordNotEqual(a->WordAnd(flags, mask), smi_zero);
+
+  return is_flag_set;
+}
+
+void Generate_FlagGetter(CodeStubAssembler* a, JSRegExp::Flag flag,
+                         v8::Isolate::UseCounterFeature counter,
+                         const char* method_name) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Node* const receiver = a->Parameter(0);
+  Node* const context = a->Parameter(3);
+
+  Isolate* isolate = a->isolate();
+
+  // Check whether we have an unmodified regexp instance.
+  Label if_isunmodifiedjsregexp(a),
+      if_isnotunmodifiedjsregexp(a, Label::kDeferred);
+
+  a->GotoIf(a->TaggedIsSmi(receiver), &if_isnotunmodifiedjsregexp);
+
+  Node* const receiver_map = a->LoadMap(receiver);
+  Node* const instance_type = a->LoadMapInstanceType(receiver_map);
+
+  a->Branch(a->Word32Equal(instance_type, a->Int32Constant(JS_REGEXP_TYPE)),
+            &if_isunmodifiedjsregexp, &if_isnotunmodifiedjsregexp);
+
+  a->Bind(&if_isunmodifiedjsregexp);
+  {
+    // Refer to JSRegExp's flag property on the fast-path.
+    Node* const is_flag_set = FastFlagGetter(a, receiver, flag);
+    a->Return(a->Select(is_flag_set, a->TrueConstant(), a->FalseConstant()));
+  }
+
+  a->Bind(&if_isnotunmodifiedjsregexp);
+  {
+    Node* const native_context = a->LoadNativeContext(context);
+    Node* const regexp_fun =
+        a->LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
+    Node* const initial_map = a->LoadObjectField(
+        regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
+    Node* const initial_prototype = a->LoadMapPrototype(initial_map);
+
+    Label if_isprototype(a), if_isnotprototype(a);
+    a->Branch(a->WordEqual(receiver, initial_prototype), &if_isprototype,
+              &if_isnotprototype);
+
+    a->Bind(&if_isprototype);
+    {
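+      // Web-compat carve-out: reading e.g. RegExp.prototype.global on the
+      // prototype itself returns undefined (plus a use counter) instead of
+      // throwing.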
+      Node* const counter_smi = a->SmiConstant(Smi::FromInt(counter));
+      a->CallRuntime(Runtime::kIncrementUseCounter, context, counter_smi);
+      a->Return(a->UndefinedConstant());
+    }
+
+    a->Bind(&if_isnotprototype);
+    {
+      Node* const message_id =
+          a->SmiConstant(Smi::FromInt(MessageTemplate::kRegExpNonRegExp));
+      Node* const method_name_str = a->HeapConstant(
+          isolate->factory()->NewStringFromAsciiChecked(method_name));
+      a->CallRuntime(Runtime::kThrowTypeError, context, message_id,
+                     method_name_str);
+      a->Return(a->UndefinedConstant());  // Never reached.
+    }
+  }
+}
+
+}  // namespace
+
+// ES6 21.2.5.4.
+void Builtins::Generate_RegExpPrototypeGlobalGetter(CodeStubAssembler* a) {
+  Generate_FlagGetter(a, JSRegExp::kGlobal,
+                      v8::Isolate::kRegExpPrototypeOldFlagGetter,
+                      "RegExp.prototype.global");
+}
+
+// ES6 21.2.5.5.
+void Builtins::Generate_RegExpPrototypeIgnoreCaseGetter(CodeStubAssembler* a) {
+  Generate_FlagGetter(a, JSRegExp::kIgnoreCase,
+                      v8::Isolate::kRegExpPrototypeOldFlagGetter,
+                      "RegExp.prototype.ignoreCase");
+}
+
+// ES6 21.2.5.7.
+void Builtins::Generate_RegExpPrototypeMultilineGetter(CodeStubAssembler* a) {
+  Generate_FlagGetter(a, JSRegExp::kMultiline,
+                      v8::Isolate::kRegExpPrototypeOldFlagGetter,
+                      "RegExp.prototype.multiline");
+}
+
+// ES6 21.2.5.12.
+void Builtins::Generate_RegExpPrototypeStickyGetter(CodeStubAssembler* a) {
+  Generate_FlagGetter(a, JSRegExp::kSticky,
+                      v8::Isolate::kRegExpPrototypeStickyGetter,
+                      "RegExp.prototype.sticky");
+}
+
+// ES6 21.2.5.15.
+void Builtins::Generate_RegExpPrototypeUnicodeGetter(CodeStubAssembler* a) {
+  Generate_FlagGetter(a, JSRegExp::kUnicode,
+                      v8::Isolate::kRegExpPrototypeUnicodeGetter,
+                      "RegExp.prototype.unicode");
+}
+
+// The properties $1..$9 are the first nine capturing substrings of the last
+// successful match, or ''.  The getter builtins below are defined for the
+// indices 1 through 9.
+#define DEFINE_CAPTURE_GETTER(i)                        \
+  BUILTIN(RegExpCapture##i##Getter) {                   \
+    HandleScope scope(isolate);                         \
+    return *RegExpUtils::GenericCaptureGetter(          \
+        isolate, isolate->regexp_last_match_info(), i); \
+  }
+DEFINE_CAPTURE_GETTER(1)
+DEFINE_CAPTURE_GETTER(2)
+DEFINE_CAPTURE_GETTER(3)
+DEFINE_CAPTURE_GETTER(4)
+DEFINE_CAPTURE_GETTER(5)
+DEFINE_CAPTURE_GETTER(6)
+DEFINE_CAPTURE_GETTER(7)
+DEFINE_CAPTURE_GETTER(8)
+DEFINE_CAPTURE_GETTER(9)
+#undef DEFINE_CAPTURE_GETTER
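+// e.g. after /(\d+)-(\d+)/.exec("12-34"), RegExp.$1 is "12" and RegExp.$2 is
+// "34"; capture groups that did not participate in the match yield ''.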
+
+// The properties `input` and `$_` are aliases for each other.  When this
+// property is set, the assigned value is coerced to a string.
+// Getter and setter for the input.
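+// e.g. RegExp.input and RegExp['$_'] always read and write the same value.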
+
+BUILTIN(RegExpInputGetter) {
+  HandleScope scope(isolate);
+  Handle<Object> obj(isolate->regexp_last_match_info()->LastInput(), isolate);
+  return obj->IsUndefined(isolate) ? isolate->heap()->empty_string()
+                                   : String::cast(*obj);
+}
+
+BUILTIN(RegExpInputSetter) {
+  HandleScope scope(isolate);
+  Handle<Object> value = args.atOrUndefined(isolate, 1);
+  Handle<String> str;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, str,
+                                     Object::ToString(isolate, value));
+  isolate->regexp_last_match_info()->SetLastInput(*str);
+  return isolate->heap()->undefined_value();
+}
+
+// Getters for the static properties lastMatch, lastParen, leftContext, and
+// rightContext of the RegExp constructor.  The properties are computed from
+// the captures array and subject string of the last successful match.
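+// For example, after /b/.exec("abc"): lastMatch is "b", leftContext is "a",
+// and rightContext is "c".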
+BUILTIN(RegExpLastMatchGetter) {
+  HandleScope scope(isolate);
+  return *RegExpUtils::GenericCaptureGetter(
+      isolate, isolate->regexp_last_match_info(), 0);
+}
+
+BUILTIN(RegExpLastParenGetter) {
+  HandleScope scope(isolate);
+  Handle<RegExpMatchInfo> match_info = isolate->regexp_last_match_info();
+  const int length = match_info->NumberOfCaptureRegisters();
+  if (length <= 2) return isolate->heap()->empty_string();  // No captures.
+
+  DCHECK_EQ(0, length % 2);
+  const int last_capture = (length / 2) - 1;
+
+  // We match the SpiderMonkey behavior: return the substring defined by the
+  // last pair (after the first pair) of elements of the capture array even if
+  // it is empty.
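+  // e.g. after /(a)|(b)/.exec("a"), lastParen is '' because the final capture
+  // group did not participate in the match.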
+  return *RegExpUtils::GenericCaptureGetter(isolate, match_info, last_capture);
+}
+
+BUILTIN(RegExpLeftContextGetter) {
+  HandleScope scope(isolate);
+  Handle<RegExpMatchInfo> match_info = isolate->regexp_last_match_info();
+  const int start_index = match_info->Capture(0);
+  Handle<String> last_subject(match_info->LastSubject());
+  return *isolate->factory()->NewSubString(last_subject, 0, start_index);
+}
+
+BUILTIN(RegExpRightContextGetter) {
+  HandleScope scope(isolate);
+  Handle<RegExpMatchInfo> match_info = isolate->regexp_last_match_info();
+  const int start_index = match_info->Capture(1);
+  Handle<String> last_subject(match_info->LastSubject());
+  const int len = last_subject->length();
+  return *isolate->factory()->NewSubString(last_subject, start_index, len);
+}
+
+namespace {
+
+// ES#sec-regexpexec Runtime Semantics: RegExpExec ( R, S )
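+// A sketch of the spec steps generated below: fetch R.exec; if it is
+// callable, call it and require the result to be an Object or null;
+// otherwise require R to be a JSRegExp and fall back to the builtin exec.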
+compiler::Node* RegExpExec(CodeStubAssembler* a, compiler::Node* context,
+                           compiler::Node* recv, compiler::Node* string) {
+  typedef CodeStubAssembler::Variable Variable;
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Isolate* isolate = a->isolate();
+
+  Node* const null = a->NullConstant();
+
+  Variable var_result(a, MachineRepresentation::kTagged);
+  Label out(a), call_builtin_exec(a), slow_path(a, Label::kDeferred);
+
+  Node* const map = a->LoadMap(recv);
+  BranchIfFastPath(a, context, map, &call_builtin_exec, &slow_path);
+
+  a->Bind(&call_builtin_exec);
+  {
+    Node* const result = RegExpPrototypeExecInternal(a, context, recv, string);
+    var_result.Bind(result);
+    a->Goto(&out);
+  }
+
+  a->Bind(&slow_path);
+  {
+    // Take the slow path of fetching the exec property, calling it, and
+    // verifying its return value.
+
+    // Get the exec property.
+    Node* const name = a->HeapConstant(isolate->factory()->exec_string());
+    Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+    Node* const exec = a->CallStub(getproperty_callable, context, recv, name);
+
+    // Is {exec} callable?
+    Label if_iscallable(a), if_isnotcallable(a);
+
+    a->GotoIf(a->TaggedIsSmi(exec), &if_isnotcallable);
+
+    Node* const exec_map = a->LoadMap(exec);
+    a->Branch(a->IsCallableMap(exec_map), &if_iscallable, &if_isnotcallable);
+
+    a->Bind(&if_iscallable);
+    {
+      Callable call_callable = CodeFactory::Call(isolate);
+      Node* const result =
+          a->CallJS(call_callable, context, exec, recv, string);
+
+      var_result.Bind(result);
+      a->GotoIf(a->WordEqual(result, null), &out);
+
+      ThrowIfNotJSReceiver(a, isolate, context, result,
+                           MessageTemplate::kInvalidRegExpExecResult, "unused");
+
+      a->Goto(&out);
+    }
+
+    a->Bind(&if_isnotcallable);
+    {
+      a->ThrowIfNotInstanceType(context, recv, JS_REGEXP_TYPE,
+                                "RegExp.prototype.exec");
+      a->Goto(&call_builtin_exec);
+    }
+  }
+
+  a->Bind(&out);
+  return var_result.value();
+}
+
+}  // namespace
+
+// ES#sec-regexp.prototype.test
+// RegExp.prototype.test ( S )
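+//
+// For example, /b/.test("abc") returns true and /z/.test("abc") returns
+// false; a user-installed exec method on the receiver is honored through
+// RegExpExec above.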
+void Builtins::Generate_RegExpPrototypeTest(CodeStubAssembler* a) {
+  typedef compiler::Node Node;
+
+  Isolate* const isolate = a->isolate();
+
+  Node* const maybe_receiver = a->Parameter(0);
+  Node* const maybe_string = a->Parameter(1);
+  Node* const context = a->Parameter(4);
+
+  // Ensure {maybe_receiver} is a JSReceiver.
+  ThrowIfNotJSReceiver(a, isolate, context, maybe_receiver,
+                       MessageTemplate::kIncompatibleMethodReceiver,
+                       "RegExp.prototype.test");
+  Node* const receiver = maybe_receiver;
+
+  // Convert {maybe_string} to a String.
+  Node* const string = a->ToString(context, maybe_string);
+
+  // Call exec.
+  Node* const match_indices = RegExpExec(a, context, receiver, string);
+
+  // Return true iff exec matched successfully.
+  Node* const result = a->Select(a->WordEqual(match_indices, a->NullConstant()),
+                                 a->FalseConstant(), a->TrueConstant());
+  a->Return(result);
+}
+
+// ES#sec-regexp.prototype-@@match
+// RegExp.prototype [ @@match ] ( string )
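+//
+// For non-global regexps this reduces to a single RegExpExec call; for
+// global regexps all match strings are collected, e.g. "a1b2".match(/\d/g)
+// yields ["1", "2"], and null is returned when nothing matches.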
+BUILTIN(RegExpPrototypeMatch) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSReceiver, recv, "RegExp.prototype.@@match");
+
+  Handle<Object> string_obj = args.atOrUndefined(isolate, 1);
+
+  Handle<String> string;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, string,
+                                     Object::ToString(isolate, string_obj));
+
+  Handle<Object> global_obj;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, global_obj,
+      JSReceiver::GetProperty(recv, isolate->factory()->global_string()));
+  const bool global = global_obj->BooleanValue();
+
+  if (!global) {
+    RETURN_RESULT_OR_FAILURE(
+        isolate,
+        RegExpUtils::RegExpExec(isolate, recv, string,
+                                isolate->factory()->undefined_value()));
+  }
+
+  Handle<Object> unicode_obj;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, unicode_obj,
+      JSReceiver::GetProperty(recv, isolate->factory()->unicode_string()));
+  const bool unicode = unicode_obj->BooleanValue();
+
+  RETURN_FAILURE_ON_EXCEPTION(isolate,
+                              RegExpUtils::SetLastIndex(isolate, recv, 0));
+
+  static const int kInitialArraySize = 8;
+  Handle<FixedArray> elems =
+      isolate->factory()->NewFixedArrayWithHoles(kInitialArraySize);
+
+  int n = 0;
+  for (;; n++) {
+    Handle<Object> result;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, result,
+        RegExpUtils::RegExpExec(isolate, recv, string,
+                                isolate->factory()->undefined_value()));
+
+    if (result->IsNull(isolate)) {
+      if (n == 0) return isolate->heap()->null_value();
+      break;
+    }
+
+    Handle<Object> match_obj;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, match_obj,
+                                       Object::GetElement(isolate, result, 0));
+
+    Handle<String> match;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, match,
+                                       Object::ToString(isolate, match_obj));
+
+    elems = FixedArray::SetAndGrow(elems, n, match);
+
+    if (match->length() == 0) {
+      RETURN_FAILURE_ON_EXCEPTION(isolate, RegExpUtils::SetAdvancedStringIndex(
+                                               isolate, recv, string, unicode));
+    }
+  }
+
+  elems->Shrink(n);
+  return *isolate->factory()->NewJSArrayWithElements(elems);
+}
+
+namespace {
+
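+// Generates the body of @@search twice: once specialized for unmodified
+// JSRegExp receivers (is_fastpath) and once for generic receivers that may
+// carry user-defined exec or lastIndex properties.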
+void Generate_RegExpPrototypeSearchBody(CodeStubAssembler* a,
+                                        compiler::Node* const receiver,
+                                        compiler::Node* const string,
+                                        compiler::Node* const context,
+                                        bool is_fastpath) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Isolate* const isolate = a->isolate();
+
+  Node* const smi_zero = a->SmiConstant(Smi::kZero);
+
+  // Grab the initial value of last index.
+  Node* const previous_last_index =
+      is_fastpath ? FastLoadLastIndex(a, context, receiver)
+                  : SlowLoadLastIndex(a, context, receiver);
+
+  // Ensure last index is 0.
+  if (is_fastpath) {
+    FastStoreLastIndex(a, context, receiver, smi_zero);
+  } else {
+    Label next(a);
+    a->GotoIf(a->SameValue(previous_last_index, smi_zero, context), &next);
+
+    SlowStoreLastIndex(a, context, receiver, smi_zero);
+    a->Goto(&next);
+    a->Bind(&next);
+  }
+
+  // Call exec.
+  Node* const match_indices =
+      is_fastpath ? RegExpPrototypeExecInternal(a, context, receiver, string)
+                  : RegExpExec(a, context, receiver, string);
+
+  // Reset last index if necessary.
+  if (is_fastpath) {
+    FastStoreLastIndex(a, context, receiver, previous_last_index);
+  } else {
+    Label next(a);
+    Node* const current_last_index = SlowLoadLastIndex(a, context, receiver);
+
+    a->GotoIf(a->SameValue(current_last_index, previous_last_index, context),
+              &next);
+
+    SlowStoreLastIndex(a, context, receiver, previous_last_index);
+    a->Goto(&next);
+    a->Bind(&next);
+  }
+
+  // Return -1 if no match was found.
+  {
+    Label next(a);
+    a->GotoUnless(a->WordEqual(match_indices, a->NullConstant()), &next);
+    a->Return(a->SmiConstant(-1));
+    a->Bind(&next);
+  }
+
+  // Return the index of the match.
+  {
+    Label fast_result(a), slow_result(a, Label::kDeferred);
+
+    Node* const native_context = a->LoadNativeContext(context);
+    Node* const initial_regexp_result_map =
+        a->LoadContextElement(native_context, Context::REGEXP_RESULT_MAP_INDEX);
+    Node* const match_indices_map = a->LoadMap(match_indices);
+
+    a->Branch(a->WordEqual(match_indices_map, initial_regexp_result_map),
+              &fast_result, &slow_result);
+
+    a->Bind(&fast_result);
+    {
+      Node* const index =
+          a->LoadObjectField(match_indices, JSRegExpResult::kIndexOffset,
+                             MachineType::AnyTagged());
+      a->Return(index);
+    }
+
+    a->Bind(&slow_result);
+    {
+      Node* const name = a->HeapConstant(isolate->factory()->index_string());
+      Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+      Node* const index =
+          a->CallStub(getproperty_callable, context, match_indices, name);
+      a->Return(index);
+    }
+  }
+}
+
+}  // namespace
+
+// ES#sec-regexp.prototype-@@search
+// RegExp.prototype [ @@search ] ( string )
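+//
+// For example, "abc".search(/b/) returns 1, and -1 is returned when there
+// is no match; the receiver's lastIndex is saved and restored by the body
+// generated above.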
+void Builtins::Generate_RegExpPrototypeSearch(CodeStubAssembler* a) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Isolate* const isolate = a->isolate();
+
+  Node* const maybe_receiver = a->Parameter(0);
+  Node* const maybe_string = a->Parameter(1);
+  Node* const context = a->Parameter(4);
+
+  // Ensure {maybe_receiver} is a JSReceiver.
+  Node* const map =
+      ThrowIfNotJSReceiver(a, isolate, context, maybe_receiver,
+                           MessageTemplate::kIncompatibleMethodReceiver,
+                           "RegExp.prototype.@@search");
+  Node* const receiver = maybe_receiver;
+
+  // Convert {maybe_string} to a String.
+  Node* const string = a->ToString(context, maybe_string);
+
+  Label fast_path(a), slow_path(a);
+  BranchIfFastPath(a, context, map, &fast_path, &slow_path);
+
+  a->Bind(&fast_path);
+  Generate_RegExpPrototypeSearchBody(a, receiver, string, context, true);
+
+  a->Bind(&slow_path);
+  Generate_RegExpPrototypeSearchBody(a, receiver, string, context, false);
+}
+
+namespace {
+
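+// Converts the @@split limit per the spec: undefined maps to 2^32 - 1
+// (kMaxUInt32); anything else goes through ToNumber and ToUint32.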
+MUST_USE_RESULT MaybeHandle<Object> ToUint32(Isolate* isolate,
+                                             Handle<Object> object,
+                                             uint32_t* out) {
+  if (object->IsUndefined(isolate)) {
+    *out = kMaxUInt32;
+    return object;
+  }
+
+  Handle<Object> number;
+  ASSIGN_RETURN_ON_EXCEPTION(isolate, number, Object::ToNumber(object), Object);
+  *out = NumberToUint32(*number);
+  return object;
+}
+
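+// Returns true if string[index] is a lead surrogate (0xD800..0xDBFF)
+// immediately followed by a trail surrogate (0xDC00..0xDFFF).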
+bool AtSurrogatePair(Isolate* isolate, Handle<String> string, int index) {
+  if (index + 1 >= string->length()) return false;
+  const uint16_t first = string->Get(index);
+  if (first < 0xD800 || first > 0xDBFF) return false;
+  const uint16_t second = string->Get(index + 1);
+  return (second >= 0xDC00 && second <= 0xDFFF);
+}
+
+Handle<JSArray> NewJSArrayWithElements(Isolate* isolate,
+                                       Handle<FixedArray> elems,
+                                       int num_elems) {
+  elems->Shrink(num_elems);
+  return isolate->factory()->NewJSArrayWithElements(elems);
+}
+
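+// Fast path of @@split for unmodified JSRegExp receivers. For example,
+// "a,b,,c".split(/,/) yields ["a", "b", "", "c"], and captures are spliced
+// into the result: "a1b".split(/(\d)/) yields ["a", "1", "b"].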
+MaybeHandle<JSArray> RegExpSplit(Isolate* isolate, Handle<JSRegExp> regexp,
+                                 Handle<String> string,
+                                 Handle<Object> limit_obj) {
+  Factory* factory = isolate->factory();
+
+  uint32_t limit;
+  RETURN_ON_EXCEPTION(isolate, ToUint32(isolate, limit_obj, &limit), JSArray);
+
+  const int length = string->length();
+
+  if (limit == 0) return factory->NewJSArray(0);
+
+  Handle<RegExpMatchInfo> last_match_info = isolate->regexp_last_match_info();
+
+  if (length == 0) {
+    Handle<Object> match_indices;
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate, match_indices,
+        RegExpImpl::Exec(regexp, string, 0, last_match_info), JSArray);
+
+    if (!match_indices->IsNull(isolate)) return factory->NewJSArray(0);
+
+    Handle<FixedArray> elems = factory->NewUninitializedFixedArray(1);
+    elems->set(0, *string);
+    return factory->NewJSArrayWithElements(elems);
+  }
+
+  int current_index = 0;
+  int start_index = 0;
+  int start_match = 0;
+
+  static const int kInitialArraySize = 8;
+  Handle<FixedArray> elems = factory->NewFixedArrayWithHoles(kInitialArraySize);
+  int num_elems = 0;
+
+  while (true) {
+    if (start_index == length) {
+      Handle<String> substr =
+          factory->NewSubString(string, current_index, length);
+      elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
+      break;
+    }
+
+    Handle<Object> match_indices_obj;
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate, match_indices_obj,
+        RegExpImpl::Exec(regexp, string, start_index,
+                         isolate->regexp_last_match_info()),
+        JSArray);
+
+    if (match_indices_obj->IsNull(isolate)) {
+      Handle<String> substr =
+          factory->NewSubString(string, current_index, length);
+      elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
+      break;
+    }
+
+    auto match_indices = Handle<RegExpMatchInfo>::cast(match_indices_obj);
+
+    start_match = match_indices->Capture(0);
+
+    if (start_match == length) {
+      Handle<String> substr =
+          factory->NewSubString(string, current_index, length);
+      elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
+      break;
+    }
+
+    const int end_index = match_indices->Capture(1);
+
+    if (start_index == end_index && end_index == current_index) {
+      const bool unicode = (regexp->GetFlags() & JSRegExp::kUnicode) != 0;
+      if (unicode && AtSurrogatePair(isolate, string, start_index)) {
+        start_index += 2;
+      } else {
+        start_index += 1;
+      }
+      continue;
+    }
+
+    {
+      Handle<String> substr =
+          factory->NewSubString(string, current_index, start_match);
+      elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
+    }
+
+    if (static_cast<uint32_t>(num_elems) == limit) break;
+
+    for (int i = 2; i < match_indices->NumberOfCaptureRegisters(); i += 2) {
+      const int start = match_indices->Capture(i);
+      const int end = match_indices->Capture(i + 1);
+
+      if (end != -1) {
+        Handle<String> substr = factory->NewSubString(string, start, end);
+        elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
+      } else {
+        elems = FixedArray::SetAndGrow(elems, num_elems++,
+                                       factory->undefined_value());
+      }
+
+      if (static_cast<uint32_t>(num_elems) == limit) {
+        return NewJSArrayWithElements(isolate, elems, num_elems);
+      }
+    }
+
+    start_index = current_index = end_index;
+  }
+
+  return NewJSArrayWithElements(isolate, elems, num_elems);
+}
+
+// ES#sec-speciesconstructor
+// SpeciesConstructor ( O, defaultConstructor )
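+// Returns ctor[@@species] when the receiver's constructor defines one,
+// falls back to {default_ctor} when the constructor or species is undefined
+// (or species is null), and throws when the result is not a constructor.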
+MUST_USE_RESULT MaybeHandle<Object> SpeciesConstructor(
+    Isolate* isolate, Handle<JSReceiver> recv,
+    Handle<JSFunction> default_ctor) {
+  Handle<Object> ctor_obj;
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, ctor_obj,
+      JSObject::GetProperty(recv, isolate->factory()->constructor_string()),
+      Object);
+
+  if (ctor_obj->IsUndefined(isolate)) return default_ctor;
+
+  if (!ctor_obj->IsJSReceiver()) {
+    THROW_NEW_ERROR(isolate,
+                    NewTypeError(MessageTemplate::kConstructorNotReceiver),
+                    Object);
+  }
+
+  Handle<JSReceiver> ctor = Handle<JSReceiver>::cast(ctor_obj);
+
+  Handle<Object> species;
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, species,
+      JSObject::GetProperty(ctor, isolate->factory()->species_symbol()),
+      Object);
+
+  if (species->IsNull(isolate) || species->IsUndefined(isolate)) {
+    return default_ctor;
+  }
+
+  if (species->IsConstructor()) return species;
+
+  THROW_NEW_ERROR(
+      isolate, NewTypeError(MessageTemplate::kSpeciesNotConstructor), Object);
+}
+
+}  // namespace
+
+// ES#sec-regexp.prototype-@@split
+// RegExp.prototype [ @@split ] ( string, limit )
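+//
+// The limit argument caps the result, e.g. "a,b,c".split(/,/, 2) yields
+// ["a", "b"]. Receivers other than unmodified JSRegExps are split via a
+// @@species-constructed splitter with a forced sticky flag.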
+BUILTIN(RegExpPrototypeSplit) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSReceiver, recv, "RegExp.prototype.@@split");
+
+  Factory* factory = isolate->factory();
+
+  Handle<Object> string_obj = args.atOrUndefined(isolate, 1);
+  Handle<Object> limit_obj = args.atOrUndefined(isolate, 2);
+
+  Handle<String> string;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, string,
+                                     Object::ToString(isolate, string_obj));
+
+  if (RegExpUtils::IsUnmodifiedRegExp(isolate, recv)) {
+    RETURN_RESULT_OR_FAILURE(
+        isolate,
+        RegExpSplit(isolate, Handle<JSRegExp>::cast(recv), string, limit_obj));
+  }
+
+  Handle<JSFunction> regexp_fun = isolate->regexp_function();
+  Handle<Object> ctor;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, ctor, SpeciesConstructor(isolate, recv, regexp_fun));
+
+  Handle<Object> flags_obj;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, flags_obj, JSObject::GetProperty(recv, factory->flags_string()));
+
+  Handle<String> flags;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, flags,
+                                     Object::ToString(isolate, flags_obj));
+
+  Handle<String> u_str = factory->LookupSingleCharacterStringFromCode('u');
+  const bool unicode = (String::IndexOf(isolate, flags, u_str, 0) >= 0);
+
+  Handle<String> y_str = factory->LookupSingleCharacterStringFromCode('y');
+  const bool sticky = (String::IndexOf(isolate, flags, y_str, 0) >= 0);
+
+  Handle<String> new_flags = flags;
+  if (!sticky) {
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, new_flags,
+                                       factory->NewConsString(flags, y_str));
+  }
+
+  Handle<JSReceiver> splitter;
+  {
+    const int argc = 2;
+
+    ScopedVector<Handle<Object>> argv(argc);
+    argv[0] = recv;
+    argv[1] = new_flags;
+
+    Handle<JSFunction> ctor_fun = Handle<JSFunction>::cast(ctor);
+    Handle<Object> splitter_obj;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, splitter_obj, Execution::New(ctor_fun, argc, argv.start()));
+
+    splitter = Handle<JSReceiver>::cast(splitter_obj);
+  }
+
+  uint32_t limit;
+  RETURN_FAILURE_ON_EXCEPTION(isolate, ToUint32(isolate, limit_obj, &limit));
+
+  const int length = string->length();
+
+  if (limit == 0) return *factory->NewJSArray(0);
+
+  if (length == 0) {
+    Handle<Object> result;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, result, RegExpUtils::RegExpExec(isolate, splitter, string,
+                                                 factory->undefined_value()));
+
+    if (!result->IsNull(isolate)) return *factory->NewJSArray(0);
+
+    Handle<FixedArray> elems = factory->NewUninitializedFixedArray(1);
+    elems->set(0, *string);
+    return *factory->NewJSArrayWithElements(elems);
+  }
+
+  // TODO(jgruber): Wrap this in a helper class.
+  static const int kInitialArraySize = 8;
+  Handle<FixedArray> elems = factory->NewFixedArrayWithHoles(kInitialArraySize);
+  int num_elems = 0;
+
+  int string_index = 0;
+  int prev_string_index = 0;
+  while (string_index < length) {
+    RETURN_FAILURE_ON_EXCEPTION(
+        isolate, RegExpUtils::SetLastIndex(isolate, splitter, string_index));
+
+    Handle<Object> result;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, result, RegExpUtils::RegExpExec(isolate, splitter, string,
+                                                 factory->undefined_value()));
+
+    if (result->IsNull(isolate)) {
+      string_index = RegExpUtils::AdvanceStringIndex(isolate, string,
+                                                     string_index, unicode);
+      continue;
+    }
+
+    // TODO(jgruber): Extract toLength of some property into function.
+    Handle<Object> last_index_obj;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, last_index_obj, RegExpUtils::GetLastIndex(isolate, splitter));
+
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, last_index_obj, Object::ToLength(isolate, last_index_obj));
+    const int last_index = Handle<Smi>::cast(last_index_obj)->value();
+
+    const int end = std::min(last_index, length);
+    if (end == prev_string_index) {
+      string_index = RegExpUtils::AdvanceStringIndex(isolate, string,
+                                                     string_index, unicode);
+      continue;
+    }
+
+    {
+      Handle<String> substr =
+          factory->NewSubString(string, prev_string_index, string_index);
+      elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
+      if (static_cast<uint32_t>(num_elems) == limit) {
+        return *NewJSArrayWithElements(isolate, elems, num_elems);
+      }
+    }
+
+    prev_string_index = end;
+
+    Handle<Object> num_captures_obj;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, num_captures_obj,
+        Object::GetProperty(result, isolate->factory()->length_string()));
+
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, num_captures_obj, Object::ToLength(isolate, num_captures_obj));
+    const int num_captures =
+        std::max(Handle<Smi>::cast(num_captures_obj)->value(), 0);
+
+    for (int i = 1; i < num_captures; i++) {
+      Handle<Object> capture;
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+          isolate, capture, Object::GetElement(isolate, result, i));
+      elems = FixedArray::SetAndGrow(elems, num_elems++, capture);
+      if (static_cast<uint32_t>(num_elems) == limit) {
+        return *NewJSArrayWithElements(isolate, elems, num_elems);
+      }
+    }
+
+    string_index = prev_string_index;
+  }
+
+  {
+    Handle<String> substr =
+        factory->NewSubString(string, prev_string_index, length);
+    elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
+  }
+
+  return *NewJSArrayWithElements(isolate, elems, num_elems);
+}
+
+namespace {
+
+compiler::Node* ReplaceGlobalCallableFastPath(
+    CodeStubAssembler* a, compiler::Node* context, compiler::Node* regexp,
+    compiler::Node* subject_string, compiler::Node* replace_callable) {
+  // The fast path is reached only if {receiver} is a global unmodified
+  // JSRegExp instance and {replace_callable} is callable.
+
+  typedef CodeStubAssembler::Variable Variable;
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Isolate* const isolate = a->isolate();
+
+  Node* const null = a->NullConstant();
+  Node* const undefined = a->UndefinedConstant();
+  Node* const int_zero = a->IntPtrConstant(0);
+  Node* const int_one = a->IntPtrConstant(1);
+  Node* const smi_zero = a->SmiConstant(Smi::kZero);
+
+  Node* const native_context = a->LoadNativeContext(context);
+
+  Label out(a);
+  Variable var_result(a, MachineRepresentation::kTagged);
+
+  // Set last index to 0.
+  FastStoreLastIndex(a, context, regexp, smi_zero);
+
+  // Allocate {result_array}.
+  Node* result_array;
+  {
+    ElementsKind kind = FAST_ELEMENTS;
+    Node* const array_map = a->LoadJSArrayElementsMap(kind, native_context);
+    Node* const capacity = a->IntPtrConstant(16);
+    Node* const length = smi_zero;
+    Node* const allocation_site = nullptr;
+    CodeStubAssembler::ParameterMode capacity_mode =
+        CodeStubAssembler::INTPTR_PARAMETERS;
+
+    result_array = a->AllocateJSArray(kind, array_map, capacity, length,
+                                      allocation_site, capacity_mode);
+  }
+
+  // Call into runtime for RegExpExecMultiple.
+  Node* last_match_info = a->LoadContextElement(
+      native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
+  Node* const res =
+      a->CallRuntime(Runtime::kRegExpExecMultiple, context, regexp,
+                     subject_string, last_match_info, result_array);
+
+  // Reset last index to 0.
+  FastStoreLastIndex(a, context, regexp, smi_zero);
+
+  // If no matches, return the subject string.
+  var_result.Bind(subject_string);
+  a->GotoIf(a->WordEqual(res, null), &out);
+
+  // Reload last match info since it might have changed.
+  last_match_info = a->LoadContextElement(
+      native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
+
+  Node* const res_length = a->LoadJSArrayLength(res);
+  Node* const res_elems = a->LoadElements(res);
+  CSA_ASSERT(a, a->HasInstanceType(res_elems, FIXED_ARRAY_TYPE));
+
+  CodeStubAssembler::ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
+  Node* const num_capture_registers = a->LoadFixedArrayElement(
+      last_match_info,
+      a->IntPtrConstant(RegExpMatchInfo::kNumberOfCapturesIndex), 0, mode);
+
+  Label if_hasexplicitcaptures(a), if_noexplicitcaptures(a), create_result(a);
+  a->Branch(a->SmiEqual(num_capture_registers, a->SmiConstant(Smi::FromInt(2))),
+            &if_noexplicitcaptures, &if_hasexplicitcaptures);
+
+  a->Bind(&if_noexplicitcaptures);
+  {
+    // If the number of captures is two then there are no explicit captures in
+    // the regexp, just the implicit capture that captures the whole match. In
+    // this case we can simplify quite a bit and end up with something faster.
+    // The builder will consist of some integers that indicate slices of the
+    // input string and some replacements that were returned from the replace
+    // function.
+
+    Variable var_match_start(a, MachineRepresentation::kTagged);
+    var_match_start.Bind(smi_zero);
+
+    Node* const end = a->SmiUntag(res_length);
+    Variable var_i(a, MachineType::PointerRepresentation());
+    var_i.Bind(int_zero);
+
+    Variable* vars[] = {&var_i, &var_match_start};
+    Label loop(a, 2, vars);
+    a->Goto(&loop);
+    a->Bind(&loop);
+    {
+      Node* const i = var_i.value();
+      a->GotoUnless(a->IntPtrLessThan(i, end), &create_result);
+
+      CodeStubAssembler::ParameterMode mode =
+          CodeStubAssembler::INTPTR_PARAMETERS;
+      Node* const elem = a->LoadFixedArrayElement(res_elems, i, 0, mode);
+
+      Label if_issmi(a), if_isstring(a), loop_epilogue(a);
+      a->Branch(a->TaggedIsSmi(elem), &if_issmi, &if_isstring);
+
+      a->Bind(&if_issmi);
+      {
+        // Integers represent slices of the original string.
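+        // As reconstructed from the decoding below: a positive smi packs a
+        // slice as (position << 11) | length, so position + length is the
+        // position after the slice; a non-positive smi is the negated
+        // position, with the length stored in the following array element.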
+        Label if_isnegativeorzero(a), if_ispositive(a);
+        a->BranchIfSmiLessThanOrEqual(elem, smi_zero, &if_isnegativeorzero,
+                                      &if_ispositive);
+
+        a->Bind(&if_ispositive);
+        {
+          Node* const int_elem = a->SmiUntag(elem);
+          Node* const new_match_start =
+              a->IntPtrAdd(a->WordShr(int_elem, a->IntPtrConstant(11)),
+                           a->WordAnd(int_elem, a->IntPtrConstant(0x7ff)));
+          var_match_start.Bind(a->SmiTag(new_match_start));
+          a->Goto(&loop_epilogue);
+        }
+
+        a->Bind(&if_isnegativeorzero);
+        {
+          Node* const next_i = a->IntPtrAdd(i, int_one);
+          var_i.Bind(next_i);
+
+          Node* const next_elem =
+              a->LoadFixedArrayElement(res_elems, next_i, 0, mode);
+
+          Node* const new_match_start = a->SmiSub(next_elem, elem);
+          var_match_start.Bind(new_match_start);
+          a->Goto(&loop_epilogue);
+        }
+      }
+
+      a->Bind(&if_isstring);
+      {
+        CSA_ASSERT(a, a->IsStringInstanceType(a->LoadInstanceType(elem)));
+
+        Callable call_callable = CodeFactory::Call(isolate);
+        Node* const replacement_obj =
+            a->CallJS(call_callable, context, replace_callable, undefined, elem,
+                      var_match_start.value(), subject_string);
+
+        Node* const replacement_str = a->ToString(context, replacement_obj);
+        a->StoreFixedArrayElement(res_elems, i, replacement_str);
+
+        Node* const elem_length = a->LoadStringLength(elem);
+        Node* const new_match_start =
+            a->SmiAdd(var_match_start.value(), elem_length);
+        var_match_start.Bind(new_match_start);
+
+        a->Goto(&loop_epilogue);
+      }
+
+      a->Bind(&loop_epilogue);
+      {
+        var_i.Bind(a->IntPtrAdd(var_i.value(), int_one));
+        a->Goto(&loop);
+      }
+    }
+  }
+
+  a->Bind(&if_hasexplicitcaptures);
+  {
+    CodeStubAssembler::ParameterMode mode =
+        CodeStubAssembler::INTPTR_PARAMETERS;
+
+    Node* const from = int_zero;
+    Node* const to = a->SmiUntag(res_length);
+    const int increment = 1;
+
+    a->BuildFastLoop(
+        MachineType::PointerRepresentation(), from, to,
+        [res_elems, isolate, native_context, context, undefined,
+         replace_callable, mode](CodeStubAssembler* a, Node* index) {
+          Node* const elem =
+              a->LoadFixedArrayElement(res_elems, index, 0, mode);
+
+          Label do_continue(a);
+          a->GotoIf(a->TaggedIsSmi(elem), &do_continue);
+
+          // elem must be an Array.
+          // Use the apply argument as backing for global RegExp properties.
+
+          CSA_ASSERT(a, a->HasInstanceType(elem, JS_ARRAY_TYPE));
+
+          // TODO(jgruber): Remove indirection through Call->ReflectApply.
+          Callable call_callable = CodeFactory::Call(isolate);
+          Node* const reflect_apply = a->LoadContextElement(
+              native_context, Context::REFLECT_APPLY_INDEX);
+
+          Node* const replacement_obj =
+              a->CallJS(call_callable, context, reflect_apply, undefined,
+                        replace_callable, undefined, elem);
+
+          // Overwrite the i'th element in the results with the string we got
+          // back from the callback function.
+
+          Node* const replacement_str = a->ToString(context, replacement_obj);
+          a->StoreFixedArrayElement(res_elems, index, replacement_str,
+                                    UPDATE_WRITE_BARRIER, mode);
+
+          a->Goto(&do_continue);
+          a->Bind(&do_continue);
+        },
+        increment, CodeStubAssembler::IndexAdvanceMode::kPost);
+
+    a->Goto(&create_result);
+  }
+
+  a->Bind(&create_result);
+  {
+    Node* const result = a->CallRuntime(Runtime::kStringBuilderConcat, context,
+                                        res, res_length, subject_string);
+    var_result.Bind(result);
+    a->Goto(&out);
+  }
+
+  a->Bind(&out);
+  return var_result.value();
+}
+
+compiler::Node* ReplaceSimpleStringFastPath(CodeStubAssembler* a,
+                                            compiler::Node* context,
+                                            compiler::Node* regexp,
+                                            compiler::Node* subject_string,
+                                            compiler::Node* replace_string) {
+  // The fast path is reached only if {receiver} is an unmodified
+  // JSRegExp instance, {replace_value} is non-callable, and
+  // ToString({replace_value}) does not contain '$', i.e. we're doing a simple
+  // string replacement.
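+  // For example, "abc".replace(/b/, "X") yields "aXc"; global regexps are
+  // handed off to the runtime below.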
+
+  typedef CodeStubAssembler::Variable Variable;
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Isolate* const isolate = a->isolate();
+
+  Node* const null = a->NullConstant();
+  Node* const int_zero = a->IntPtrConstant(0);
+  Node* const smi_zero = a->SmiConstant(Smi::kZero);
+
+  Label out(a);
+  Variable var_result(a, MachineRepresentation::kTagged);
+
+  // Load the last match info.
+  Node* const native_context = a->LoadNativeContext(context);
+  Node* const last_match_info = a->LoadContextElement(
+      native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
+
+  // Is {regexp} global?
+  Label if_isglobal(a), if_isnonglobal(a);
+  Node* const flags = a->LoadObjectField(regexp, JSRegExp::kFlagsOffset);
+  Node* const is_global =
+      a->WordAnd(a->SmiUntag(flags), a->IntPtrConstant(JSRegExp::kGlobal));
+  a->Branch(a->WordEqual(is_global, int_zero), &if_isnonglobal, &if_isglobal);
+
+  a->Bind(&if_isglobal);
+  {
+    // Hand off global regexps to runtime.
+    FastStoreLastIndex(a, context, regexp, smi_zero);
+    Node* const result =
+        a->CallRuntime(Runtime::kStringReplaceGlobalRegExpWithString, context,
+                       subject_string, regexp, replace_string, last_match_info);
+    var_result.Bind(result);
+    a->Goto(&out);
+  }
+
+  a->Bind(&if_isnonglobal);
+  {
+    // Run exec, then manually construct the resulting string.
+    Callable exec_callable = CodeFactory::RegExpExec(isolate);
+    Node* const match_indices =
+        a->CallStub(exec_callable, context, regexp, subject_string, smi_zero,
+                    last_match_info);
+
+    Label if_matched(a), if_didnotmatch(a);
+    a->Branch(a->WordEqual(match_indices, null), &if_didnotmatch, &if_matched);
+
+    a->Bind(&if_didnotmatch);
+    {
+      FastStoreLastIndex(a, context, regexp, smi_zero);
+      var_result.Bind(subject_string);
+      a->Goto(&out);
+    }
+
+    a->Bind(&if_matched);
+    {
+      CodeStubAssembler::ParameterMode mode =
+          CodeStubAssembler::INTPTR_PARAMETERS;
+
+      Node* const subject_start = smi_zero;
+      Node* const match_start = a->LoadFixedArrayElement(
+          match_indices, a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex),
+          0, mode);
+      Node* const match_end = a->LoadFixedArrayElement(
+          match_indices,
+          a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 1), 0, mode);
+      Node* const subject_end = a->LoadStringLength(subject_string);
+
+      Label if_replaceisempty(a), if_replaceisnotempty(a);
+      Node* const replace_length = a->LoadStringLength(replace_string);
+      a->Branch(a->SmiEqual(replace_length, smi_zero), &if_replaceisempty,
+                &if_replaceisnotempty);
+
+      a->Bind(&if_replaceisempty);
+      {
+        // TODO(jgruber): We could skip many of the checks that using SubString
+        // here entails.
+
+        Node* const first_part =
+            a->SubString(context, subject_string, subject_start, match_start);
+        Node* const second_part =
+            a->SubString(context, subject_string, match_end, subject_end);
+
+        Node* const result = a->StringAdd(context, first_part, second_part);
+        var_result.Bind(result);
+        a->Goto(&out);
+      }
+
+      a->Bind(&if_replaceisnotempty);
+      {
+        Node* const first_part =
+            a->SubString(context, subject_string, subject_start, match_start);
+        Node* const second_part = replace_string;
+        Node* const third_part =
+            a->SubString(context, subject_string, match_end, subject_end);
+
+        Node* result = a->StringAdd(context, first_part, second_part);
+        result = a->StringAdd(context, result, third_part);
+
+        var_result.Bind(result);
+        a->Goto(&out);
+      }
+    }
+  }
+
+  a->Bind(&out);
+  return var_result.value();
+}
+
+}  // namespace
+
+// ES#sec-regexp.prototype-@@replace
+// RegExp.prototype [ @@replace ] ( string, replaceValue )
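+//
+// A replacement string containing '$' (e.g. "[$&]") or a modified receiver
+// takes the generic runtime path; simple cases such as
+// "abc".replace(/b/, "X") stay on the fast paths generated below.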
+void Builtins::Generate_RegExpPrototypeReplace(CodeStubAssembler* a) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Isolate* const isolate = a->isolate();
+
+  Node* const maybe_receiver = a->Parameter(0);
+  Node* const maybe_string = a->Parameter(1);
+  Node* const replace_value = a->Parameter(2);
+  Node* const context = a->Parameter(5);
+
+  Node* const int_zero = a->IntPtrConstant(0);
+
+  // Ensure {maybe_receiver} is a JSReceiver.
+  Node* const map =
+      ThrowIfNotJSReceiver(a, isolate, context, maybe_receiver,
+                           MessageTemplate::kIncompatibleMethodReceiver,
+                           "RegExp.prototype.@@replace");
+  Node* const receiver = maybe_receiver;
+
+  // Convert {maybe_string} to a String.
+  Callable tostring_callable = CodeFactory::ToString(isolate);
+  Node* const string = a->CallStub(tostring_callable, context, maybe_string);
+
+  // Fast-path checks: 1. Is the {receiver} an unmodified JSRegExp instance?
+  Label checkreplacecallable(a), runtime(a, Label::kDeferred), fastpath(a);
+  BranchIfFastPath(a, context, map, &checkreplacecallable, &runtime);
+
+  a->Bind(&checkreplacecallable);
+  Node* const regexp = receiver;
+
+  // 2. Is {replace_value} callable?
+  Label checkreplacestring(a), if_iscallable(a);
+  a->GotoIf(a->TaggedIsSmi(replace_value), &checkreplacestring);
+
+  Node* const replace_value_map = a->LoadMap(replace_value);
+  a->Branch(a->IsCallableMap(replace_value_map), &if_iscallable,
+            &checkreplacestring);
+
+  // 3. Does ToString({replace_value}) contain '$'?
+  a->Bind(&checkreplacestring);
+  {
+    Node* const replace_string =
+        a->CallStub(tostring_callable, context, replace_value);
+
+    Node* const dollar_char = a->IntPtrConstant('$');
+    Node* const smi_minusone = a->SmiConstant(Smi::FromInt(-1));
+    a->GotoUnless(a->SmiEqual(a->StringIndexOfChar(context, replace_string,
+                                                   dollar_char, int_zero),
+                              smi_minusone),
+                  &runtime);
+
+    a->Return(ReplaceSimpleStringFastPath(a, context, regexp, string,
+                                          replace_string));
+  }
+
+  // {regexp} is unmodified and {replace_value} is callable.
+  a->Bind(&if_iscallable);
+  {
+    Node* const replace_callable = replace_value;
+
+    // Check if the {regexp} is global.
+    Label if_isglobal(a), if_isnotglobal(a);
+    Node* const is_global = FastFlagGetter(a, regexp, JSRegExp::kGlobal);
+    a->Branch(is_global, &if_isglobal, &if_isnotglobal);
+
+    a->Bind(&if_isglobal);
+    {
+      Node* const result = ReplaceGlobalCallableFastPath(
+          a, context, regexp, string, replace_callable);
       a->Return(result);
     }
+
+    a->Bind(&if_isnotglobal);
+    {
+      Node* const result =
+          a->CallRuntime(Runtime::kStringReplaceNonGlobalRegExpWithFunction,
+                         context, string, regexp, replace_callable);
+      a->Return(result);
+    }
+  }
+
+  a->Bind(&runtime);
+  {
+    Node* const result = a->CallRuntime(Runtime::kRegExpReplace, context,
+                                        receiver, string, replace_value);
+    a->Return(result);
+  }
+}
+
+// Simple string matching functionality for internal use which does not modify
+// the last match info.
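+// A dedicated internal match info is used so that the script-visible RegExp
+// statics (lastMatch, $1, ...) are left untouched.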
+void Builtins::Generate_RegExpInternalMatch(CodeStubAssembler* a) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Isolate* const isolate = a->isolate();
+
+  Node* const regexp = a->Parameter(1);
+  Node* const string = a->Parameter(2);
+  Node* const context = a->Parameter(5);
+
+  Node* const null = a->NullConstant();
+  Node* const smi_zero = a->SmiConstant(Smi::kZero);
+
+  Node* const native_context = a->LoadNativeContext(context);
+  Node* const internal_match_info = a->LoadContextElement(
+      native_context, Context::REGEXP_INTERNAL_MATCH_INFO_INDEX);
+
+  Callable exec_callable = CodeFactory::RegExpExec(isolate);
+  Node* const match_indices = a->CallStub(
+      exec_callable, context, regexp, string, smi_zero, internal_match_info);
+
+  Label if_matched(a), if_didnotmatch(a);
+  a->Branch(a->WordEqual(match_indices, null), &if_didnotmatch, &if_matched);
+
+  a->Bind(&if_didnotmatch);
+  a->Return(null);
+
+  a->Bind(&if_matched);
+  {
+    Node* result = ConstructNewResultFromMatchInfo(isolate, a, context,
+                                                   match_indices, string);
+    a->Return(result);
   }
 }
 
diff --git a/src/builtins/builtins-sharedarraybuffer.cc b/src/builtins/builtins-sharedarraybuffer.cc
index 6aad4da..2b5bf49 100644
--- a/src/builtins/builtins-sharedarraybuffer.cc
+++ b/src/builtins/builtins-sharedarraybuffer.cc
@@ -37,7 +37,7 @@
       not_float_or_clamped(a), invalid(a);
 
   // Fail if it is not a heap object.
-  a->Branch(a->WordIsSmi(tagged), &is_smi, &not_smi);
+  a->Branch(a->TaggedIsSmi(tagged), &is_smi, &not_smi);
   a->Bind(&is_smi);
   a->Goto(&invalid);
 
@@ -52,8 +52,9 @@
   // Fail if the array's JSArrayBuffer is not shared.
   a->Bind(&is_typed_array);
   Node* array_buffer = a->LoadObjectField(tagged, JSTypedArray::kBufferOffset);
-  Node* is_buffer_shared = a->BitFieldDecode<JSArrayBuffer::IsShared>(
-      a->LoadObjectField(array_buffer, JSArrayBuffer::kBitFieldSlot));
+  Node* is_buffer_shared =
+      a->IsSetWord32<JSArrayBuffer::IsShared>(a->LoadObjectField(
+          array_buffer, JSArrayBuffer::kBitFieldOffset, MachineType::Uint32()));
   a->Branch(is_buffer_shared, &is_shared, &not_shared);
   a->Bind(&not_shared);
   a->Goto(&invalid);
@@ -102,7 +103,7 @@
   CodeStubAssembler::Label done(a, &var_result);
 
   CodeStubAssembler::Label if_numberissmi(a), if_numberisnotsmi(a);
-  a->Branch(a->WordIsSmi(number_index), &if_numberissmi, &if_numberisnotsmi);
+  a->Branch(a->TaggedIsSmi(number_index), &if_numberissmi, &if_numberisnotsmi);
 
   a->Bind(&if_numberissmi);
   {
diff --git a/src/builtins/builtins-string.cc b/src/builtins/builtins-string.cc
index 68d2bd0..4ccccbc 100644
--- a/src/builtins/builtins-string.cc
+++ b/src/builtins/builtins-string.cc
@@ -6,13 +6,15 @@
 #include "src/builtins/builtins-utils.h"
 
 #include "src/code-factory.h"
+#include "src/regexp/regexp-utils.h"
 
 namespace v8 {
 namespace internal {
 
-namespace {
+typedef CodeStubAssembler::ResultMode ResultMode;
+typedef CodeStubAssembler::RelationalComparisonMode RelationalComparisonMode;
 
-enum ResultMode { kDontNegateResult, kNegateResult };
+namespace {
 
 void GenerateStringEqual(CodeStubAssembler* assembler, ResultMode mode) {
   // Here's pseudo-code for the algorithm below in case of kDontNegateResult
@@ -168,9 +170,10 @@
         {
           // TODO(bmeurer): Add fast case support for flattened cons strings;
           // also add support for two byte string equality checks.
-          Runtime::FunctionId function_id = (mode == kDontNegateResult)
-                                                ? Runtime::kStringEqual
-                                                : Runtime::kStringNotEqual;
+          Runtime::FunctionId function_id =
+              (mode == ResultMode::kDontNegateResult)
+                  ? Runtime::kStringEqual
+                  : Runtime::kStringNotEqual;
           assembler->TailCallRuntime(function_id, context, lhs, rhs);
         }
       }
@@ -184,18 +187,14 @@
   }
 
   assembler->Bind(&if_equal);
-  assembler->Return(assembler->BooleanConstant(mode == kDontNegateResult));
+  assembler->Return(
+      assembler->BooleanConstant(mode == ResultMode::kDontNegateResult));
 
   assembler->Bind(&if_notequal);
-  assembler->Return(assembler->BooleanConstant(mode == kNegateResult));
+  assembler->Return(
+      assembler->BooleanConstant(mode == ResultMode::kNegateResult));
 }
 
-enum RelationalComparisonMode {
-  kLessThan,
-  kLessThanOrEqual,
-  kGreaterThan,
-  kGreaterThanOrEqual
-};
 
 void GenerateStringRelationalComparison(CodeStubAssembler* assembler,
                                         RelationalComparisonMode mode) {
@@ -293,8 +292,8 @@
           assembler->Goto(&loop);
 
           assembler->Bind(&if_valueisnotsame);
-          assembler->BranchIf(assembler->Uint32LessThan(lhs_value, rhs_value),
-                              &if_less, &if_greater);
+          assembler->Branch(assembler->Uint32LessThan(lhs_value, rhs_value),
+                            &if_less, &if_greater);
         }
 
         assembler->Bind(&if_done);
@@ -320,19 +319,19 @@
       // TODO(bmeurer): Add fast case support for flattened cons strings;
       // also add support for two byte string relational comparisons.
       switch (mode) {
-        case kLessThan:
+        case RelationalComparisonMode::kLessThan:
           assembler->TailCallRuntime(Runtime::kStringLessThan, context, lhs,
                                      rhs);
           break;
-        case kLessThanOrEqual:
+        case RelationalComparisonMode::kLessThanOrEqual:
           assembler->TailCallRuntime(Runtime::kStringLessThanOrEqual, context,
                                      lhs, rhs);
           break;
-        case kGreaterThan:
+        case RelationalComparisonMode::kGreaterThan:
           assembler->TailCallRuntime(Runtime::kStringGreaterThan, context, lhs,
                                      rhs);
           break;
-        case kGreaterThanOrEqual:
+        case RelationalComparisonMode::kGreaterThanOrEqual:
           assembler->TailCallRuntime(Runtime::kStringGreaterThanOrEqual,
                                      context, lhs, rhs);
           break;
@@ -342,39 +341,39 @@
 
   assembler->Bind(&if_less);
   switch (mode) {
-    case kLessThan:
-    case kLessThanOrEqual:
+    case RelationalComparisonMode::kLessThan:
+    case RelationalComparisonMode::kLessThanOrEqual:
       assembler->Return(assembler->BooleanConstant(true));
       break;
 
-    case kGreaterThan:
-    case kGreaterThanOrEqual:
+    case RelationalComparisonMode::kGreaterThan:
+    case RelationalComparisonMode::kGreaterThanOrEqual:
       assembler->Return(assembler->BooleanConstant(false));
       break;
   }
 
   assembler->Bind(&if_equal);
   switch (mode) {
-    case kLessThan:
-    case kGreaterThan:
+    case RelationalComparisonMode::kLessThan:
+    case RelationalComparisonMode::kGreaterThan:
       assembler->Return(assembler->BooleanConstant(false));
       break;
 
-    case kLessThanOrEqual:
-    case kGreaterThanOrEqual:
+    case RelationalComparisonMode::kLessThanOrEqual:
+    case RelationalComparisonMode::kGreaterThanOrEqual:
       assembler->Return(assembler->BooleanConstant(true));
       break;
   }
 
   assembler->Bind(&if_greater);
   switch (mode) {
-    case kLessThan:
-    case kLessThanOrEqual:
+    case RelationalComparisonMode::kLessThan:
+    case RelationalComparisonMode::kLessThanOrEqual:
       assembler->Return(assembler->BooleanConstant(false));
       break;
 
-    case kGreaterThan:
-    case kGreaterThanOrEqual:
+    case RelationalComparisonMode::kGreaterThan:
+    case RelationalComparisonMode::kGreaterThanOrEqual:
       assembler->Return(assembler->BooleanConstant(true));
       break;
   }
@@ -384,32 +383,36 @@
 
 // static
 void Builtins::Generate_StringEqual(CodeStubAssembler* assembler) {
-  GenerateStringEqual(assembler, kDontNegateResult);
+  GenerateStringEqual(assembler, ResultMode::kDontNegateResult);
 }
 
 // static
 void Builtins::Generate_StringNotEqual(CodeStubAssembler* assembler) {
-  GenerateStringEqual(assembler, kNegateResult);
+  GenerateStringEqual(assembler, ResultMode::kNegateResult);
 }
 
 // static
 void Builtins::Generate_StringLessThan(CodeStubAssembler* assembler) {
-  GenerateStringRelationalComparison(assembler, kLessThan);
+  GenerateStringRelationalComparison(assembler,
+                                     RelationalComparisonMode::kLessThan);
 }
 
 // static
 void Builtins::Generate_StringLessThanOrEqual(CodeStubAssembler* assembler) {
-  GenerateStringRelationalComparison(assembler, kLessThanOrEqual);
+  GenerateStringRelationalComparison(
+      assembler, RelationalComparisonMode::kLessThanOrEqual);
 }
 
 // static
 void Builtins::Generate_StringGreaterThan(CodeStubAssembler* assembler) {
-  GenerateStringRelationalComparison(assembler, kGreaterThan);
+  GenerateStringRelationalComparison(assembler,
+                                     RelationalComparisonMode::kGreaterThan);
 }
 
 // static
 void Builtins::Generate_StringGreaterThanOrEqual(CodeStubAssembler* assembler) {
-  GenerateStringRelationalComparison(assembler, kGreaterThanOrEqual);
+  GenerateStringRelationalComparison(
+      assembler, RelationalComparisonMode::kGreaterThanOrEqual);
 }
 
 // -----------------------------------------------------------------------------
@@ -421,181 +424,117 @@
   typedef compiler::Node Node;
   typedef CodeStubAssembler::Variable Variable;
 
-  Node* code = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
+  Node* argc = assembler->ChangeInt32ToIntPtr(
+      assembler->Parameter(BuiltinDescriptor::kArgumentsCount));
+  Node* context = assembler->Parameter(BuiltinDescriptor::kContext);
+
+  CodeStubArguments arguments(assembler, argc);
 
   // Check if we have exactly one argument (plus the implicit receiver).
   Label if_oneargument(assembler), if_notoneargument(assembler);
-  Node* parent_frame_pointer = assembler->LoadParentFramePointer();
-  Node* parent_frame_type =
-      assembler->Load(MachineType::Pointer(), parent_frame_pointer,
-                      assembler->IntPtrConstant(
-                          CommonFrameConstants::kContextOrFrameTypeOffset));
-  assembler->Branch(
-      assembler->WordEqual(
-          parent_frame_type,
-          assembler->SmiConstant(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))),
-      &if_notoneargument, &if_oneargument);
+  assembler->Branch(assembler->WordEqual(argc, assembler->IntPtrConstant(1)),
+                    &if_oneargument, &if_notoneargument);
 
   assembler->Bind(&if_oneargument);
   {
     // Single argument case, perform fast single character string cache lookup
     // for one-byte code units, or fall back to creating a single character
     // string on the fly otherwise.
+    Node* code = arguments.AtIndex(0);
     Node* code32 = assembler->TruncateTaggedToWord32(context, code);
     Node* code16 = assembler->Word32And(
         code32, assembler->Int32Constant(String::kMaxUtf16CodeUnit));
     Node* result = assembler->StringFromCharCode(code16);
-    assembler->Return(result);
+    arguments.PopAndReturn(result);
   }
 
+  Node* code16 = nullptr;
   assembler->Bind(&if_notoneargument);
   {
-    // Determine the resulting string length.
-    Node* length = assembler->LoadAndUntagSmi(
-        parent_frame_pointer, ArgumentsAdaptorFrameConstants::kLengthOffset);
-
+    Label two_byte(assembler);
     // Assume that the resulting string contains only one-byte characters.
-    Node* result = assembler->AllocateSeqOneByteString(context, length);
+    Node* one_byte_result = assembler->AllocateSeqOneByteString(context, argc);
 
-    // Truncate all input parameters and append them to the resulting string.
-    Variable var_offset(assembler, MachineType::PointerRepresentation());
-    Label loop(assembler, &var_offset), done_loop(assembler);
-    var_offset.Bind(assembler->IntPtrConstant(0));
-    assembler->Goto(&loop);
-    assembler->Bind(&loop);
-    {
-      // Load the current {offset}.
-      Node* offset = var_offset.value();
+    Variable max_index(assembler, MachineType::PointerRepresentation());
+    max_index.Bind(assembler->IntPtrConstant(0));
 
-      // Check if we're done with the string.
-      assembler->GotoIf(assembler->WordEqual(offset, length), &done_loop);
-
-      // Load the next code point and truncate it to a 16-bit value.
-      Node* code = assembler->Load(
-          MachineType::AnyTagged(), parent_frame_pointer,
-          assembler->IntPtrAdd(
-              assembler->WordShl(assembler->IntPtrSub(length, offset),
-                                 assembler->IntPtrConstant(kPointerSizeLog2)),
-              assembler->IntPtrConstant(
-                  CommonFrameConstants::kFixedFrameSizeAboveFp -
-                  kPointerSize)));
-      Node* code32 = assembler->TruncateTaggedToWord32(context, code);
-      Node* code16 = assembler->Word32And(
+    // Iterate over the incoming arguments, converting them to 8-bit character
+    // codes. Stop if any of the conversions generates a code that doesn't fit
+    // in 8 bits.
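+    // For example, String.fromCharCode(97, 98) completes in this one-byte
+    // pass, while String.fromCharCode(97, 0x2603) bails out to the two-byte
+    // path at the second argument.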
+    CodeStubAssembler::VariableList vars({&max_index}, assembler->zone());
+    arguments.ForEach(vars, [context, &two_byte, &max_index, &code16,
+                             one_byte_result](CodeStubAssembler* assembler,
+                                              Node* arg) {
+      Node* code32 = assembler->TruncateTaggedToWord32(context, arg);
+      code16 = assembler->Word32And(
           code32, assembler->Int32Constant(String::kMaxUtf16CodeUnit));
 
-      // Check if {code16} fits into a one-byte string.
-      Label if_codeisonebyte(assembler), if_codeistwobyte(assembler);
-      assembler->Branch(
-          assembler->Int32LessThanOrEqual(
+      assembler->GotoIf(
+          assembler->Int32GreaterThan(
               code16, assembler->Int32Constant(String::kMaxOneByteCharCode)),
-          &if_codeisonebyte, &if_codeistwobyte);
+          &two_byte);
 
-      assembler->Bind(&if_codeisonebyte);
-      {
-        // The {code16} fits into the SeqOneByteString {result}.
-        assembler->StoreNoWriteBarrier(
-            MachineRepresentation::kWord8, result,
-            assembler->IntPtrAdd(
-                assembler->IntPtrConstant(SeqOneByteString::kHeaderSize -
-                                          kHeapObjectTag),
-                offset),
-            code16);
-        var_offset.Bind(
-            assembler->IntPtrAdd(offset, assembler->IntPtrConstant(1)));
-        assembler->Goto(&loop);
-      }
+      // The {code16} fits into the SeqOneByteString {one_byte_result}.
+      Node* offset = assembler->ElementOffsetFromIndex(
+          max_index.value(), UINT8_ELEMENTS,
+          CodeStubAssembler::INTPTR_PARAMETERS,
+          SeqOneByteString::kHeaderSize - kHeapObjectTag);
+      assembler->StoreNoWriteBarrier(MachineRepresentation::kWord8,
+                                     one_byte_result, offset, code16);
+      max_index.Bind(assembler->IntPtrAdd(max_index.value(),
+                                          assembler->IntPtrConstant(1)));
+    });
+    arguments.PopAndReturn(one_byte_result);
 
-      assembler->Bind(&if_codeistwobyte);
-      {
-        // Allocate a SeqTwoByteString to hold the resulting string.
-        Node* cresult = assembler->AllocateSeqTwoByteString(context, length);
+    assembler->Bind(&two_byte);
 
-        // Copy all characters that were previously written to the
-        // SeqOneByteString in {result} over to the new {cresult}.
-        Variable var_coffset(assembler, MachineType::PointerRepresentation());
-        Label cloop(assembler, &var_coffset), done_cloop(assembler);
-        var_coffset.Bind(assembler->IntPtrConstant(0));
-        assembler->Goto(&cloop);
-        assembler->Bind(&cloop);
-        {
-          Node* coffset = var_coffset.value();
-          assembler->GotoIf(assembler->WordEqual(coffset, offset), &done_cloop);
-          Node* ccode = assembler->Load(
-              MachineType::Uint8(), result,
-              assembler->IntPtrAdd(
-                  assembler->IntPtrConstant(SeqOneByteString::kHeaderSize -
-                                            kHeapObjectTag),
-                  coffset));
-          assembler->StoreNoWriteBarrier(
-              MachineRepresentation::kWord16, cresult,
-              assembler->IntPtrAdd(
-                  assembler->IntPtrConstant(SeqTwoByteString::kHeaderSize -
-                                            kHeapObjectTag),
-                  assembler->WordShl(coffset, 1)),
-              ccode);
-          var_coffset.Bind(
-              assembler->IntPtrAdd(coffset, assembler->IntPtrConstant(1)));
-          assembler->Goto(&cloop);
-        }
+    // At least one of the characters in the string requires a 16-bit
+    // representation.  Allocate a SeqTwoByteString to hold the resulting
+    // string.
+    Node* two_byte_result = assembler->AllocateSeqTwoByteString(context, argc);
 
-        // Write the pending {code16} to {offset}.
-        assembler->Bind(&done_cloop);
-        assembler->StoreNoWriteBarrier(
-            MachineRepresentation::kWord16, cresult,
-            assembler->IntPtrAdd(
-                assembler->IntPtrConstant(SeqTwoByteString::kHeaderSize -
-                                          kHeapObjectTag),
-                assembler->WordShl(offset, 1)),
-            code16);
+    // Copy the characters that have already been put in the 8-bit string into
+    // their corresponding positions in the new 16-bit string.
+    Node* zero = assembler->IntPtrConstant(0);
+    assembler->CopyStringCharacters(
+        one_byte_result, two_byte_result, zero, zero, max_index.value(),
+        String::ONE_BYTE_ENCODING, String::TWO_BYTE_ENCODING,
+        CodeStubAssembler::INTPTR_PARAMETERS);
 
-        // Copy the remaining parameters to the SeqTwoByteString {cresult}.
-        Label floop(assembler, &var_offset), done_floop(assembler);
-        assembler->Goto(&floop);
-        assembler->Bind(&floop);
-        {
-          // Compute the next {offset}.
-          Node* offset = assembler->IntPtrAdd(var_offset.value(),
-                                              assembler->IntPtrConstant(1));
+    // Write the character that triggered the switch to the 16-bit string.
+    Node* max_index_offset = assembler->ElementOffsetFromIndex(
+        max_index.value(), UINT16_ELEMENTS,
+        CodeStubAssembler::INTPTR_PARAMETERS,
+        SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+    assembler->StoreNoWriteBarrier(MachineRepresentation::kWord16,
+                                   two_byte_result, max_index_offset, code16);
+    max_index.Bind(
+        assembler->IntPtrAdd(max_index.value(), assembler->IntPtrConstant(1)));
 
-          // Check if we're done with the string.
-          assembler->GotoIf(assembler->WordEqual(offset, length), &done_floop);
-
-          // Load the next code point and truncate it to a 16-bit value.
-          Node* code = assembler->Load(
-              MachineType::AnyTagged(), parent_frame_pointer,
-              assembler->IntPtrAdd(
-                  assembler->WordShl(
-                      assembler->IntPtrSub(length, offset),
-                      assembler->IntPtrConstant(kPointerSizeLog2)),
-                  assembler->IntPtrConstant(
-                      CommonFrameConstants::kFixedFrameSizeAboveFp -
-                      kPointerSize)));
-          Node* code32 = assembler->TruncateTaggedToWord32(context, code);
+    // Resume copying the passed-in arguments from the same place where the
+    // 8-bit copy stopped, but this time copying over all of the characters
+    // using a 16-bit representation.
+    arguments.ForEach(
+        vars,
+        [context, two_byte_result, &max_index](CodeStubAssembler* assembler,
+                                               Node* arg) {
+          Node* code32 = assembler->TruncateTaggedToWord32(context, arg);
           Node* code16 = assembler->Word32And(
               code32, assembler->Int32Constant(String::kMaxUtf16CodeUnit));
 
-          // Store the truncated {code} point at the next offset.
-          assembler->StoreNoWriteBarrier(
-              MachineRepresentation::kWord16, cresult,
-              assembler->IntPtrAdd(
-                  assembler->IntPtrConstant(SeqTwoByteString::kHeaderSize -
-                                            kHeapObjectTag),
-                  assembler->WordShl(offset, 1)),
-              code16);
-          var_offset.Bind(offset);
-          assembler->Goto(&floop);
-        }
+          Node* offset = assembler->ElementOffsetFromIndex(
+              max_index.value(), UINT16_ELEMENTS,
+              CodeStubAssembler::INTPTR_PARAMETERS,
+              SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+          assembler->StoreNoWriteBarrier(MachineRepresentation::kWord16,
+                                         two_byte_result, offset, code16);
+          max_index.Bind(assembler->IntPtrAdd(max_index.value(),
+                                              assembler->IntPtrConstant(1)));
+        },
+        max_index.value());
 
-        // Return the SeqTwoByteString.
-        assembler->Bind(&done_floop);
-        assembler->Return(cresult);
-      }
-    }
-
-    assembler->Bind(&done_loop);
-    assembler->Return(result);
+    arguments.PopAndReturn(two_byte_result);
   }
 }
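
The codegen above is a two-pass strategy: optimistically fill a SeqOneByteString, and on the first code unit above 0xFF, allocate a SeqTwoByteString, bulk-copy the already-written prefix, and finish in the wide representation. A minimal plain-C++ sketch of the same control flow (the container types are stand-ins for the Seq*String classes, not V8 API):

#include <cstdint>
#include <string>
#include <vector>

// Optimistic one-byte pass with a widening fallback, mirroring the stub:
// break out on the first 16-bit unit, copy the prefix, then continue
// writing 16-bit units from the faulting argument onward.
std::u16string FromCharCodes(const std::vector<uint32_t>& args) {
  std::vector<uint8_t> one_byte;
  one_byte.reserve(args.size());
  size_t i = 0;
  for (; i < args.size(); ++i) {
    uint16_t code16 = args[i] & 0xFFFF;  // String::kMaxUtf16CodeUnit mask
    if (code16 > 0xFF) break;            // the 8-bit to 16-bit "fault"
    one_byte.push_back(static_cast<uint8_t>(code16));
  }
  if (i == args.size())  // every unit fit in one byte
    return std::u16string(one_byte.begin(), one_byte.end());
  std::u16string two_byte(one_byte.begin(), one_byte.end());  // prefix copy
  for (; i < args.size(); ++i)
    two_byte.push_back(static_cast<char16_t>(args[i] & 0xFFFF));
  return two_byte;
}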
 
@@ -662,7 +601,7 @@
   List<uc16> two_byte_buffer(length - index);
 
   while (true) {
-    if (code <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
+    if (code <= static_cast<uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
       two_byte_buffer.Add(code);
     } else {
       two_byte_buffer.Add(unibrow::Utf16::LeadSurrogate(code));
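
The new static_cast makes the uc32 comparison against unibrow::Utf16::kMaxNonSurrogateCharCode explicit, most likely to silence a signed/unsigned comparison warning, since the constant is declared as a plain int. For reference, the surrogate split that LeadSurrogate/TrailSurrogate perform for code points above 0xFFFF is the standard UTF-16 encoding:

// Standard UTF-16 surrogate encoding for code points above 0xFFFF,
// which unibrow::Utf16::LeadSurrogate/TrailSurrogate implement.
inline uint16_t LeadSurrogate(uint32_t code) {
  return static_cast<uint16_t>(0xD800 + (((code - 0x10000) >> 10) & 0x3FF));
}
inline uint16_t TrailSurrogate(uint32_t code) {
  return static_cast<uint16_t>(0xDC00 + ((code - 0x10000) & 0x3FF));
}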
@@ -711,7 +650,8 @@
     Label return_emptystring(assembler, Label::kDeferred);
     position = assembler->ToInteger(context, position,
                                     CodeStubAssembler::kTruncateMinusZero);
-    assembler->GotoUnless(assembler->WordIsSmi(position), &return_emptystring);
+    assembler->GotoUnless(assembler->TaggedIsSmi(position),
+                          &return_emptystring);
 
     // Determine the actual length of the {receiver} String.
     Node* receiver_length =
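
The WordIsSmi to TaggedIsSmi renames throughout this patch are mechanical, but the new name is more accurate: the check inspects a tagged value, not an arbitrary machine word. A sketch of the underlying predicate, using V8's pointer-tagging constants (Smis carry tag 0, heap objects tag 1):

// What TaggedIsSmi tests: small integers are tagged values whose low
// bit is kSmiTag (0); heap object pointers have the low bit set.
inline bool TaggedIsSmi(intptr_t tagged) {
  const intptr_t kSmiTagMask = 1;  // (1 << kSmiTagSize) - 1
  const intptr_t kSmiTag = 0;
  return (tagged & kSmiTagMask) == kSmiTag;
}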
@@ -756,7 +696,7 @@
     Label return_nan(assembler, Label::kDeferred);
     position = assembler->ToInteger(context, position,
                                     CodeStubAssembler::kTruncateMinusZero);
-    assembler->GotoUnless(assembler->WordIsSmi(position), &return_nan);
+    assembler->GotoUnless(assembler->TaggedIsSmi(position), &return_nan);
 
     // Determine the actual length of the {receiver} String.
     Node* receiver_length =
@@ -779,6 +719,100 @@
   assembler->Return(result);
 }
 
+// ES6 section 21.1.3.6
+// String.prototype.endsWith ( searchString [ , endPosition ] )
+BUILTIN(StringPrototypeEndsWith) {
+  HandleScope handle_scope(isolate);
+  TO_THIS_STRING(str, "String.prototype.endsWith");
+
+  // Check if the search string is a RegExp and fail if it is.
+  Handle<Object> search = args.atOrUndefined(isolate, 1);
+  Maybe<bool> is_reg_exp = RegExpUtils::IsRegExp(isolate, search);
+  if (is_reg_exp.IsNothing()) {
+    DCHECK(isolate->has_pending_exception());
+    return isolate->heap()->exception();
+  }
+  if (is_reg_exp.FromJust()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kFirstArgumentNotRegExp,
+                              isolate->factory()->NewStringFromStaticChars(
+                                  "String.prototype.endsWith")));
+  }
+  Handle<String> search_string;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, search_string,
+                                     Object::ToString(isolate, search));
+
+  Handle<Object> position = args.atOrUndefined(isolate, 2);
+  int end;
+
+  if (position->IsUndefined(isolate)) {
+    end = str->length();
+  } else {
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
+                                       Object::ToInteger(isolate, position));
+    double index = std::max(position->Number(), 0.0);
+    index = std::min(index, static_cast<double>(str->length()));
+    end = static_cast<uint32_t>(index);
+  }
+
+  int start = end - search_string->length();
+  if (start < 0) return isolate->heap()->false_value();
+
+  FlatStringReader str_reader(isolate, String::Flatten(str));
+  FlatStringReader search_reader(isolate, String::Flatten(search_string));
+
+  for (int i = 0; i < search_string->length(); i++) {
+    if (str_reader.Get(start + i) != search_reader.Get(i)) {
+      return isolate->heap()->false_value();
+    }
+  }
+  return isolate->heap()->true_value();
+}
+
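
StringPrototypeEndsWith above follows the spec steps in order: reject RegExp search values, convert the search value with ToString, clamp the optional endPosition into [0, length], then compare against the window that ends at the clamped index. A sketch of just the index arithmetic (a hypothetical ASCII-only helper, not the V8 code path):

#include <algorithm>
#include <string>

// Clamp the (already ToInteger'd) position to [0, str.length()], then
// test the window [end - search.length(), end). E.g. EndsWith("abcde",
// "cd", 4) is true; a window that would start before index 0 fails fast.
bool EndsWith(const std::string& str, const std::string& search,
              double position /* str.length() when undefined */) {
  double clamped = std::min(std::max(position, 0.0),
                            static_cast<double>(str.length()));
  int end = static_cast<int>(clamped);
  int start = end - static_cast<int>(search.length());
  if (start < 0) return false;
  return str.compare(start, search.length(), search) == 0;
}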
+// ES6 section 21.1.3.7
+// String.prototype.includes ( searchString [ , position ] )
+BUILTIN(StringPrototypeIncludes) {
+  HandleScope handle_scope(isolate);
+  TO_THIS_STRING(str, "String.prototype.includes");
+
+  // Check if the search string is a RegExp and fail if it is.
+  Handle<Object> search = args.atOrUndefined(isolate, 1);
+  Maybe<bool> is_reg_exp = RegExpUtils::IsRegExp(isolate, search);
+  if (is_reg_exp.IsNothing()) {
+    DCHECK(isolate->has_pending_exception());
+    return isolate->heap()->exception();
+  }
+  if (is_reg_exp.FromJust()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kFirstArgumentNotRegExp,
+                              isolate->factory()->NewStringFromStaticChars(
+                                  "String.prototype.includes")));
+  }
+  Handle<String> search_string;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, search_string,
+                                     Object::ToString(isolate, search));
+  Handle<Object> position;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, position,
+      Object::ToInteger(isolate, args.atOrUndefined(isolate, 2)));
+
+  double index = std::max(position->Number(), 0.0);
+  index = std::min(index, static_cast<double>(str->length()));
+
+  int index_in_str = String::IndexOf(isolate, str, search_string,
+                                     static_cast<uint32_t>(index));
+  return *isolate->factory()->ToBoolean(index_in_str != -1);
+}
+
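
Note how includes differs from endsWith only after the shared RegExp guard and clamping: it reduces to an indexOf search from the clamped position, which the builtin delegates to String::IndexOf. A hypothetical ASCII-only equivalent:

#include <algorithm>
#include <string>

// includes(search, position) == (indexOf from the clamped position != -1).
bool Includes(const std::string& str, const std::string& search,
              double position) {
  double clamped = std::min(std::max(position, 0.0),
                            static_cast<double>(str.length()));
  return str.find(search, static_cast<size_t>(clamped)) != std::string::npos;
}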
+// ES6 section 21.1.3.8 String.prototype.indexOf ( searchString [ , position ] )
+BUILTIN(StringPrototypeIndexOf) {
+  HandleScope handle_scope(isolate);
+
+  return String::IndexOf(isolate, args.receiver(),
+                         args.atOrUndefined(isolate, 1),
+                         args.atOrUndefined(isolate, 2));
+}
+
 // ES6 section 21.1.3.9
 // String.prototype.lastIndexOf ( searchString [ , position ] )
 BUILTIN(StringPrototypeLastIndexOf) {
@@ -803,13 +837,13 @@
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, str2, Object::ToString(isolate, args.at<Object>(1)));
 
-  if (str1.is_identical_to(str2)) return Smi::FromInt(0);  // Equal.
+  if (str1.is_identical_to(str2)) return Smi::kZero;  // Equal.
   int str1_length = str1->length();
   int str2_length = str2->length();
 
   // Decide trivial cases without flattening.
   if (str1_length == 0) {
-    if (str2_length == 0) return Smi::FromInt(0);  // Equal.
+    if (str2_length == 0) return Smi::kZero;  // Equal.
     return Smi::FromInt(-str2_length);
   } else {
     if (str2_length == 0) return Smi::FromInt(str1_length);
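
The trivial cases above decide the comparison from lengths alone, before paying for flattening: identical handles and two empty strings compare equal (Smi::kZero), and a single empty string orders by the other string's length. A worked example of the shortcut, in hypothetical standalone form:

#include <string>

// compare("", "ab") == -2 and compare("ab", "") == 2, matching the
// Smi::FromInt(-str2_length) / Smi::FromInt(str1_length) returns above.
int TrivialCompare(const std::string& s1, const std::string& s2) {
  if (&s1 == &s2) return 0;  // is_identical_to
  if (s1.empty() && s2.empty()) return 0;
  if (s1.empty()) return -static_cast<int>(s2.length());
  if (s2.empty()) return static_cast<int>(s1.length());
  return 0;  // placeholder: the real builtin falls through to a
             // character-wise comparison here
}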
@@ -889,7 +923,7 @@
   Node* const length = a->Parameter(2);
   Node* const context = a->Parameter(5);
 
-  Node* const zero = a->SmiConstant(Smi::FromInt(0));
+  Node* const zero = a->SmiConstant(Smi::kZero);
 
   // Check that {receiver} is coercible to Object and convert it to a String.
   Node* const string =
@@ -903,7 +937,7 @@
         a->ToInteger(context, start, CodeStubAssembler::kTruncateMinusZero);
 
     Label if_issmi(a), if_isheapnumber(a, Label::kDeferred);
-    a->Branch(a->WordIsSmi(start_int), &if_issmi, &if_isheapnumber);
+    a->Branch(a->TaggedIsSmi(start_int), &if_issmi, &if_isheapnumber);
 
     a->Bind(&if_issmi);
     {
@@ -947,7 +981,7 @@
           a->ToInteger(context, length, CodeStubAssembler::kTruncateMinusZero));
     }
 
-    a->Branch(a->WordIsSmi(var_length.value()), &if_issmi, &if_isheapnumber);
+    a->Branch(a->TaggedIsSmi(var_length.value()), &if_issmi, &if_isheapnumber);
 
    // Set {length} to min(max({length}, 0), {string_length} - {start})
     a->Bind(&if_issmi);
@@ -967,8 +1001,8 @@
       // two cases according to the spec: if it is negative, "" is returned; if
       // it is positive, then length is set to {string_length} - {start}.
 
-      a->Assert(a->WordEqual(a->LoadMap(var_length.value()),
-                             a->HeapNumberMapConstant()));
+      CSA_ASSERT(a, a->WordEqual(a->LoadMap(var_length.value()),
+                                 a->HeapNumberMapConstant()));
 
       Label if_isnegative(a), if_ispositive(a);
       Node* const float_zero = a->Float64Constant(0.);
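
The Smi and HeapNumber branches above together implement the Annex B.2.3.1 substr clamping: a negative start counts back from the end, and the length is clamped to whatever remains after start. The arithmetic, separated from the CSA plumbing (a sketch, assuming both inputs were already run through ToInteger):

#include <algorithm>
#include <string>

// substr clamping: negative start counts from the end; length is clamped
// to [0, size - start]; an empty or negative remaining count yields "".
std::string Substr(const std::string& s, double start, double length) {
  double size = static_cast<double>(s.size());
  double begin =
      start < 0 ? std::max(size + start, 0.0) : std::min(start, size);
  double count = std::min(std::max(length, 0.0), size - begin);
  if (count <= 0) return std::string();
  return s.substr(static_cast<size_t>(begin), static_cast<size_t>(count));
}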
@@ -1013,7 +1047,7 @@
       a->ToInteger(context, value, CodeStubAssembler::kTruncateMinusZero);
 
   Label if_issmi(a), if_isnotsmi(a, Label::kDeferred);
-  a->Branch(a->WordIsSmi(value_int), &if_issmi, &if_isnotsmi);
+  a->Branch(a->TaggedIsSmi(value_int), &if_issmi, &if_isnotsmi);
 
   a->Bind(&if_issmi);
   {
@@ -1028,7 +1062,7 @@
 
     a->Bind(&if_isoutofbounds);
     {
-      Node* const zero = a->SmiConstant(Smi::FromInt(0));
+      Node* const zero = a->SmiConstant(Smi::kZero);
       var_result.Bind(a->Select(a->SmiLessThan(value_int, zero), zero, limit));
       a->Goto(&out);
     }
@@ -1037,10 +1071,11 @@
   a->Bind(&if_isnotsmi);
   {
     // {value} is a heap number - in this case, it is definitely out of bounds.
-    a->Assert(a->WordEqual(a->LoadMap(value_int), a->HeapNumberMapConstant()));
+    CSA_ASSERT(a,
+               a->WordEqual(a->LoadMap(value_int), a->HeapNumberMapConstant()));
 
     Node* const float_zero = a->Float64Constant(0.);
-    Node* const smi_zero = a->SmiConstant(Smi::FromInt(0));
+    Node* const smi_zero = a->SmiConstant(Smi::kZero);
     Node* const value_float = a->LoadHeapNumberValue(value_int);
     var_result.Bind(a->Select(a->Float64LessThan(value_float, float_zero),
                               smi_zero, limit));
@@ -1106,6 +1141,55 @@
   }
 }
 
+BUILTIN(StringPrototypeStartsWith) {
+  HandleScope handle_scope(isolate);
+  TO_THIS_STRING(str, "String.prototype.startsWith");
+
+  // Check if the search string is a RegExp and fail if it is.
+  Handle<Object> search = args.atOrUndefined(isolate, 1);
+  Maybe<bool> is_reg_exp = RegExpUtils::IsRegExp(isolate, search);
+  if (is_reg_exp.IsNothing()) {
+    DCHECK(isolate->has_pending_exception());
+    return isolate->heap()->exception();
+  }
+  if (is_reg_exp.FromJust()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kFirstArgumentNotRegExp,
+                              isolate->factory()->NewStringFromStaticChars(
+                                  "String.prototype.startsWith")));
+  }
+  Handle<String> search_string;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, search_string,
+                                     Object::ToString(isolate, search));
+
+  Handle<Object> position = args.atOrUndefined(isolate, 2);
+  int start;
+
+  if (position->IsUndefined(isolate)) {
+    start = 0;
+  } else {
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
+                                       Object::ToInteger(isolate, position));
+    double index = std::max(position->Number(), 0.0);
+    index = std::min(index, static_cast<double>(str->length()));
+    start = static_cast<uint32_t>(index);
+  }
+
+  if (start + search_string->length() > str->length()) {
+    return isolate->heap()->false_value();
+  }
+
+  FlatStringReader str_reader(isolate, String::Flatten(str));
+  FlatStringReader search_reader(isolate, String::Flatten(search_string));
+
+  for (int i = 0; i < search_string->length(); i++) {
+    if (str_reader.Get(start + i) != search_reader.Get(i)) {
+      return isolate->heap()->false_value();
+    }
+  }
+  return isolate->heap()->true_value();
+}
+
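
StringPrototypeStartsWith mirrors endsWith, anchoring the comparison window at the clamped position rather than ending there; both flatten the two strings once so that each FlatStringReader::Get is cheap random access. The shape of the shared final loop, with Reader as a stand-in for FlatStringReader rather than the V8 type:

// Character-wise window comparison used by both startsWith and endsWith
// after flattening; start is the (already validated) window offset.
template <typename Reader>
bool MatchesAt(const Reader& str, const Reader& search, int start,
               int search_length) {
  for (int i = 0; i < search_length; i++) {
    if (str.Get(start + i) != search.Get(i)) return false;
  }
  return true;
}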
 // ES6 section 21.1.3.25 String.prototype.toString ()
 void Builtins::Generate_StringPrototypeToString(CodeStubAssembler* assembler) {
   typedef compiler::Node Node;
@@ -1173,7 +1257,7 @@
                                   Heap::kEmptyFixedArrayRootIndex);
   assembler->StoreObjectFieldNoWriteBarrier(
       iterator, JSStringIterator::kStringOffset, string);
-  Node* index = assembler->SmiConstant(Smi::FromInt(0));
+  Node* index = assembler->SmiConstant(Smi::kZero);
   assembler->StoreObjectFieldNoWriteBarrier(
       iterator, JSStringIterator::kNextIndexOffset, index);
   assembler->Return(iterator);
@@ -1218,17 +1302,16 @@
   {
     Node* lead = var_result.value();
     Node* trail = var_trail.value();
-#ifdef ENABLE_SLOW_DCHECKS
+
     // Check that this path is only taken if a surrogate pair is found
-    assembler->Assert(assembler->Uint32GreaterThanOrEqual(
-        lead, assembler->Int32Constant(0xD800)));
-    assembler->Assert(
-        assembler->Uint32LessThan(lead, assembler->Int32Constant(0xDC00)));
-    assembler->Assert(assembler->Uint32GreaterThanOrEqual(
-        trail, assembler->Int32Constant(0xDC00)));
-    assembler->Assert(
-        assembler->Uint32LessThan(trail, assembler->Int32Constant(0xE000)));
-#endif
+    CSA_SLOW_ASSERT(assembler, assembler->Uint32GreaterThanOrEqual(
+                                   lead, assembler->Int32Constant(0xD800)));
+    CSA_SLOW_ASSERT(assembler, assembler->Uint32LessThan(
+                                   lead, assembler->Int32Constant(0xDC00)));
+    CSA_SLOW_ASSERT(assembler, assembler->Uint32GreaterThanOrEqual(
+                                   trail, assembler->Int32Constant(0xDC00)));
+    CSA_SLOW_ASSERT(assembler, assembler->Uint32LessThan(
+                                   trail, assembler->Int32Constant(0xE000)));
 
     switch (encoding) {
       case UnicodeEncoding::UTF16:
@@ -1289,7 +1372,7 @@
   Node* iterator = assembler->Parameter(0);
   Node* context = assembler->Parameter(3);
 
-  assembler->GotoIf(assembler->WordIsSmi(iterator), &throw_bad_receiver);
+  assembler->GotoIf(assembler->TaggedIsSmi(iterator), &throw_bad_receiver);
   assembler->GotoUnless(
       assembler->WordEqual(assembler->LoadInstanceType(iterator),
                            assembler->Int32Constant(JS_STRING_ITERATOR_TYPE)),
diff --git a/src/builtins/builtins-typedarray.cc b/src/builtins/builtins-typedarray.cc
index ede04f2..94173fa 100644
--- a/src/builtins/builtins-typedarray.cc
+++ b/src/builtins/builtins-typedarray.cc
@@ -31,7 +31,8 @@
 
   // Check if the {receiver} is actually a JSTypedArray.
   Label if_receiverisincompatible(assembler, Label::kDeferred);
-  assembler->GotoIf(assembler->WordIsSmi(receiver), &if_receiverisincompatible);
+  assembler->GotoIf(assembler->TaggedIsSmi(receiver),
+                    &if_receiverisincompatible);
   Node* receiver_instance_type = assembler->LoadInstanceType(receiver);
   assembler->GotoUnless(
       assembler->Word32Equal(receiver_instance_type,
@@ -41,16 +42,9 @@
   // Check if the {receiver}'s JSArrayBuffer was neutered.
   Node* receiver_buffer =
       assembler->LoadObjectField(receiver, JSTypedArray::kBufferOffset);
-  Node* receiver_buffer_bit_field = assembler->LoadObjectField(
-      receiver_buffer, JSArrayBuffer::kBitFieldOffset, MachineType::Uint32());
   Label if_receiverisneutered(assembler, Label::kDeferred);
-  assembler->GotoUnless(
-      assembler->Word32Equal(
-          assembler->Word32And(
-              receiver_buffer_bit_field,
-              assembler->Int32Constant(JSArrayBuffer::WasNeutered::kMask)),
-          assembler->Int32Constant(0)),
-      &if_receiverisneutered);
+  assembler->GotoIf(assembler->IsDetachedBuffer(receiver_buffer),
+                    &if_receiverisneutered);
   assembler->Return(assembler->LoadObjectField(receiver, object_offset));
 
   assembler->Bind(&if_receiverisneutered);
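
The replacement of the hand-rolled bit-field test with IsDetachedBuffer is a pure refactor: the removed lines show exactly what the helper encapsulates, a masked load of the JSArrayBuffer bit field. Restated as a plain predicate (constants illustrative; the real mask is JSArrayBuffer::WasNeutered::kMask):

// The neutered/detached check the removed lines performed inline, and
// which IsDetachedBuffer now centralizes: test the WasNeutered bit.
inline bool IsDetached(uint32_t bit_field, uint32_t was_neutered_mask) {
  return (bit_field & was_neutered_mask) != 0;
}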
@@ -97,5 +91,79 @@
                                     JSTypedArray::kLengthOffset);
 }
 
+namespace {
+
+template <IterationKind kIterationKind>
+void Generate_TypedArrayPrototypeIterationMethod(CodeStubAssembler* assembler,
+                                                 const char* method_name) {
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* receiver = assembler->Parameter(0);
+  Node* context = assembler->Parameter(3);
+
+  Label throw_bad_receiver(assembler, Label::kDeferred);
+  Label throw_typeerror(assembler, Label::kDeferred);
+
+  assembler->GotoIf(assembler->TaggedIsSmi(receiver), &throw_bad_receiver);
+
+  Node* map = assembler->LoadMap(receiver);
+  Node* instance_type = assembler->LoadMapInstanceType(map);
+  assembler->GotoIf(
+      assembler->Word32NotEqual(instance_type,
+                                assembler->Int32Constant(JS_TYPED_ARRAY_TYPE)),
+      &throw_bad_receiver);
+
+  // Check if the {receiver}'s JSArrayBuffer was neutered.
+  Node* receiver_buffer =
+      assembler->LoadObjectField(receiver, JSTypedArray::kBufferOffset);
+  Label if_receiverisneutered(assembler, Label::kDeferred);
+  assembler->GotoIf(assembler->IsDetachedBuffer(receiver_buffer),
+                    &if_receiverisneutered);
+
+  assembler->Return(assembler->CreateArrayIterator(receiver, map, instance_type,
+                                                   context, kIterationKind));
+
+  Variable var_message(assembler, MachineRepresentation::kTagged);
+  assembler->Bind(&throw_bad_receiver);
+  var_message.Bind(
+      assembler->SmiConstant(Smi::FromInt(MessageTemplate::kNotTypedArray)));
+  assembler->Goto(&throw_typeerror);
+
+  assembler->Bind(&if_receiverisneutered);
+  var_message.Bind(assembler->SmiConstant(
+      Smi::FromInt(MessageTemplate::kDetachedOperation)));
+  assembler->Goto(&throw_typeerror);
+
+  assembler->Bind(&throw_typeerror);
+  {
+    Node* arg1 = assembler->HeapConstant(
+        assembler->isolate()->factory()->NewStringFromAsciiChecked(method_name,
+                                                                   TENURED));
+    Node* result = assembler->CallRuntime(Runtime::kThrowTypeError, context,
+                                          var_message.value(), arg1);
+    assembler->Return(result);
+  }
+}
+}  // namespace
+
+void Builtins::Generate_TypedArrayPrototypeValues(
+    CodeStubAssembler* assembler) {
+  Generate_TypedArrayPrototypeIterationMethod<IterationKind::kValues>(
+      assembler, "%TypedArray%.prototype.values()");
+}
+
+void Builtins::Generate_TypedArrayPrototypeEntries(
+    CodeStubAssembler* assembler) {
+  Generate_TypedArrayPrototypeIterationMethod<IterationKind::kEntries>(
+      assembler, "%TypedArray%.prototype.entries()");
+}
+
+void Builtins::Generate_TypedArrayPrototypeKeys(CodeStubAssembler* assembler) {
+  Generate_TypedArrayPrototypeIterationMethod<IterationKind::kKeys>(
+      assembler, "%TypedArray%.prototype.keys()");
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/builtins/builtins-utils.h b/src/builtins/builtins-utils.h
index ca1786c..6378fdf 100644
--- a/src/builtins/builtins-utils.h
+++ b/src/builtins/builtins-utils.h
@@ -48,9 +48,8 @@
   static const int kNumExtraArgs = 3;
   static const int kNumExtraArgsWithReceiver = 4;
 
-  template <class S>
-  Handle<S> target() {
-    return Arguments::at<S>(Arguments::length() - 1 - kTargetOffset);
+  Handle<JSFunction> target() {
+    return Arguments::at<JSFunction>(Arguments::length() - 1 - kTargetOffset);
   }
   Handle<HeapObject> new_target() {
     return Arguments::at<HeapObject>(Arguments::length() - 1 -
@@ -92,8 +91,7 @@
   MUST_USE_RESULT Object* Builtin_##name(                                     \
       int args_length, Object** args_object, Isolate* isolate) {              \
     DCHECK(isolate->context() == nullptr || isolate->context()->IsContext()); \
-    if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||       \
-                    FLAG_runtime_call_stats)) {                               \
+    if (V8_UNLIKELY(FLAG_runtime_stats)) {                                    \
       return Builtin_Impl_Stats_##name(args_length, args_object, isolate);    \
     }                                                                         \
     BuiltinArguments args(args_length, args_object);                          \
diff --git a/src/builtins/builtins.cc b/src/builtins/builtins.cc
index d5a0e17..ec981fe 100644
--- a/src/builtins/builtins.cc
+++ b/src/builtins/builtins.cc
@@ -83,8 +83,10 @@
                                    CodeAssemblerGenerator generator, int argc,
                                    Code::Flags flags, const char* name) {
   HandleScope scope(isolate);
-  Zone zone(isolate->allocator());
-  CodeStubAssembler assembler(isolate, &zone, argc, flags, name);
+  Zone zone(isolate->allocator(), ZONE_NAME);
+  const int argc_with_recv =
+      (argc == SharedFunctionInfo::kDontAdaptArgumentsSentinel) ? 0 : argc + 1;
+  CodeStubAssembler assembler(isolate, &zone, argc_with_recv, flags, name);
   generator(&assembler);
   Handle<Code> code = assembler.GenerateCode();
   PostBuildProfileAndTracing(isolate, *code, name);
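
This argc adjustment is what drives the wholesale TFJ renumbering later in builtins.h: a TFJ builtin's declared argc now counts only its formal parameters, with the receiver slot added back here, while the kDontAdaptArgumentsSentinel case (variadic builtins such as StringFromCharCode) is treated as zero declared parameters. A worked example of the mapping:

// TFJ(MathPow, 2) declares two formals (x, y); the assembler is built
// with 2 + 1 = 3 parameters, the extra slot being the receiver.
// TFJ(StringFromCharCode, kDontAdaptArgumentsSentinel) yields 0.
int ArgcWithReceiver(int argc, int dont_adapt_sentinel) {
  return argc == dont_adapt_sentinel ? 0 : argc + 1;
}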
@@ -97,7 +99,7 @@
                                    CallDescriptors::Key interface_descriptor,
                                    Code::Flags flags, const char* name) {
   HandleScope scope(isolate);
-  Zone zone(isolate->allocator());
+  Zone zone(isolate->allocator(), ZONE_NAME);
   // The interface descriptor with given key must be initialized at this point
   // and this construction just queries the details from the descriptors table.
   CallInterfaceDescriptor descriptor(isolate, interface_descriptor);
diff --git a/src/builtins/builtins.h b/src/builtins/builtins.h
index 3579f3c..a6b126d 100644
--- a/src/builtins/builtins.h
+++ b/src/builtins/builtins.h
@@ -52,6 +52,8 @@
   /* Code aging */                                                            \
   CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, ASM)                       \
                                                                               \
+  TFS(ToObject, BUILTIN, kNoExtraICState, TypeConversion)                     \
+                                                                              \
   /* Calls */                                                                 \
   ASM(ArgumentsAdaptorTrampoline)                                             \
   /* ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) */             \
@@ -107,7 +109,6 @@
                                                                               \
   /* Interpreter */                                                           \
   ASM(InterpreterEntryTrampoline)                                             \
-  ASM(InterpreterMarkBaselineOnReturn)                                        \
   ASM(InterpreterPushArgsAndCall)                                             \
   ASM(InterpreterPushArgsAndCallFunction)                                     \
   ASM(InterpreterPushArgsAndTailCall)                                         \
@@ -115,6 +116,7 @@
   ASM(InterpreterPushArgsAndConstruct)                                        \
   ASM(InterpreterPushArgsAndConstructFunction)                                \
   ASM(InterpreterPushArgsAndConstructArray)                                   \
+  ASM(InterpreterEnterBytecodeAdvance)                                        \
   ASM(InterpreterEnterBytecodeDispatch)                                       \
   ASM(InterpreterOnStackReplacement)                                          \
                                                                               \
@@ -151,7 +153,6 @@
   TFS(GrowFastDoubleElements, BUILTIN, kNoExtraICState, GrowArrayElements)    \
   TFS(GrowFastSmiOrObjectElements, BUILTIN, kNoExtraICState,                  \
       GrowArrayElements)                                                      \
-  TFS(OrdinaryHasInstance, BUILTIN, kNoExtraICState, Compare)                 \
                                                                               \
   /* Debugger */                                                              \
   DBG(FrameDropper_LiveEdit)                                                  \
@@ -173,9 +174,11 @@
   TFS(NonNumberToNumber, BUILTIN, kNoExtraICState, TypeConversion)            \
   TFS(ToNumber, BUILTIN, kNoExtraICState, TypeConversion)                     \
   TFS(ToString, BUILTIN, kNoExtraICState, TypeConversion)                     \
+  TFS(ToInteger, BUILTIN, kNoExtraICState, TypeConversion)                    \
+  TFS(ToLength, BUILTIN, kNoExtraICState, TypeConversion)                     \
+  TFS(Typeof, BUILTIN, kNoExtraICState, Typeof)                               \
                                                                               \
   /* Handlers */                                                              \
-  ASH(KeyedLoadIC_Megamorphic, KEYED_LOAD_IC, kNoExtraICState)                \
   TFS(KeyedLoadIC_Megamorphic_TF, KEYED_LOAD_IC, kNoExtraICState,             \
       LoadWithVector)                                                         \
   ASM(KeyedLoadIC_Miss)                                                       \
@@ -183,6 +186,10 @@
   ASH(KeyedStoreIC_Megamorphic, KEYED_STORE_IC, kNoExtraICState)              \
   ASH(KeyedStoreIC_Megamorphic_Strict, KEYED_STORE_IC,                        \
       StoreICState::kStrictModeState)                                         \
+  TFS(KeyedStoreIC_Megamorphic_TF, KEYED_STORE_IC, kNoExtraICState,           \
+      StoreWithVector)                                                        \
+  TFS(KeyedStoreIC_Megamorphic_Strict_TF, KEYED_STORE_IC,                     \
+      StoreICState::kStrictModeState, StoreWithVector)                        \
   ASM(KeyedStoreIC_Miss)                                                      \
   ASH(KeyedStoreIC_Slow, HANDLER, Code::KEYED_STORE_IC)                       \
   TFS(LoadGlobalIC_Miss, BUILTIN, kNoExtraICState, LoadGlobalWithVector)      \
@@ -210,16 +217,24 @@
   ASM(InternalArrayCode)                                                      \
   CPP(ArrayConcat)                                                            \
   /* ES6 section 22.1.2.2 Array.isArray */                                    \
-  TFJ(ArrayIsArray, 2)                                                        \
+  TFJ(ArrayIsArray, 1)                                                        \
   /* ES7 #sec-array.prototype.includes */                                     \
-  TFJ(ArrayIncludes, 3)                                                       \
-  TFJ(ArrayIndexOf, 3)                                                        \
+  TFJ(ArrayIncludes, 2)                                                       \
+  TFJ(ArrayIndexOf, 2)                                                        \
   CPP(ArrayPop)                                                               \
   CPP(ArrayPush)                                                              \
   CPP(ArrayShift)                                                             \
   CPP(ArraySlice)                                                             \
   CPP(ArraySplice)                                                            \
   CPP(ArrayUnshift)                                                           \
+  /* ES6 #sec-array.prototype.entries */                                      \
+  TFJ(ArrayPrototypeEntries, 0)                                               \
+  /* ES6 #sec-array.prototype.keys */                                         \
+  TFJ(ArrayPrototypeKeys, 0)                                                  \
+  /* ES6 #sec-array.prototype.values */                                       \
+  TFJ(ArrayPrototypeValues, 0)                                                \
+  /* ES6 #sec-%arrayiteratorprototype%.next */                                \
+  TFJ(ArrayIteratorPrototypeNext, 0)                                          \
                                                                               \
   /* ArrayBuffer */                                                           \
   CPP(ArrayBufferConstructor)                                                 \
@@ -231,9 +246,9 @@
   CPP(BooleanConstructor)                                                     \
   CPP(BooleanConstructor_ConstructStub)                                       \
   /* ES6 section 19.3.3.2 Boolean.prototype.toString ( ) */                   \
-  TFJ(BooleanPrototypeToString, 1)                                            \
+  TFJ(BooleanPrototypeToString, 0)                                            \
   /* ES6 section 19.3.3.3 Boolean.prototype.valueOf ( ) */                    \
-  TFJ(BooleanPrototypeValueOf, 1)                                             \
+  TFJ(BooleanPrototypeValueOf, 0)                                             \
                                                                               \
   /* CallSite */                                                              \
   CPP(CallSitePrototypeGetColumnNumber)                                       \
@@ -280,41 +295,41 @@
   CPP(DateConstructor)                                                        \
   CPP(DateConstructor_ConstructStub)                                          \
   /* ES6 section 20.3.4.2 Date.prototype.getDate ( ) */                       \
-  TFJ(DatePrototypeGetDate, 1)                                                \
+  TFJ(DatePrototypeGetDate, 0)                                                \
   /* ES6 section 20.3.4.3 Date.prototype.getDay ( ) */                        \
-  TFJ(DatePrototypeGetDay, 1)                                                 \
+  TFJ(DatePrototypeGetDay, 0)                                                 \
   /* ES6 section 20.3.4.4 Date.prototype.getFullYear ( ) */                   \
-  TFJ(DatePrototypeGetFullYear, 1)                                            \
+  TFJ(DatePrototypeGetFullYear, 0)                                            \
   /* ES6 section 20.3.4.5 Date.prototype.getHours ( ) */                      \
-  TFJ(DatePrototypeGetHours, 1)                                               \
+  TFJ(DatePrototypeGetHours, 0)                                               \
   /* ES6 section 20.3.4.6 Date.prototype.getMilliseconds ( ) */               \
-  TFJ(DatePrototypeGetMilliseconds, 1)                                        \
+  TFJ(DatePrototypeGetMilliseconds, 0)                                        \
   /* ES6 section 20.3.4.7 Date.prototype.getMinutes ( ) */                    \
-  TFJ(DatePrototypeGetMinutes, 1)                                             \
+  TFJ(DatePrototypeGetMinutes, 0)                                             \
   /* ES6 section 20.3.4.8 Date.prototype.getMonth */                          \
-  TFJ(DatePrototypeGetMonth, 1)                                               \
+  TFJ(DatePrototypeGetMonth, 0)                                               \
   /* ES6 section 20.3.4.9 Date.prototype.getSeconds ( ) */                    \
-  TFJ(DatePrototypeGetSeconds, 1)                                             \
+  TFJ(DatePrototypeGetSeconds, 0)                                             \
   /* ES6 section 20.3.4.10 Date.prototype.getTime ( ) */                      \
-  TFJ(DatePrototypeGetTime, 1)                                                \
+  TFJ(DatePrototypeGetTime, 0)                                                \
   /* ES6 section 20.3.4.11 Date.prototype.getTimezoneOffset ( ) */            \
-  TFJ(DatePrototypeGetTimezoneOffset, 1)                                      \
+  TFJ(DatePrototypeGetTimezoneOffset, 0)                                      \
   /* ES6 section 20.3.4.12 Date.prototype.getUTCDate ( ) */                   \
-  TFJ(DatePrototypeGetUTCDate, 1)                                             \
+  TFJ(DatePrototypeGetUTCDate, 0)                                             \
   /* ES6 section 20.3.4.13 Date.prototype.getUTCDay ( ) */                    \
-  TFJ(DatePrototypeGetUTCDay, 1)                                              \
+  TFJ(DatePrototypeGetUTCDay, 0)                                              \
   /* ES6 section 20.3.4.14 Date.prototype.getUTCFullYear ( ) */               \
-  TFJ(DatePrototypeGetUTCFullYear, 1)                                         \
+  TFJ(DatePrototypeGetUTCFullYear, 0)                                         \
   /* ES6 section 20.3.4.15 Date.prototype.getUTCHours ( ) */                  \
-  TFJ(DatePrototypeGetUTCHours, 1)                                            \
+  TFJ(DatePrototypeGetUTCHours, 0)                                            \
   /* ES6 section 20.3.4.16 Date.prototype.getUTCMilliseconds ( ) */           \
-  TFJ(DatePrototypeGetUTCMilliseconds, 1)                                     \
+  TFJ(DatePrototypeGetUTCMilliseconds, 0)                                     \
   /* ES6 section 20.3.4.17 Date.prototype.getUTCMinutes ( ) */                \
-  TFJ(DatePrototypeGetUTCMinutes, 1)                                          \
+  TFJ(DatePrototypeGetUTCMinutes, 0)                                          \
   /* ES6 section 20.3.4.18 Date.prototype.getUTCMonth ( ) */                  \
-  TFJ(DatePrototypeGetUTCMonth, 1)                                            \
+  TFJ(DatePrototypeGetUTCMonth, 0)                                            \
   /* ES6 section 20.3.4.19 Date.prototype.getUTCSeconds ( ) */                \
-  TFJ(DatePrototypeGetUTCSeconds, 1)                                          \
+  TFJ(DatePrototypeGetUTCSeconds, 0)                                          \
   CPP(DatePrototypeGetYear)                                                   \
   CPP(DatePrototypeSetYear)                                                   \
   CPP(DateNow)                                                                \
@@ -360,17 +375,17 @@
   CPP(FunctionPrototypeBind)                                                  \
   ASM(FunctionPrototypeCall)                                                  \
   /* ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] ( V ) */       \
-  TFJ(FunctionPrototypeHasInstance, 2)                                        \
+  TFJ(FunctionPrototypeHasInstance, 1)                                        \
   CPP(FunctionPrototypeToString)                                              \
                                                                               \
   /* Generator and Async */                                                   \
   CPP(GeneratorFunctionConstructor)                                           \
   /* ES6 section 25.3.1.2 Generator.prototype.next ( value ) */               \
-  TFJ(GeneratorPrototypeNext, 2)                                              \
+  TFJ(GeneratorPrototypeNext, 1)                                              \
   /* ES6 section 25.3.1.3 Generator.prototype.return ( value ) */             \
-  TFJ(GeneratorPrototypeReturn, 2)                                            \
+  TFJ(GeneratorPrototypeReturn, 1)                                            \
   /* ES6 section 25.3.1.4 Generator.prototype.throw ( exception ) */          \
-  TFJ(GeneratorPrototypeThrow, 2)                                             \
+  TFJ(GeneratorPrototypeThrow, 1)                                             \
   CPP(AsyncFunctionConstructor)                                               \
                                                                               \
   /* Global object */                                                         \
@@ -382,12 +397,12 @@
   CPP(GlobalUnescape)                                                         \
   CPP(GlobalEval)                                                             \
   /* ES6 section 18.2.2 isFinite ( number ) */                                \
-  TFJ(GlobalIsFinite, 2)                                                      \
+  TFJ(GlobalIsFinite, 1)                                                      \
   /* ES6 section 18.2.3 isNaN ( number ) */                                   \
-  TFJ(GlobalIsNaN, 2)                                                         \
+  TFJ(GlobalIsNaN, 1)                                                         \
                                                                               \
   /* ES6 #sec-%iteratorprototype%-@@iterator */                               \
-  TFJ(IteratorPrototypeIterator, 1)                                           \
+  TFJ(IteratorPrototypeIterator, 0)                                           \
                                                                               \
   /* JSON */                                                                  \
   CPP(JsonParse)                                                              \
@@ -395,73 +410,75 @@
                                                                               \
   /* Math */                                                                  \
   /* ES6 section 20.2.2.1 Math.abs ( x ) */                                   \
-  TFJ(MathAbs, 2)                                                             \
+  TFJ(MathAbs, 1)                                                             \
   /* ES6 section 20.2.2.2 Math.acos ( x ) */                                  \
-  TFJ(MathAcos, 2)                                                            \
+  TFJ(MathAcos, 1)                                                            \
   /* ES6 section 20.2.2.3 Math.acosh ( x ) */                                 \
-  TFJ(MathAcosh, 2)                                                           \
+  TFJ(MathAcosh, 1)                                                           \
   /* ES6 section 20.2.2.4 Math.asin ( x ) */                                  \
-  TFJ(MathAsin, 2)                                                            \
+  TFJ(MathAsin, 1)                                                            \
   /* ES6 section 20.2.2.5 Math.asinh ( x ) */                                 \
-  TFJ(MathAsinh, 2)                                                           \
+  TFJ(MathAsinh, 1)                                                           \
   /* ES6 section 20.2.2.6 Math.atan ( x ) */                                  \
-  TFJ(MathAtan, 2)                                                            \
+  TFJ(MathAtan, 1)                                                            \
   /* ES6 section 20.2.2.7 Math.atanh ( x ) */                                 \
-  TFJ(MathAtanh, 2)                                                           \
+  TFJ(MathAtanh, 1)                                                           \
   /* ES6 section 20.2.2.8 Math.atan2 ( y, x ) */                              \
-  TFJ(MathAtan2, 3)                                                           \
+  TFJ(MathAtan2, 2)                                                           \
   /* ES6 section 20.2.2.9 Math.cbrt ( x ) */                                  \
-  TFJ(MathCbrt, 2)                                                            \
+  TFJ(MathCbrt, 1)                                                            \
   /* ES6 section 20.2.2.10 Math.ceil ( x ) */                                 \
-  TFJ(MathCeil, 2)                                                            \
+  TFJ(MathCeil, 1)                                                            \
   /* ES6 section 20.2.2.11 Math.clz32 ( x ) */                                \
-  TFJ(MathClz32, 2)                                                           \
+  TFJ(MathClz32, 1)                                                           \
   /* ES6 section 20.2.2.12 Math.cos ( x ) */                                  \
-  TFJ(MathCos, 2)                                                             \
+  TFJ(MathCos, 1)                                                             \
   /* ES6 section 20.2.2.13 Math.cosh ( x ) */                                 \
-  TFJ(MathCosh, 2)                                                            \
+  TFJ(MathCosh, 1)                                                            \
   /* ES6 section 20.2.2.14 Math.exp ( x ) */                                  \
-  TFJ(MathExp, 2)                                                             \
+  TFJ(MathExp, 1)                                                             \
   /* ES6 section 20.2.2.15 Math.expm1 ( x ) */                                \
-  TFJ(MathExpm1, 2)                                                           \
+  TFJ(MathExpm1, 1)                                                           \
   /* ES6 section 20.2.2.16 Math.floor ( x ) */                                \
-  TFJ(MathFloor, 2)                                                           \
+  TFJ(MathFloor, 1)                                                           \
   /* ES6 section 20.2.2.17 Math.fround ( x ) */                               \
-  TFJ(MathFround, 2)                                                          \
+  TFJ(MathFround, 1)                                                          \
   /* ES6 section 20.2.2.18 Math.hypot ( value1, value2, ...values ) */        \
   CPP(MathHypot)                                                              \
   /* ES6 section 20.2.2.19 Math.imul ( x, y ) */                              \
-  TFJ(MathImul, 3)                                                            \
+  TFJ(MathImul, 2)                                                            \
   /* ES6 section 20.2.2.20 Math.log ( x ) */                                  \
-  TFJ(MathLog, 2)                                                             \
+  TFJ(MathLog, 1)                                                             \
   /* ES6 section 20.2.2.21 Math.log1p ( x ) */                                \
-  TFJ(MathLog1p, 2)                                                           \
+  TFJ(MathLog1p, 1)                                                           \
   /* ES6 section 20.2.2.22 Math.log10 ( x ) */                                \
-  TFJ(MathLog10, 2)                                                           \
+  TFJ(MathLog10, 1)                                                           \
   /* ES6 section 20.2.2.23 Math.log2 ( x ) */                                 \
-  TFJ(MathLog2, 2)                                                            \
+  TFJ(MathLog2, 1)                                                            \
   /* ES6 section 20.2.2.24 Math.max ( value1, value2 , ...values ) */         \
   ASM(MathMax)                                                                \
   /* ES6 section 20.2.2.25 Math.min ( value1, value2 , ...values ) */         \
   ASM(MathMin)                                                                \
   /* ES6 section 20.2.2.26 Math.pow ( x, y ) */                               \
-  TFJ(MathPow, 3)                                                             \
+  TFJ(MathPow, 2)                                                             \
+  /* ES6 section 20.2.2.27 Math.random */                                     \
+  TFJ(MathRandom, 0)                                                          \
   /* ES6 section 20.2.2.28 Math.round ( x ) */                                \
-  TFJ(MathRound, 2)                                                           \
+  TFJ(MathRound, 1)                                                           \
   /* ES6 section 20.2.2.29 Math.sign ( x ) */                                 \
-  TFJ(MathSign, 2)                                                            \
+  TFJ(MathSign, 1)                                                            \
   /* ES6 section 20.2.2.30 Math.sin ( x ) */                                  \
-  TFJ(MathSin, 2)                                                             \
+  TFJ(MathSin, 1)                                                             \
   /* ES6 section 20.2.2.31 Math.sinh ( x ) */                                 \
-  TFJ(MathSinh, 2)                                                            \
+  TFJ(MathSinh, 1)                                                            \
   /* ES6 section 20.2.2.32 Math.sqrt ( x ) */                                 \
-  TFJ(MathTan, 2)                                                             \
+  TFJ(MathTan, 1)                                                             \
   /* ES6 section 20.2.2.33 Math.tan ( x ) */                                  \
-  TFJ(MathTanh, 2)                                                            \
+  TFJ(MathTanh, 1)                                                            \
   /* ES6 section 20.2.2.34 Math.tanh ( x ) */                                 \
-  TFJ(MathSqrt, 2)                                                            \
+  TFJ(MathSqrt, 1)                                                            \
   /* ES6 section 20.2.2.35 Math.trunc ( x ) */                                \
-  TFJ(MathTrunc, 2)                                                           \
+  TFJ(MathTrunc, 1)                                                           \
                                                                               \
   /* Number */                                                                \
   /* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Call]] case */       \
@@ -469,24 +486,47 @@
   /* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Construct]] case */  \
   ASM(NumberConstructor_ConstructStub)                                        \
   /* ES6 section 20.1.2.2 Number.isFinite ( number ) */                       \
-  TFJ(NumberIsFinite, 2)                                                      \
+  TFJ(NumberIsFinite, 1)                                                      \
   /* ES6 section 20.1.2.3 Number.isInteger ( number ) */                      \
-  TFJ(NumberIsInteger, 2)                                                     \
+  TFJ(NumberIsInteger, 1)                                                     \
   /* ES6 section 20.1.2.4 Number.isNaN ( number ) */                          \
-  TFJ(NumberIsNaN, 2)                                                         \
+  TFJ(NumberIsNaN, 1)                                                         \
   /* ES6 section 20.1.2.5 Number.isSafeInteger ( number ) */                  \
-  TFJ(NumberIsSafeInteger, 2)                                                 \
+  TFJ(NumberIsSafeInteger, 1)                                                 \
+  /* ES6 section 20.1.2.12 Number.parseFloat ( string ) */                    \
+  TFJ(NumberParseFloat, 1)                                                    \
+  /* ES6 section 20.1.2.13 Number.parseInt ( string, radix ) */               \
+  TFJ(NumberParseInt, 2)                                                      \
   CPP(NumberPrototypeToExponential)                                           \
   CPP(NumberPrototypeToFixed)                                                 \
   CPP(NumberPrototypeToLocaleString)                                          \
   CPP(NumberPrototypeToPrecision)                                             \
   CPP(NumberPrototypeToString)                                                \
   /* ES6 section 20.1.3.7 Number.prototype.valueOf ( ) */                     \
-  TFJ(NumberPrototypeValueOf, 1)                                              \
+  TFJ(NumberPrototypeValueOf, 0)                                              \
+  TFS(Add, BUILTIN, kNoExtraICState, BinaryOp)                                \
+  TFS(Subtract, BUILTIN, kNoExtraICState, BinaryOp)                           \
+  TFS(Multiply, BUILTIN, kNoExtraICState, BinaryOp)                           \
+  TFS(Divide, BUILTIN, kNoExtraICState, BinaryOp)                             \
+  TFS(Modulus, BUILTIN, kNoExtraICState, BinaryOp)                            \
+  TFS(BitwiseAnd, BUILTIN, kNoExtraICState, BinaryOp)                         \
+  TFS(BitwiseOr, BUILTIN, kNoExtraICState, BinaryOp)                          \
+  TFS(BitwiseXor, BUILTIN, kNoExtraICState, BinaryOp)                         \
+  TFS(ShiftLeft, BUILTIN, kNoExtraICState, BinaryOp)                          \
+  TFS(ShiftRight, BUILTIN, kNoExtraICState, BinaryOp)                         \
+  TFS(ShiftRightLogical, BUILTIN, kNoExtraICState, BinaryOp)                  \
+  TFS(LessThan, BUILTIN, kNoExtraICState, Compare)                            \
+  TFS(LessThanOrEqual, BUILTIN, kNoExtraICState, Compare)                     \
+  TFS(GreaterThan, BUILTIN, kNoExtraICState, Compare)                         \
+  TFS(GreaterThanOrEqual, BUILTIN, kNoExtraICState, Compare)                  \
+  TFS(Equal, BUILTIN, kNoExtraICState, Compare)                               \
+  TFS(NotEqual, BUILTIN, kNoExtraICState, Compare)                            \
+  TFS(StrictEqual, BUILTIN, kNoExtraICState, Compare)                         \
+  TFS(StrictNotEqual, BUILTIN, kNoExtraICState, Compare)                      \
                                                                               \
   /* Object */                                                                \
   CPP(ObjectAssign)                                                           \
-  CPP(ObjectCreate)                                                           \
+  TFJ(ObjectCreate, 2)                                                        \
   CPP(ObjectDefineGetter)                                                     \
   CPP(ObjectDefineProperties)                                                 \
   CPP(ObjectDefineProperty)                                                   \
@@ -498,8 +538,9 @@
   CPP(ObjectGetOwnPropertyNames)                                              \
   CPP(ObjectGetOwnPropertySymbols)                                            \
   CPP(ObjectGetPrototypeOf)                                                   \
+  CPP(ObjectSetPrototypeOf)                                                   \
   /* ES6 section 19.1.3.2 Object.prototype.hasOwnProperty */                  \
-  TFJ(ObjectHasOwnProperty, 2)                                                \
+  TFJ(ObjectHasOwnProperty, 1)                                                \
   CPP(ObjectIs)                                                               \
   CPP(ObjectIsExtensible)                                                     \
   CPP(ObjectIsFrozen)                                                         \
@@ -509,11 +550,23 @@
   CPP(ObjectLookupSetter)                                                     \
   CPP(ObjectPreventExtensions)                                                \
   /* ES6 section 19.1.3.6 Object.prototype.toString () */                     \
-  TFJ(ObjectProtoToString, 1)                                                 \
+  TFJ(ObjectProtoToString, 0)                                                 \
   CPP(ObjectPrototypePropertyIsEnumerable)                                    \
+  CPP(ObjectPrototypeGetProto)                                                \
+  CPP(ObjectPrototypeSetProto)                                                \
   CPP(ObjectSeal)                                                             \
   CPP(ObjectValues)                                                           \
                                                                               \
+  TFS(HasProperty, BUILTIN, kNoExtraICState, HasProperty)                     \
+  TFS(InstanceOf, BUILTIN, kNoExtraICState, Compare)                          \
+  TFS(OrdinaryHasInstance, BUILTIN, kNoExtraICState, Compare)                 \
+  TFS(ForInFilter, BUILTIN, kNoExtraICState, ForInFilter)                     \
+                                                                              \
+  /* Promise */                                                               \
+  CPP(CreateResolvingFunctions)                                               \
+  CPP(PromiseResolveClosure)                                                  \
+  CPP(PromiseRejectClosure)                                                   \
+                                                                              \
   /* Proxy */                                                                 \
   CPP(ProxyConstructor)                                                       \
   CPP(ProxyConstructor_ConstructStub)                                         \
@@ -534,24 +587,64 @@
   CPP(ReflectSetPrototypeOf)                                                  \
                                                                               \
   /* RegExp */                                                                \
+  CPP(RegExpCapture1Getter)                                                   \
+  CPP(RegExpCapture2Getter)                                                   \
+  CPP(RegExpCapture3Getter)                                                   \
+  CPP(RegExpCapture4Getter)                                                   \
+  CPP(RegExpCapture5Getter)                                                   \
+  CPP(RegExpCapture6Getter)                                                   \
+  CPP(RegExpCapture7Getter)                                                   \
+  CPP(RegExpCapture8Getter)                                                   \
+  CPP(RegExpCapture9Getter)                                                   \
   CPP(RegExpConstructor)                                                      \
-  TFJ(RegExpPrototypeExec, 2)                                                 \
+  TFJ(RegExpInternalMatch, 2)                                                 \
+  CPP(RegExpInputGetter)                                                      \
+  CPP(RegExpInputSetter)                                                      \
+  CPP(RegExpLastMatchGetter)                                                  \
+  CPP(RegExpLastParenGetter)                                                  \
+  CPP(RegExpLeftContextGetter)                                                \
+  CPP(RegExpPrototypeCompile)                                                 \
+  TFJ(RegExpPrototypeExec, 1)                                                 \
+  TFJ(RegExpPrototypeFlagsGetter, 0)                                          \
+  TFJ(RegExpPrototypeGlobalGetter, 0)                                         \
+  TFJ(RegExpPrototypeIgnoreCaseGetter, 0)                                     \
+  CPP(RegExpPrototypeMatch)                                                   \
+  TFJ(RegExpPrototypeMultilineGetter, 0)                                      \
+  TFJ(RegExpPrototypeReplace, 2)                                              \
+  TFJ(RegExpPrototypeSearch, 1)                                               \
+  CPP(RegExpPrototypeSourceGetter)                                            \
+  CPP(RegExpPrototypeSpeciesGetter)                                           \
+  CPP(RegExpPrototypeSplit)                                                   \
+  TFJ(RegExpPrototypeStickyGetter, 0)                                         \
+  TFJ(RegExpPrototypeTest, 1)                                                 \
+  CPP(RegExpPrototypeToString)                                                \
+  TFJ(RegExpPrototypeUnicodeGetter, 0)                                        \
+  CPP(RegExpRightContextGetter)                                               \
                                                                               \
   /* SharedArrayBuffer */                                                     \
   CPP(SharedArrayBufferPrototypeGetByteLength)                                \
-  TFJ(AtomicsLoad, 3)                                                         \
-  TFJ(AtomicsStore, 4)                                                        \
+  TFJ(AtomicsLoad, 2)                                                         \
+  TFJ(AtomicsStore, 3)                                                        \
                                                                               \
   /* String */                                                                \
   ASM(StringConstructor)                                                      \
   ASM(StringConstructor_ConstructStub)                                        \
   CPP(StringFromCodePoint)                                                    \
   /* ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits ) */             \
-  TFJ(StringFromCharCode, 2)                                                  \
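+  /* kDontAdaptArgumentsSentinel: variadic, bypasses argument adaptation */   \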
+  TFJ(StringFromCharCode, SharedFunctionInfo::kDontAdaptArgumentsSentinel)    \
   /* ES6 section 21.1.3.1 String.prototype.charAt ( pos ) */                  \
-  TFJ(StringPrototypeCharAt, 2)                                               \
+  TFJ(StringPrototypeCharAt, 1)                                               \
   /* ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos ) */              \
-  TFJ(StringPrototypeCharCodeAt, 2)                                           \
+  TFJ(StringPrototypeCharCodeAt, 1)                                           \
+  /* ES6 section 21.1.3.6 */                                                  \
+  /* String.prototype.endsWith ( searchString [ , endPosition ] ) */          \
+  CPP(StringPrototypeEndsWith)                                                \
+  /* ES6 section 21.1.3.7 */                                                  \
+  /* String.prototype.includes ( searchString [ , position ] ) */             \
+  CPP(StringPrototypeIncludes)                                                \
+  /* ES6 section 21.1.3.8 */                                                  \
+  /* String.prototype.indexOf ( searchString [ , position ] ) */              \
+  CPP(StringPrototypeIndexOf)                                                 \
   /* ES6 section 21.1.3.9 */                                                  \
   /* String.prototype.lastIndexOf ( searchString [ , position ] ) */          \
   CPP(StringPrototypeLastIndexOf)                                             \
@@ -560,40 +653,52 @@
   /* ES6 section 21.1.3.12 String.prototype.normalize ( [form] ) */           \
   CPP(StringPrototypeNormalize)                                               \
   /* ES6 section B.2.3.1 String.prototype.substr ( start, length ) */         \
-  TFJ(StringPrototypeSubstr, 3)                                               \
+  TFJ(StringPrototypeSubstr, 2)                                               \
   /* ES6 section 21.1.3.19 String.prototype.substring ( start, end ) */       \
-  TFJ(StringPrototypeSubstring, 3)                                            \
+  TFJ(StringPrototypeSubstring, 2)                                            \
+  /* ES6 section 21.1.3.20 */                                                 \
+  /* String.prototype.startsWith ( searchString [ , position ] ) */           \
+  CPP(StringPrototypeStartsWith)                                              \
   /* ES6 section 21.1.3.25 String.prototype.toString () */                    \
-  TFJ(StringPrototypeToString, 1)                                             \
+  TFJ(StringPrototypeToString, 0)                                             \
   CPP(StringPrototypeTrim)                                                    \
   CPP(StringPrototypeTrimLeft)                                                \
   CPP(StringPrototypeTrimRight)                                               \
   /* ES6 section 21.1.3.28 String.prototype.valueOf () */                     \
-  TFJ(StringPrototypeValueOf, 1)                                              \
+  TFJ(StringPrototypeValueOf, 0)                                              \
   /* ES6 #sec-string.prototype-@@iterator */                                  \
-  TFJ(StringPrototypeIterator, 1)                                             \
+  TFJ(StringPrototypeIterator, 0)                                             \
                                                                               \
   /* StringIterator */                                                        \
-  TFJ(StringIteratorPrototypeNext, 1)                                         \
+  TFJ(StringIteratorPrototypeNext, 0)                                         \
                                                                               \
   /* Symbol */                                                                \
   CPP(SymbolConstructor)                                                      \
   CPP(SymbolConstructor_ConstructStub)                                        \
   /* ES6 section 19.4.3.4 Symbol.prototype [ @@toPrimitive ] ( hint ) */      \
-  TFJ(SymbolPrototypeToPrimitive, 2)                                          \
+  TFJ(SymbolPrototypeToPrimitive, 1)                                          \
   /* ES6 section 19.4.3.2 Symbol.prototype.toString ( ) */                    \
-  TFJ(SymbolPrototypeToString, 1)                                             \
+  TFJ(SymbolPrototypeToString, 0)                                             \
   /* ES6 section 19.4.3.3 Symbol.prototype.valueOf ( ) */                     \
-  TFJ(SymbolPrototypeValueOf, 1)                                              \
+  TFJ(SymbolPrototypeValueOf, 0)                                              \
                                                                               \
   /* TypedArray */                                                            \
   CPP(TypedArrayPrototypeBuffer)                                              \
   /* ES6 section 22.2.3.2 get %TypedArray%.prototype.byteLength */            \
-  TFJ(TypedArrayPrototypeByteLength, 1)                                       \
+  TFJ(TypedArrayPrototypeByteLength, 0)                                       \
   /* ES6 section 22.2.3.3 get %TypedArray%.prototype.byteOffset */            \
-  TFJ(TypedArrayPrototypeByteOffset, 1)                                       \
+  TFJ(TypedArrayPrototypeByteOffset, 0)                                       \
   /* ES6 section 22.2.3.18 get %TypedArray%.prototype.length */               \
-  TFJ(TypedArrayPrototypeLength, 1)
+  TFJ(TypedArrayPrototypeLength, 0)                                           \
+  /* ES6 #sec-%typedarray%.prototype.entries */                               \
+  TFJ(TypedArrayPrototypeEntries, 0)                                          \
+  /* ES6 #sec-%typedarray%.prototype.keys */                                  \
+  TFJ(TypedArrayPrototypeKeys, 0)                                             \
+  /* ES6 #sec-%typedarray%.prototype.values */                                \
+  TFJ(TypedArrayPrototypeValues, 0)                                           \
+                                                                              \
+  CPP(ModuleNamespaceIterator)                                                \
+  CPP(FixedArrayIteratorNext)
 
 #define IGNORE_BUILTIN(...)
 
@@ -637,7 +742,8 @@
         builtin_count
   };
 
-#define DECLARE_BUILTIN_ACCESSOR(Name, ...) Handle<Code> Name();
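+// V8_EXPORT_PRIVATE exposes the accessors outside the v8 component (tests).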
+#define DECLARE_BUILTIN_ACCESSOR(Name, ...) \
+  V8_EXPORT_PRIVATE Handle<Code> Name();
   BUILTIN_LIST_ALL(DECLARE_BUILTIN_ACCESSOR)
 #undef DECLARE_BUILTIN_ACCESSOR
 
diff --git a/src/builtins/ia32/builtins-ia32.cc b/src/builtins/ia32/builtins-ia32.cc
index 9dd621f..4287333 100644
--- a/src/builtins/ia32/builtins-ia32.cc
+++ b/src/builtins/ia32/builtins-ia32.cc
@@ -110,15 +110,15 @@
   GenerateTailCallToSharedCode(masm);
 }
 
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
-                                           bool is_api_function,
-                                           bool create_implicit_receiver,
-                                           bool check_derived_construct) {
+namespace {
+
+void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
+                                    bool create_implicit_receiver,
+                                    bool check_derived_construct) {
   // ----------- S t a t e -------------
   //  -- eax: number of arguments
   //  -- esi: context
   //  -- edi: constructor function
-  //  -- ebx: allocation site or undefined
   //  -- edx: new target
   // -----------------------------------
 
@@ -127,10 +127,8 @@
     FrameScope scope(masm, StackFrame::CONSTRUCT);
 
     // Preserve the incoming parameters on the stack.
-    __ AssertUndefinedOrAllocationSite(ebx);
-    __ push(esi);
-    __ push(ebx);
     __ SmiTag(eax);
+    __ push(esi);
     __ push(eax);
 
     if (create_implicit_receiver) {
@@ -197,12 +195,12 @@
       Label use_receiver, exit;
 
       // If the result is a smi, it is *not* an object in the ECMA sense.
-      __ JumpIfSmi(eax, &use_receiver);
+      __ JumpIfSmi(eax, &use_receiver, Label::kNear);
 
       // If the type of the result (stored in its map) is less than
       // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
       __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
-      __ j(above_equal, &exit);
+      __ j(above_equal, &exit, Label::kNear);
 
       // Throw away the result of the constructor invocation and use the
       // on-stack receiver as the result.
@@ -244,6 +242,8 @@
   __ ret(0);
 }
 
+}  // namespace
+
 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   Generate_JSConstructStubHelper(masm, false, true, false);
 }
@@ -473,7 +473,7 @@
     __ mov(eax, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset));
     {
       Label done_loop, loop;
-      __ Move(ecx, Smi::FromInt(0));
+      __ Move(ecx, Smi::kZero);
       __ bind(&loop);
       __ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
       __ j(equal, &done_loop, Label::kNear);
@@ -685,31 +685,6 @@
   __ jmp(ecx);
 }
 
-void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
-  // Save the function and context for call to CompileBaseline.
-  __ mov(edi, Operand(ebp, StandardFrameConstants::kFunctionOffset));
-  __ mov(kContextRegister,
-         Operand(ebp, StandardFrameConstants::kContextOffset));
-
-  // Leave the frame before recompiling for baseline so that we don't count as
-  // an activation on the stack.
-  LeaveInterpreterFrame(masm, ebx, ecx);
-
-  {
-    FrameScope frame_scope(masm, StackFrame::INTERNAL);
-    // Push return value.
-    __ push(eax);
-
-    // Push function as argument and compile for baseline.
-    __ push(edi);
-    __ CallRuntime(Runtime::kCompileBaseline);
-
-    // Restore return value.
-    __ pop(eax);
-  }
-  __ ret(0);
-}
-
 static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
                                         Register scratch1, Register scratch2,
                                         Label* stack_overflow,
@@ -1005,12 +980,12 @@
   }
 }
 
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   // Set the return address to the correct point in the interpreter entry
   // trampoline.
   Smi* interpreter_entry_return_pc_offset(
       masm->isolate()->heap()->interpreter_entry_return_pc_offset());
-  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
   __ LoadHeapObject(ebx,
                     masm->isolate()->builtins()->InterpreterEntryTrampoline());
   __ add(ebx, Immediate(interpreter_entry_return_pc_offset->value() +
@@ -1047,6 +1022,31 @@
   __ jmp(ebx);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+  // Advance the current bytecode offset stored within the given interpreter
+  // stack frame. This simulates what all bytecode handlers do upon completion
+  // of the underlying operation.
+  __ mov(ebx, Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ mov(edx, Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
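+    // Preserve the accumulator register across the runtime call.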
+    __ Push(kInterpreterAccumulatorRegister);
+    __ Push(ebx);  // First argument is the bytecode array.
+    __ Push(edx);  // Second argument is the bytecode offset.
+    __ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
+    __ Move(edx, eax);  // Result is the new bytecode offset.
+    __ Pop(kInterpreterAccumulatorRegister);
+  }
+  __ mov(Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp), edx);
+
+  Generate_InterpreterEnterBytecode(masm);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  Generate_InterpreterEnterBytecode(masm);
+}
+
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax : argument count (preserved for callee)
@@ -1055,7 +1055,6 @@
   // -----------------------------------
   // First look up code; maybe we don't need to compile!
   Label gotta_call_runtime, gotta_call_runtime_no_stack;
-  Label maybe_call_runtime;
   Label try_shared;
   Label loop_top, loop_bottom;
 
@@ -1118,15 +1117,12 @@
   __ mov(entry, FieldOperand(map, index, times_half_pointer_size,
                              SharedFunctionInfo::kOffsetToPreviousCachedCode));
   __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
-  __ JumpIfSmi(entry, &maybe_call_runtime);
+  __ JumpIfSmi(entry, &try_shared);
 
   // Found literals and code. Get them into the closure and return.
   __ pop(closure);
   // Store code entry in the closure.
   __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
-
-  Label install_optimized_code_and_tailcall;
-  __ bind(&install_optimized_code_and_tailcall);
   __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
   __ RecordWriteCodeEntryField(closure, entry, eax);
 
@@ -1160,24 +1156,16 @@
   // We found neither literals nor code.
   __ jmp(&gotta_call_runtime);
 
-  __ bind(&maybe_call_runtime);
-  __ pop(closure);
-
-  // Last possibility. Check the context free optimized code map entry.
-  __ mov(entry, FieldOperand(map, FixedArray::kHeaderSize +
-                                      SharedFunctionInfo::kSharedCodeIndex));
-  __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
-  __ JumpIfSmi(entry, &try_shared);
-
-  // Store code entry in the closure.
-  __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
-  __ jmp(&install_optimized_code_and_tailcall);
-
   __ bind(&try_shared);
+  __ pop(closure);
   __ pop(new_target);
   __ pop(argument_count);
-  // Is the full code valid?
   __ mov(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  // Is the shared function marked for tier up?
+  __ test_b(FieldOperand(entry, SharedFunctionInfo::kMarkedForTierUpByteOffset),
+            Immediate(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
+  __ j(not_zero, &gotta_call_runtime_no_stack);
+  // Is the full code valid?
   __ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
   __ mov(ebx, FieldOperand(entry, Code::kFlagsOffset));
   __ and_(ebx, Code::KindField::kMask);
@@ -1939,7 +1927,7 @@
     __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
     __ jmp(&done, Label::kNear);
     __ bind(&no_arguments);
-    __ Move(ebx, Smi::FromInt(0));
+    __ Move(ebx, Smi::kZero);
     __ bind(&done);
   }
 
@@ -2485,8 +2473,8 @@
         __ Push(edi);
         __ mov(eax, ecx);
         __ Push(esi);
-        ToObjectStub stub(masm->isolate());
-        __ CallStub(&stub);
+        __ Call(masm->isolate()->builtins()->ToObject(),
+                RelocInfo::CODE_TARGET);
         __ Pop(esi);
         __ mov(ecx, eax);
         __ Pop(edi);
@@ -2837,7 +2825,7 @@
   __ PopReturnAddressTo(ecx);
   __ Push(edx);
   __ PushReturnAddressFrom(ecx);
-  __ Move(esi, Smi::FromInt(0));
+  __ Move(esi, Smi::kZero);
   __ TailCallRuntime(Runtime::kAllocateInNewSpace);
 }
 
@@ -2852,7 +2840,7 @@
   __ Push(edx);
   __ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
   __ PushReturnAddressFrom(ecx);
-  __ Move(esi, Smi::FromInt(0));
+  __ Move(esi, Smi::kZero);
   __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
 }
 
@@ -2865,7 +2853,7 @@
   __ PopReturnAddressTo(ecx);
   __ Push(edx);
   __ PushReturnAddressFrom(ecx);
-  __ Move(esi, Smi::FromInt(0));
+  __ Move(esi, Smi::kZero);
   __ TailCallRuntime(Runtime::kAbort);
 }
 
diff --git a/src/builtins/mips/builtins-mips.cc b/src/builtins/mips/builtins-mips.cc
index a2b6bea..b9c4a72 100644
--- a/src/builtins/mips/builtins-mips.cc
+++ b/src/builtins/mips/builtins-mips.cc
@@ -266,7 +266,7 @@
 
   // 2b. No arguments, return +0.
   __ bind(&no_arguments);
-  __ Move(v0, Smi::FromInt(0));
+  __ Move(v0, Smi::kZero);
   __ DropAndRet(1);
 }
 
@@ -295,7 +295,7 @@
     __ lw(a0, MemOperand(at));
     __ jmp(&done);
     __ bind(&no_arguments);
-    __ Move(a0, Smi::FromInt(0));
+    __ Move(a0, Smi::kZero);
     __ bind(&done);
   }
 
@@ -548,14 +548,14 @@
   GenerateTailCallToSharedCode(masm);
 }
 
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
-                                           bool is_api_function,
-                                           bool create_implicit_receiver,
-                                           bool check_derived_construct) {
+namespace {
+
+void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
+                                    bool create_implicit_receiver,
+                                    bool check_derived_construct) {
   // ----------- S t a t e -------------
   //  -- a0     : number of arguments
   //  -- a1     : constructor function
-  //  -- a2     : allocation site or undefined
   //  -- a3     : new target
   //  -- cp     : context
   //  -- ra     : return address
@@ -569,9 +569,8 @@
     FrameScope scope(masm, StackFrame::CONSTRUCT);
 
     // Preserve the incoming parameters on the stack.
-    __ AssertUndefinedOrAllocationSite(a2, t0);
     __ SmiTag(a0);
-    __ Push(cp, a2, a0);
+    __ Push(cp, a0);
 
     if (create_implicit_receiver) {
       // Allocate the new receiver object.
@@ -698,6 +697,8 @@
   __ Ret();
 }
 
+}  // namespace
+
 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   Generate_JSConstructStubHelper(masm, false, true, false);
 }
@@ -1144,31 +1145,6 @@
   __ Jump(t0);
 }
 
-void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
-  // Save the function and context for call to CompileBaseline.
-  __ lw(a1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-  __ lw(kContextRegister,
-        MemOperand(fp, StandardFrameConstants::kContextOffset));
-
-  // Leave the frame before recompiling for baseline so that we don't count as
-  // an activation on the stack.
-  LeaveInterpreterFrame(masm, t0);
-
-  {
-    FrameScope frame_scope(masm, StackFrame::INTERNAL);
-    // Push return value.
-    __ push(v0);
-
-    // Push function as argument and compile for baseline.
-    __ push(a1);
-    __ CallRuntime(Runtime::kCompileBaseline);
-
-    // Restore return value.
-    __ pop(v0);
-  }
-  __ Jump(ra);
-}
-
 static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
                                         Register scratch1, Register scratch2,
                                         Label* stack_overflow) {
@@ -1320,12 +1296,12 @@
   }
 }
 
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   // Set the return address to the correct point in the interpreter entry
   // trampoline.
   Smi* interpreter_entry_return_pc_offset(
       masm->isolate()->heap()->interpreter_entry_return_pc_offset());
-  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
   __ li(t0, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
   __ Addu(ra, t0, Operand(interpreter_entry_return_pc_offset->value() +
                           Code::kHeaderSize - kHeapObjectTag));
@@ -1363,6 +1339,29 @@
   __ Jump(a1);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+  // Advance the current bytecode offset stored within the given interpreter
+  // stack frame. This simulates what all bytecode handlers do upon completion
+  // of the underlying operation.
+  __ lw(a1, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ lw(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
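+    // Preserve the accumulator; a1 and a2 are the runtime call arguments.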
+    __ Push(kInterpreterAccumulatorRegister, a1, a2);
+    __ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
+    __ mov(a2, v0);  // Result is the new bytecode offset.
+    __ Pop(kInterpreterAccumulatorRegister);
+  }
+  __ sw(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+  Generate_InterpreterEnterBytecode(masm);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  Generate_InterpreterEnterBytecode(masm);
+}
+
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a0 : argument count (preserved for callee)
@@ -1371,7 +1370,6 @@
   // -----------------------------------
   // First look up code; maybe we don't need to compile!
   Label gotta_call_runtime, gotta_call_runtime_no_stack;
-  Label maybe_call_runtime;
   Label try_shared;
   Label loop_top, loop_bottom;
 
@@ -1435,15 +1433,12 @@
         FieldMemOperand(array_pointer,
                         SharedFunctionInfo::kOffsetToPreviousCachedCode));
   __ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
-  __ JumpIfSmi(entry, &maybe_call_runtime);
+  __ JumpIfSmi(entry, &try_shared);
 
   // Found literals and code. Get them into the closure and return.
   __ pop(closure);
   // Store code entry in the closure.
   __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
-
-  Label install_optimized_code_and_tailcall;
-  __ bind(&install_optimized_code_and_tailcall);
   __ sw(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
   __ RecordWriteCodeEntryField(closure, entry, t1);
 
@@ -1478,24 +1473,18 @@
   // We found neither literals nor code.
   __ jmp(&gotta_call_runtime);
 
-  __ bind(&maybe_call_runtime);
-  __ pop(closure);
-
-  // Last possibility. Check the context free optimized code map entry.
-  __ lw(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
-                                        SharedFunctionInfo::kSharedCodeIndex));
-  __ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
-  __ JumpIfSmi(entry, &try_shared);
-
-  // Store code entry in the closure.
-  __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ jmp(&install_optimized_code_and_tailcall);
-
   __ bind(&try_shared);
+  __ pop(closure);
   __ pop(new_target);
   __ pop(argument_count);
-  // Is the full code valid?
   __ lw(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  // Is the shared function marked for tier up?
+  __ lbu(t1, FieldMemOperand(entry,
+                             SharedFunctionInfo::kMarkedForTierUpByteOffset));
+  __ And(t1, t1,
+         Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
+  __ Branch(&gotta_call_runtime_no_stack, ne, t1, Operand(zero_reg));
+  // Is the full code valid?
   __ lw(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
   __ lw(t1, FieldMemOperand(entry, Code::kFlagsOffset));
   __ And(t1, t1, Operand(Code::KindField::kMask));
@@ -1858,7 +1847,7 @@
   }
 
   // If the code object is null, just return to the caller.
-  __ Ret(eq, v0, Operand(Smi::FromInt(0)));
+  __ Ret(eq, v0, Operand(Smi::kZero));
 
   // Drop any potential handler frame that may be sitting on top of the actual
   // JavaScript frame. This is the case when OSR is triggered from bytecode.
@@ -2455,8 +2444,8 @@
         __ Push(a0, a1);
         __ mov(a0, a3);
         __ Push(cp);
-        ToObjectStub stub(masm->isolate());
-        __ CallStub(&stub);
+        __ Call(masm->isolate()->builtins()->ToObject(),
+                RelocInfo::CODE_TARGET);
         __ Pop(cp);
         __ mov(a3, v0);
         __ Pop(a0, a1);
@@ -2840,7 +2829,7 @@
   // -----------------------------------
   __ SmiTag(a0);
   __ Push(a0);
-  __ Move(cp, Smi::FromInt(0));
+  __ Move(cp, Smi::kZero);
   __ TailCallRuntime(Runtime::kAllocateInNewSpace);
 }
 
@@ -2853,7 +2842,7 @@
   __ SmiTag(a0);
   __ Move(a1, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
   __ Push(a0, a1);
-  __ Move(cp, Smi::FromInt(0));
+  __ Move(cp, Smi::kZero);
   __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
 }
 
@@ -2864,7 +2853,7 @@
   //  -- ra : return address
   // -----------------------------------
   __ Push(a0);
-  __ Move(cp, Smi::FromInt(0));
+  __ Move(cp, Smi::kZero);
   __ TailCallRuntime(Runtime::kAbort);
 }
 
diff --git a/src/builtins/mips64/builtins-mips64.cc b/src/builtins/mips64/builtins-mips64.cc
index f7225f0..a6abb55 100644
--- a/src/builtins/mips64/builtins-mips64.cc
+++ b/src/builtins/mips64/builtins-mips64.cc
@@ -264,7 +264,7 @@
 
   // 2b. No arguments, return +0.
   __ bind(&no_arguments);
-  __ Move(v0, Smi::FromInt(0));
+  __ Move(v0, Smi::kZero);
   __ DropAndRet(1);
 }
 
@@ -293,7 +293,7 @@
     __ ld(a0, MemOperand(at));
     __ jmp(&done);
     __ bind(&no_arguments);
-    __ Move(a0, Smi::FromInt(0));
+    __ Move(a0, Smi::kZero);
     __ bind(&done);
   }
 
@@ -546,14 +546,14 @@
   GenerateTailCallToSharedCode(masm);
 }
 
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
-                                           bool is_api_function,
-                                           bool create_implicit_receiver,
-                                           bool check_derived_construct) {
+namespace {
+
+void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
+                                    bool create_implicit_receiver,
+                                    bool check_derived_construct) {
   // ----------- S t a t e -------------
   //  -- a0     : number of arguments
   //  -- a1     : constructor function
-  //  -- a2     : allocation site or undefined
   //  -- a3     : new target
   //  -- cp     : context
   //  -- ra     : return address
@@ -567,9 +567,8 @@
     FrameScope scope(masm, StackFrame::CONSTRUCT);
 
     // Preserve the incoming parameters on the stack.
-    __ AssertUndefinedOrAllocationSite(a2, t0);
     __ SmiTag(a0);
-    __ Push(cp, a2, a0);
+    __ Push(cp, a0);
 
     if (create_implicit_receiver) {
       __ Push(a1, a3);
@@ -693,6 +692,8 @@
   __ Ret();
 }
 
+}  // namespace
+
 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   Generate_JSConstructStubHelper(masm, false, true, false);
 }
@@ -1136,31 +1137,6 @@
   __ Jump(a4);
 }
 
-void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
-  // Save the function and context for call to CompileBaseline.
-  __ ld(a1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-  __ ld(kContextRegister,
-        MemOperand(fp, StandardFrameConstants::kContextOffset));
-
-  // Leave the frame before recompiling for baseline so that we don't count as
-  // an activation on the stack.
-  LeaveInterpreterFrame(masm, t0);
-
-  {
-    FrameScope frame_scope(masm, StackFrame::INTERNAL);
-    // Push return value.
-    __ push(v0);
-
-    // Push function as argument and compile for baseline.
-    __ push(a1);
-    __ CallRuntime(Runtime::kCompileBaseline);
-
-    // Restore return value.
-    __ pop(v0);
-  }
-  __ Jump(ra);
-}
-
 static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
                                         Register scratch1, Register scratch2,
                                         Label* stack_overflow) {
@@ -1312,12 +1288,12 @@
   }
 }
 
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   // Set the return address to the correct point in the interpreter entry
   // trampoline.
   Smi* interpreter_entry_return_pc_offset(
       masm->isolate()->heap()->interpreter_entry_return_pc_offset());
-  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
   __ li(t0, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
   __ Daddu(ra, t0, Operand(interpreter_entry_return_pc_offset->value() +
                            Code::kHeaderSize - kHeapObjectTag));
@@ -1355,6 +1331,29 @@
   __ Jump(a1);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+  // Advance the current bytecode offset stored within the given interpreter
+  // stack frame. This simulates what all bytecode handlers do upon completion
+  // of the underlying operation.
+  __ ld(a1, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ ld(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
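+    // Preserve the accumulator; a1 and a2 are the runtime call arguments.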
+    __ Push(kInterpreterAccumulatorRegister, a1, a2);
+    __ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
+    __ mov(a2, v0);  // Result is the new bytecode offset.
+    __ Pop(kInterpreterAccumulatorRegister);
+  }
+  __ sd(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+  Generate_InterpreterEnterBytecode(masm);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  Generate_InterpreterEnterBytecode(masm);
+}
+
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a0 : argument count (preserved for callee)
@@ -1363,7 +1362,6 @@
   // -----------------------------------
   // First look up code; maybe we don't need to compile!
   Label gotta_call_runtime, gotta_call_runtime_no_stack;
-  Label maybe_call_runtime;
   Label try_shared;
   Label loop_top, loop_bottom;
 
@@ -1427,15 +1425,12 @@
         FieldMemOperand(array_pointer,
                         SharedFunctionInfo::kOffsetToPreviousCachedCode));
   __ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
-  __ JumpIfSmi(entry, &maybe_call_runtime);
+  __ JumpIfSmi(entry, &try_shared);
 
   // Found literals and code. Get them into the closure and return.
   __ pop(closure);
   // Store code entry in the closure.
   __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
-
-  Label install_optimized_code_and_tailcall;
-  __ bind(&install_optimized_code_and_tailcall);
   __ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
   __ RecordWriteCodeEntryField(closure, entry, a5);
 
@@ -1470,24 +1465,18 @@
   // We found neither literals nor code.
   __ jmp(&gotta_call_runtime);
 
-  __ bind(&maybe_call_runtime);
-  __ pop(closure);
-
-  // Last possibility. Check the context free optimized code map entry.
-  __ ld(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
-                                        SharedFunctionInfo::kSharedCodeIndex));
-  __ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
-  __ JumpIfSmi(entry, &try_shared);
-
-  // Store code entry in the closure.
-  __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ jmp(&install_optimized_code_and_tailcall);
-
   __ bind(&try_shared);
+  __ pop(closure);
   __ pop(new_target);
   __ pop(argument_count);
-  // Is the full code valid?
   __ ld(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  // Is the shared function marked for tier up?
+  __ lbu(a5, FieldMemOperand(entry,
+                             SharedFunctionInfo::kMarkedForTierUpByteOffset));
+  __ And(a5, a5,
+         Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
+  __ Branch(&gotta_call_runtime_no_stack, ne, a5, Operand(zero_reg));
+  // Is the full code valid?
   __ ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
   __ lw(a5, FieldMemOperand(entry, Code::kFlagsOffset));
   __ And(a5, a5, Operand(Code::KindField::kMask));
@@ -1852,7 +1841,7 @@
   }
 
   // If the code object is null, just return to the caller.
-  __ Ret(eq, v0, Operand(Smi::FromInt(0)));
+  __ Ret(eq, v0, Operand(Smi::kZero));
 
   // Drop any potential handler frame that may be sitting on top of the actual
   // JavaScript frame. This is the case when OSR is triggered from bytecode.
@@ -2450,8 +2439,8 @@
         __ Push(a0, a1);
         __ mov(a0, a3);
         __ Push(cp);
-        ToObjectStub stub(masm->isolate());
-        __ CallStub(&stub);
+        __ Call(masm->isolate()->builtins()->ToObject(),
+                RelocInfo::CODE_TARGET);
         __ Pop(cp);
         __ mov(a3, v0);
         __ Pop(a0, a1);
@@ -2833,7 +2822,7 @@
   // -----------------------------------
   __ SmiTag(a0);
   __ Push(a0);
-  __ Move(cp, Smi::FromInt(0));
+  __ Move(cp, Smi::kZero);
   __ TailCallRuntime(Runtime::kAllocateInNewSpace);
 }
 
@@ -2846,7 +2835,7 @@
   __ SmiTag(a0);
   __ Move(a1, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
   __ Push(a0, a1);
-  __ Move(cp, Smi::FromInt(0));
+  __ Move(cp, Smi::kZero);
   __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
 }
 
@@ -2857,7 +2846,7 @@
   //  -- ra : return address
   // -----------------------------------
   __ Push(a0);
-  __ Move(cp, Smi::FromInt(0));
+  __ Move(cp, Smi::kZero);
   __ TailCallRuntime(Runtime::kAbort);
 }
 
diff --git a/src/builtins/ppc/builtins-ppc.cc b/src/builtins/ppc/builtins-ppc.cc
index 7e2b82c..be1e67c 100644
--- a/src/builtins/ppc/builtins-ppc.cc
+++ b/src/builtins/ppc/builtins-ppc.cc
@@ -267,7 +267,7 @@
 
   // 2b. No arguments, return +0.
   __ bind(&no_arguments);
-  __ LoadSmiLiteral(r3, Smi::FromInt(0));
+  __ LoadSmiLiteral(r3, Smi::kZero);
   __ Ret(1);
 }
 
@@ -297,7 +297,7 @@
     __ LoadPX(r5, MemOperand(sp, r5));
     __ b(&done);
     __ bind(&no_arguments);
-    __ LoadSmiLiteral(r5, Smi::FromInt(0));
+    __ LoadSmiLiteral(r5, Smi::kZero);
     __ bind(&done);
   }
 
@@ -555,14 +555,14 @@
   GenerateTailCallToSharedCode(masm);
 }
 
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
-                                           bool is_api_function,
-                                           bool create_implicit_receiver,
-                                           bool check_derived_construct) {
+namespace {
+
+void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
+                                    bool create_implicit_receiver,
+                                    bool check_derived_construct) {
   // ----------- S t a t e -------------
   //  -- r3     : number of arguments
   //  -- r4     : constructor function
-  //  -- r5     : allocation site or undefined
   //  -- r6     : new target
   //  -- cp     : context
   //  -- lr     : return address
@@ -576,15 +576,14 @@
     FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
 
     // Preserve the incoming parameters on the stack.
-    __ AssertUndefinedOrAllocationSite(r5, r7);
 
     if (!create_implicit_receiver) {
       __ SmiTag(r7, r3, SetRC);
-      __ Push(cp, r5, r7);
+      __ Push(cp, r7);
       __ PushRoot(Heap::kTheHoleValueRootIndex);
     } else {
       __ SmiTag(r3);
-      __ Push(cp, r5, r3);
+      __ Push(cp, r3);
 
       // Allocate the new receiver object.
       __ Push(r4, r6);
@@ -711,6 +710,8 @@
   __ blr();
 }
 
+}  // namespace
+
 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   Generate_JSConstructStubHelper(masm, false, true, false);
 }
@@ -1172,31 +1173,6 @@
   __ JumpToJSEntry(r7);
 }
 
-void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
-  // Save the function and context for call to CompileBaseline.
-  __ LoadP(r4, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-  __ LoadP(kContextRegister,
-           MemOperand(fp, StandardFrameConstants::kContextOffset));
-
-  // Leave the frame before recompiling for baseline so that we don't count as
-  // an activation on the stack.
-  LeaveInterpreterFrame(masm, r5);
-
-  {
-    FrameScope frame_scope(masm, StackFrame::INTERNAL);
-    // Push return value.
-    __ push(r3);
-
-    // Push function as argument and compile for baseline.
-    __ push(r4);
-    __ CallRuntime(Runtime::kCompileBaseline);
-
-    // Restore return value.
-    __ pop(r3);
-  }
-  __ blr();
-}
-
 static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
                                         Register scratch,
                                         Label* stack_overflow) {
@@ -1348,12 +1324,12 @@
   }
 }
 
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   // Set the return address to the correct point in the interpreter entry
   // trampoline.
   Smi* interpreter_entry_return_pc_offset(
       masm->isolate()->heap()->interpreter_entry_return_pc_offset());
-  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
   __ Move(r5, masm->isolate()->builtins()->InterpreterEntryTrampoline());
   __ addi(r0, r5, Operand(interpreter_entry_return_pc_offset->value() +
                           Code::kHeaderSize - kHeapObjectTag));
@@ -1390,6 +1366,31 @@
   __ Jump(ip);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+  // Advance the current bytecode offset stored within the given interpreter
+  // stack frame. This simulates what all bytecode handlers do upon completion
+  // of the underlying operation.
+  __ LoadP(r4, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ LoadP(r5,
+           MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
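+    // Preserve the accumulator; r4 and r5 are the runtime call arguments.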
+    __ Push(kInterpreterAccumulatorRegister, r4, r5);
+    __ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
+    __ Move(r5, r3);  // Result is the new bytecode offset.
+    __ Pop(kInterpreterAccumulatorRegister);
+  }
+  __ StoreP(r5,
+            MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+  Generate_InterpreterEnterBytecode(masm);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  Generate_InterpreterEnterBytecode(masm);
+}
+
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r3 : argument count (preserved for callee)
@@ -1398,7 +1399,6 @@
   // -----------------------------------
   // First look up code; maybe we don't need to compile!
   Label gotta_call_runtime;
-  Label maybe_call_runtime;
   Label try_shared;
   Label loop_top, loop_bottom;
 
@@ -1460,14 +1460,11 @@
            FieldMemOperand(array_pointer,
                            SharedFunctionInfo::kOffsetToPreviousCachedCode));
   __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
-  __ JumpIfSmi(entry, &maybe_call_runtime);
+  __ JumpIfSmi(entry, &try_shared);
 
   // Found literals and code. Get them into the closure and return.
   // Store code entry in the closure.
   __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
-
-  Label install_optimized_code_and_tailcall;
-  __ bind(&install_optimized_code_and_tailcall);
   __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
   __ RecordWriteCodeEntryField(closure, entry, r8);
 
@@ -1502,23 +1499,15 @@
   // We found neither literals nor code.
   __ b(&gotta_call_runtime);
 
-  __ bind(&maybe_call_runtime);
-
-  // Last possibility. Check the context free optimized code map entry.
-  __ LoadP(entry,
-           FieldMemOperand(map, FixedArray::kHeaderSize +
-                                    SharedFunctionInfo::kSharedCodeIndex));
-  __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
-  __ JumpIfSmi(entry, &try_shared);
-
-  // Store code entry in the closure.
-  __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ b(&install_optimized_code_and_tailcall);
-
   __ bind(&try_shared);
-  // Is the full code valid?
   __ LoadP(entry,
            FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  // Is the shared function marked for tier up?
+  __ lbz(r8, FieldMemOperand(entry,
+                             SharedFunctionInfo::kMarkedForTierUpByteOffset));
+  __ TestBit(r8, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0);
+  __ bne(&gotta_call_runtime, cr0);
+  // Is the full code valid?
   __ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
   __ lwz(r8, FieldMemOperand(entry, Code::kFlagsOffset));
   __ DecodeField<Code::KindField>(r8);
@@ -1888,7 +1877,7 @@
 
   // If the code object is null, just return to the caller.
   Label skip;
-  __ CmpSmiLiteral(r3, Smi::FromInt(0), r0);
+  __ CmpSmiLiteral(r3, Smi::kZero, r0);
   __ bne(&skip);
   __ Ret();
 
@@ -2507,8 +2496,8 @@
         __ Push(r3, r4);
         __ mr(r3, r6);
         __ Push(cp);
-        ToObjectStub stub(masm->isolate());
-        __ CallStub(&stub);
+        __ Call(masm->isolate()->builtins()->ToObject(),
+                RelocInfo::CODE_TARGET);
         __ Pop(cp);
         __ mr(r6, r3);
         __ Pop(r3, r4);
@@ -2854,7 +2843,7 @@
   // -----------------------------------
   __ SmiTag(r4);
   __ Push(r4);
-  __ LoadSmiLiteral(cp, Smi::FromInt(0));
+  __ LoadSmiLiteral(cp, Smi::kZero);
   __ TailCallRuntime(Runtime::kAllocateInNewSpace);
 }
 
@@ -2867,7 +2856,7 @@
   __ SmiTag(r4);
   __ LoadSmiLiteral(r5, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
   __ Push(r4, r5);
-  __ LoadSmiLiteral(cp, Smi::FromInt(0));
+  __ LoadSmiLiteral(cp, Smi::kZero);
   __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
 }
 
@@ -2878,7 +2867,7 @@
   //  -- lr : return address
   // -----------------------------------
   __ push(r4);
-  __ LoadSmiLiteral(cp, Smi::FromInt(0));
+  __ LoadSmiLiteral(cp, Smi::kZero);
   __ TailCallRuntime(Runtime::kAbort);
 }
 
diff --git a/src/builtins/s390/builtins-s390.cc b/src/builtins/s390/builtins-s390.cc
index 91ae2c0..8655ab8 100644
--- a/src/builtins/s390/builtins-s390.cc
+++ b/src/builtins/s390/builtins-s390.cc
@@ -267,7 +267,7 @@
 
   // 2b. No arguments, return +0.
   __ bind(&no_arguments);
-  __ LoadSmiLiteral(r2, Smi::FromInt(0));
+  __ LoadSmiLiteral(r2, Smi::kZero);
   __ Ret(1);
 }
 
@@ -296,7 +296,7 @@
     __ LoadP(r4, MemOperand(sp, r4));
     __ b(&done);
     __ bind(&no_arguments);
-    __ LoadSmiLiteral(r4, Smi::FromInt(0));
+    __ LoadSmiLiteral(r4, Smi::kZero);
     __ bind(&done);
   }
 
@@ -551,14 +551,14 @@
   GenerateTailCallToSharedCode(masm);
 }
 
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
-                                           bool is_api_function,
-                                           bool create_implicit_receiver,
-                                           bool check_derived_construct) {
+namespace {
+
+void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
+                                    bool create_implicit_receiver,
+                                    bool check_derived_construct) {
   // ----------- S t a t e -------------
   //  -- r2     : number of arguments
   //  -- r3     : constructor function
-  //  -- r4     : allocation site or undefined
   //  -- r5     : new target
   //  -- cp     : context
   //  -- lr     : return address
@@ -572,16 +572,15 @@
     FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
 
     // Preserve the incoming parameters on the stack.
-    __ AssertUndefinedOrAllocationSite(r4, r6);
 
     if (!create_implicit_receiver) {
       __ SmiTag(r6, r2);
       __ LoadAndTestP(r6, r6);
-      __ Push(cp, r4, r6);
+      __ Push(cp, r6);
       __ PushRoot(Heap::kTheHoleValueRootIndex);
     } else {
       __ SmiTag(r2);
-      __ Push(cp, r4, r2);
+      __ Push(cp, r2);
 
       // Allocate the new receiver object.
       __ Push(r3, r5);
@@ -710,6 +709,8 @@
   __ Ret();
 }
 
+}  // namespace
+
 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   Generate_JSConstructStubHelper(masm, false, true, false);
 }
@@ -1175,31 +1176,6 @@
   __ JumpToJSEntry(r6);
 }
 
-void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
-  // Save the function and context for call to CompileBaseline.
-  __ LoadP(r3, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-  __ LoadP(kContextRegister,
-           MemOperand(fp, StandardFrameConstants::kContextOffset));
-
-  // Leave the frame before recompiling for baseline so that we don't count as
-  // an activation on the stack.
-  LeaveInterpreterFrame(masm, r4);
-
-  {
-    FrameScope frame_scope(masm, StackFrame::INTERNAL);
-    // Push return value.
-    __ push(r2);
-
-    // Push function as argument and compile for baseline.
-    __ push(r3);
-    __ CallRuntime(Runtime::kCompileBaseline);
-
-    // Restore return value.
-    __ pop(r2);
-  }
-  __ Ret();
-}
-
 static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
                                         Register scratch,
                                         Label* stack_overflow) {
@@ -1352,12 +1328,12 @@
   }
 }
 
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   // Set the return address to the correct point in the interpreter entry
   // trampoline.
   Smi* interpreter_entry_return_pc_offset(
       masm->isolate()->heap()->interpreter_entry_return_pc_offset());
-  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
   __ Move(r4, masm->isolate()->builtins()->InterpreterEntryTrampoline());
   __ AddP(r14, r4, Operand(interpreter_entry_return_pc_offset->value() +
                            Code::kHeaderSize - kHeapObjectTag));
@@ -1393,6 +1369,31 @@
   __ Jump(ip);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+  // Advance the current bytecode offset stored within the given interpreter
+  // stack frame. This simulates what all bytecode handlers do upon completion
+  // of the underlying operation.
+  __ LoadP(r3, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ LoadP(r4,
+           MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
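+    // Preserve the accumulator; r3 and r4 are the runtime call arguments.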
+    __ Push(kInterpreterAccumulatorRegister, r3, r4);
+    __ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
+    __ Move(r4, r2);  // Result is the new bytecode offset.
+    __ Pop(kInterpreterAccumulatorRegister);
+  }
+  __ StoreP(r4,
+            MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+  Generate_InterpreterEnterBytecode(masm);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  Generate_InterpreterEnterBytecode(masm);
+}
+
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r2 : argument count (preserved for callee)
@@ -1401,7 +1402,6 @@
   // -----------------------------------
   // First look up code; maybe we don't need to compile!
   Label gotta_call_runtime;
-  Label maybe_call_runtime;
   Label try_shared;
   Label loop_top, loop_bottom;
 
@@ -1463,14 +1463,11 @@
            FieldMemOperand(array_pointer,
                            SharedFunctionInfo::kOffsetToPreviousCachedCode));
   __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
-  __ JumpIfSmi(entry, &maybe_call_runtime);
+  __ JumpIfSmi(entry, &try_shared);
 
   // Found literals and code. Get them into the closure and return.
   // Store code entry in the closure.
   __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
-
-  Label install_optimized_code_and_tailcall;
-  __ bind(&install_optimized_code_and_tailcall);
   __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
   __ RecordWriteCodeEntryField(closure, entry, r7);
 
@@ -1505,23 +1502,15 @@
   // We found neither literals nor code.
   __ b(&gotta_call_runtime);
 
-  __ bind(&maybe_call_runtime);
-
-  // Last possibility. Check the context free optimized code map entry.
-  __ LoadP(entry,
-           FieldMemOperand(map, FixedArray::kHeaderSize +
-                                    SharedFunctionInfo::kSharedCodeIndex));
-  __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
-  __ JumpIfSmi(entry, &try_shared);
-
-  // Store code entry in the closure.
-  __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ b(&install_optimized_code_and_tailcall);
-
   __ bind(&try_shared);
-  // Is the full code valid?
   __ LoadP(entry,
            FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  // Is the shared function marked for tier up?
+  __ LoadlB(temp, FieldMemOperand(
+                      entry, SharedFunctionInfo::kMarkedForTierUpByteOffset));
+  __ TestBit(temp, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0);
+  __ bne(&gotta_call_runtime);
+  // Is the full code valid?
   __ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
   __ LoadlW(r7, FieldMemOperand(entry, Code::kFlagsOffset));
   __ DecodeField<Code::KindField>(r7);
@@ -1895,7 +1884,7 @@
 
   // If the code object is null, just return to the caller.
   Label skip;
-  __ CmpSmiLiteral(r2, Smi::FromInt(0), r0);
+  __ CmpSmiLiteral(r2, Smi::kZero, r0);
   __ bne(&skip);
   __ Ret();
 
@@ -2516,8 +2505,8 @@
         __ Push(r2, r3);
         __ LoadRR(r2, r5);
         __ Push(cp);
-        ToObjectStub stub(masm->isolate());
-        __ CallStub(&stub);
+        __ Call(masm->isolate()->builtins()->ToObject(),
+                RelocInfo::CODE_TARGET);
         __ Pop(cp);
         __ LoadRR(r5, r2);
         __ Pop(r2, r3);
@@ -2865,7 +2854,7 @@
   // -----------------------------------
   __ SmiTag(r3);
   __ Push(r3);
-  __ LoadSmiLiteral(cp, Smi::FromInt(0));
+  __ LoadSmiLiteral(cp, Smi::kZero);
   __ TailCallRuntime(Runtime::kAllocateInNewSpace);
 }
 
@@ -2878,7 +2867,7 @@
   __ SmiTag(r3);
   __ LoadSmiLiteral(r4, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
   __ Push(r3, r4);
-  __ LoadSmiLiteral(cp, Smi::FromInt(0));
+  __ LoadSmiLiteral(cp, Smi::kZero);
   __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
 }
 
@@ -2889,7 +2878,7 @@
   //  -- lr : return address
   // -----------------------------------
   __ push(r3);
-  __ LoadSmiLiteral(cp, Smi::FromInt(0));
+  __ LoadSmiLiteral(cp, Smi::kZero);
   __ TailCallRuntime(Runtime::kAbort);
 }
 
diff --git a/src/builtins/x64/builtins-x64.cc b/src/builtins/x64/builtins-x64.cc
index beae2d2..cde0264 100644
--- a/src/builtins/x64/builtins-x64.cc
+++ b/src/builtins/x64/builtins-x64.cc
@@ -112,15 +112,15 @@
   GenerateTailCallToSharedCode(masm);
 }
 
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
-                                           bool is_api_function,
-                                           bool create_implicit_receiver,
-                                           bool check_derived_construct) {
+namespace {
+
+void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
+                                    bool create_implicit_receiver,
+                                    bool check_derived_construct) {
   // ----------- S t a t e -------------
   //  -- rax: number of arguments
   //  -- rsi: context
   //  -- rdi: constructor function
-  //  -- rbx: allocation site or undefined
   //  -- rdx: new target
   // -----------------------------------
 
@@ -129,10 +129,8 @@
     FrameScope scope(masm, StackFrame::CONSTRUCT);
 
     // Preserve the incoming parameters on the stack.
-    __ AssertUndefinedOrAllocationSite(rbx);
-    __ Push(rsi);
-    __ Push(rbx);
     __ Integer32ToSmi(rcx, rax);
+    __ Push(rsi);
     __ Push(rcx);
 
     if (create_implicit_receiver) {
@@ -197,13 +195,13 @@
       // on page 74.
       Label use_receiver, exit;
       // If the result is a smi, it is *not* an object in the ECMA sense.
-      __ JumpIfSmi(rax, &use_receiver);
+      __ JumpIfSmi(rax, &use_receiver, Label::kNear);
 
       // If the type of the result (stored in its map) is less than
       // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
       STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
       __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
-      __ j(above_equal, &exit);
+      __ j(above_equal, &exit, Label::kNear);
 
       // Throw away the result of the constructor invocation and use the
       // on-stack receiver as the result.
@@ -246,6 +244,8 @@
   __ ret(0);
 }
 
+}  // namespace
+
 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   Generate_JSConstructStubHelper(masm, false, true, false);
 }
@@ -658,7 +658,7 @@
   // it is present) and load it into kInterpreterBytecodeArrayRegister.
   __ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
   Label load_debug_bytecode_array, bytecode_array_loaded;
-  DCHECK_EQ(Smi::FromInt(0), DebugInfo::uninitialized());
+  DCHECK_EQ(Smi::kZero, DebugInfo::uninitialized());
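+  // Uninitialized debug info is Smi::kZero, so comparing against 0 suffices.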
   __ cmpp(FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset),
           Immediate(0));
   __ j(not_equal, &load_debug_bytecode_array);
@@ -766,31 +766,6 @@
   __ jmp(rcx);
 }
 
-void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
-  // Save the function and context for call to CompileBaseline.
-  __ movp(rdi, Operand(rbp, StandardFrameConstants::kFunctionOffset));
-  __ movp(kContextRegister,
-          Operand(rbp, StandardFrameConstants::kContextOffset));
-
-  // Leave the frame before recompiling for baseline so that we don't count as
-  // an activation on the stack.
-  LeaveInterpreterFrame(masm, rbx, rcx);
-
-  {
-    FrameScope frame_scope(masm, StackFrame::INTERNAL);
-    // Push return value.
-    __ Push(rax);
-
-    // Push function as argument and compile for baseline.
-    __ Push(rdi);
-    __ CallRuntime(Runtime::kCompileBaseline);
-
-    // Restore return value.
-    __ Pop(rax);
-  }
-  __ ret(0);
-}
-
 static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
                                         Register scratch1, Register scratch2,
                                         Label* stack_overflow) {
@@ -981,12 +956,12 @@
   }
 }
 
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   // Set the return address to the correct point in the interpreter entry
   // trampoline.
   Smi* interpreter_entry_return_pc_offset(
       masm->isolate()->heap()->interpreter_entry_return_pc_offset());
-  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
   __ Move(rbx, masm->isolate()->builtins()->InterpreterEntryTrampoline());
   __ addp(rbx, Immediate(interpreter_entry_return_pc_offset->value() +
                          Code::kHeaderSize - kHeapObjectTag));
@@ -1023,6 +998,31 @@
   __ jmp(rbx);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+  // Advance the current bytecode offset stored within the given interpreter
+  // stack frame. This simulates what all bytecode handlers do upon completion
+  // of the underlying operation.
+  __ movp(rbx, Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ movp(rdx, Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(kInterpreterAccumulatorRegister);
+    __ Push(rbx);  // First argument is the bytecode array.
+    __ Push(rdx);  // Second argument is the bytecode offset.
+    __ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
+    __ Move(rdx, rax);  // Result is the new bytecode offset.
+    __ Pop(kInterpreterAccumulatorRegister);
+  }
+  __ movp(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp), rdx);
+
+  Generate_InterpreterEnterBytecode(masm);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  Generate_InterpreterEnterBytecode(masm);
+}
+
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax : argument count (preserved for callee)
@@ -1031,7 +1031,6 @@
   // -----------------------------------
   // First lookup code, maybe we don't need to compile!
   Label gotta_call_runtime;
-  Label maybe_call_runtime;
   Label try_shared;
   Label loop_top, loop_bottom;
 
@@ -1085,13 +1084,10 @@
   __ movp(entry, FieldOperand(map, index, times_pointer_size,
                               SharedFunctionInfo::kOffsetToPreviousCachedCode));
   __ movp(entry, FieldOperand(entry, WeakCell::kValueOffset));
-  __ JumpIfSmi(entry, &maybe_call_runtime);
+  __ JumpIfSmi(entry, &try_shared);
 
   // Found literals and code. Get them into the closure and return.
   __ leap(entry, FieldOperand(entry, Code::kHeaderSize));
-
-  Label install_optimized_code_and_tailcall;
-  __ bind(&install_optimized_code_and_tailcall);
   __ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
   __ RecordWriteCodeEntryField(closure, entry, r15);
 
@@ -1124,21 +1120,13 @@
   // We found neither literals nor code.
   __ jmp(&gotta_call_runtime);
 
-  __ bind(&maybe_call_runtime);
-
-  // Last possibility. Check the context free optimized code map entry.
-  __ movp(entry, FieldOperand(map, FixedArray::kHeaderSize +
-                                       SharedFunctionInfo::kSharedCodeIndex));
-  __ movp(entry, FieldOperand(entry, WeakCell::kValueOffset));
-  __ JumpIfSmi(entry, &try_shared);
-
-  // Store code entry in the closure.
-  __ leap(entry, FieldOperand(entry, Code::kHeaderSize));
-  __ jmp(&install_optimized_code_and_tailcall);
-
   __ bind(&try_shared);
-  // Is the full code valid?
   __ movp(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  // Is the shared function marked for tier up?
+  __ testb(FieldOperand(entry, SharedFunctionInfo::kMarkedForTierUpByteOffset),
+           Immediate(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
+  __ j(not_zero, &gotta_call_runtime);
+  // Is the full code valid?
   __ movp(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
   __ movl(rbx, FieldOperand(entry, Code::kFlagsOffset));
   __ andl(rbx, Immediate(Code::KindField::kMask));
@@ -1904,7 +1892,7 @@
     __ movp(rbx, args.GetArgumentOperand(1));
     __ jmp(&done, Label::kNear);
     __ bind(&no_arguments);
-    __ Move(rbx, Smi::FromInt(0));
+    __ Move(rbx, Smi::kZero);
     __ bind(&done);
   }
 
@@ -2157,7 +2145,7 @@
   __ PopReturnAddressTo(rcx);
   __ Push(rdx);
   __ PushReturnAddressFrom(rcx);
-  __ Move(rsi, Smi::FromInt(0));
+  __ Move(rsi, Smi::kZero);
   __ TailCallRuntime(Runtime::kAllocateInNewSpace);
 }
 
@@ -2172,7 +2160,7 @@
   __ Push(rdx);
   __ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
   __ PushReturnAddressFrom(rcx);
-  __ Move(rsi, Smi::FromInt(0));
+  __ Move(rsi, Smi::kZero);
   __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
 }
 
@@ -2185,7 +2173,7 @@
   __ PopReturnAddressTo(rcx);
   __ Push(rdx);
   __ PushReturnAddressFrom(rcx);
-  __ Move(rsi, Smi::FromInt(0));
+  __ Move(rsi, Smi::kZero);
   __ TailCallRuntime(Runtime::kAbort);
 }
 
@@ -2583,8 +2571,8 @@
         __ Push(rdi);
         __ movp(rax, rcx);
         __ Push(rsi);
-        ToObjectStub stub(masm->isolate());
-        __ CallStub(&stub);
+        __ Call(masm->isolate()->builtins()->ToObject(),
+                RelocInfo::CODE_TARGET);
         __ Pop(rsi);
         __ movp(rcx, rax);
         __ Pop(rdi);
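
The rewritten fast path in Generate_CompileLazy (for x64 above, mirrored by the x87 port below) now refuses cached code whenever the shared function is marked for tier-up: it tests one byte of the SharedFunctionInfo against the tier-up bit and bails out to the runtime if it is set. A scalar C++ sketch of that test, with the byte offset and bit index as illustrative stand-ins for the real constants:

    #include <cstddef>
    #include <cstdint>

    // Stand-ins for SharedFunctionInfo::kMarkedForTierUpByteOffset and
    // kMarkedForTierUpBitWithinByte; the concrete values are assumptions.
    constexpr size_t kMarkedForTierUpByteOffset = 0;
    constexpr int kMarkedForTierUpBitWithinByte = 0;

    // Mirrors: testb FieldOperand(entry, offset), Imm(1 << bit); j(not_zero, ...)
    bool MarkedForTierUp(const uint8_t* shared_info) {
      uint8_t flags = shared_info[kMarkedForTierUpByteOffset];
      return (flags & (1 << kMarkedForTierUpBitWithinByte)) != 0;
    }
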
diff --git a/src/builtins/x87/builtins-x87.cc b/src/builtins/x87/builtins-x87.cc
index 8e096a3..2187f86 100644
--- a/src/builtins/x87/builtins-x87.cc
+++ b/src/builtins/x87/builtins-x87.cc
@@ -110,15 +110,15 @@
   GenerateTailCallToSharedCode(masm);
 }
 
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
-                                           bool is_api_function,
-                                           bool create_implicit_receiver,
-                                           bool check_derived_construct) {
+namespace {
+
+void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
+                                    bool create_implicit_receiver,
+                                    bool check_derived_construct) {
   // ----------- S t a t e -------------
   //  -- eax: number of arguments
   //  -- esi: context
   //  -- edi: constructor function
-  //  -- ebx: allocation site or undefined
   //  -- edx: new target
   // -----------------------------------
 
@@ -127,10 +127,8 @@
     FrameScope scope(masm, StackFrame::CONSTRUCT);
 
     // Preserve the incoming parameters on the stack.
-    __ AssertUndefinedOrAllocationSite(ebx);
-    __ push(esi);
-    __ push(ebx);
     __ SmiTag(eax);
+    __ push(esi);
     __ push(eax);
 
     if (create_implicit_receiver) {
@@ -198,12 +196,12 @@
       Label use_receiver, exit;
 
       // If the result is a smi, it is *not* an object in the ECMA sense.
-      __ JumpIfSmi(eax, &use_receiver);
+      __ JumpIfSmi(eax, &use_receiver, Label::kNear);
 
       // If the type of the result (stored in its map) is less than
       // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
       __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
-      __ j(above_equal, &exit);
+      __ j(above_equal, &exit, Label::kNear);
 
       // Throw away the result of the constructor invocation and use the
       // on-stack receiver as the result.
@@ -245,6 +243,8 @@
   __ ret(0);
 }
 
+}  // namespace
+
 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   Generate_JSConstructStubHelper(masm, false, true, false);
 }
@@ -474,7 +474,7 @@
     __ mov(eax, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset));
     {
       Label done_loop, loop;
-      __ Move(ecx, Smi::FromInt(0));
+      __ Move(ecx, Smi::kZero);
       __ bind(&loop);
       __ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
       __ j(equal, &done_loop, Label::kNear);
@@ -686,31 +686,6 @@
   __ jmp(ecx);
 }
 
-void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
-  // Save the function and context for call to CompileBaseline.
-  __ mov(edi, Operand(ebp, StandardFrameConstants::kFunctionOffset));
-  __ mov(kContextRegister,
-         Operand(ebp, StandardFrameConstants::kContextOffset));
-
-  // Leave the frame before recompiling for baseline so that we don't count as
-  // an activation on the stack.
-  LeaveInterpreterFrame(masm, ebx, ecx);
-
-  {
-    FrameScope frame_scope(masm, StackFrame::INTERNAL);
-    // Push return value.
-    __ push(eax);
-
-    // Push function as argument and compile for baseline.
-    __ push(edi);
-    __ CallRuntime(Runtime::kCompileBaseline);
-
-    // Restore return value.
-    __ pop(eax);
-  }
-  __ ret(0);
-}
-
 static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
                                         Register scratch1, Register scratch2,
                                         Label* stack_overflow,
@@ -1006,12 +981,12 @@
   }
 }
 
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   // Set the return address to the correct point in the interpreter entry
   // trampoline.
   Smi* interpreter_entry_return_pc_offset(
       masm->isolate()->heap()->interpreter_entry_return_pc_offset());
-  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
   __ LoadHeapObject(ebx,
                     masm->isolate()->builtins()->InterpreterEntryTrampoline());
   __ add(ebx, Immediate(interpreter_entry_return_pc_offset->value() +
@@ -1048,6 +1023,31 @@
   __ jmp(ebx);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+  // Advance the current bytecode offset stored within the given interpreter
+  // stack frame. This simulates what all bytecode handlers do upon completion
+  // of the underlying operation.
+  __ mov(ebx, Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ mov(edx, Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(kInterpreterAccumulatorRegister);
+    __ Push(ebx);  // First argument is the bytecode array.
+    __ Push(edx);  // Second argument is the bytecode offset.
+    __ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
+    __ Move(edx, eax);  // Result is the new bytecode offset.
+    __ Pop(kInterpreterAccumulatorRegister);
+  }
+  __ mov(Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp), edx);
+
+  Generate_InterpreterEnterBytecode(masm);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  Generate_InterpreterEnterBytecode(masm);
+}
+
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax : argument count (preserved for callee)
@@ -1056,7 +1056,6 @@
   // -----------------------------------
   // First lookup code, maybe we don't need to compile!
   Label gotta_call_runtime, gotta_call_runtime_no_stack;
-  Label maybe_call_runtime;
   Label try_shared;
   Label loop_top, loop_bottom;
 
@@ -1119,15 +1118,12 @@
   __ mov(entry, FieldOperand(map, index, times_half_pointer_size,
                              SharedFunctionInfo::kOffsetToPreviousCachedCode));
   __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
-  __ JumpIfSmi(entry, &maybe_call_runtime);
+  __ JumpIfSmi(entry, &try_shared);
 
   // Found literals and code. Get them into the closure and return.
   __ pop(closure);
   // Store code entry in the closure.
   __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
-
-  Label install_optimized_code_and_tailcall;
-  __ bind(&install_optimized_code_and_tailcall);
   __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
   __ RecordWriteCodeEntryField(closure, entry, eax);
 
@@ -1161,24 +1157,16 @@
   // We found neither literals nor code.
   __ jmp(&gotta_call_runtime);
 
-  __ bind(&maybe_call_runtime);
-  __ pop(closure);
-
-  // Last possibility. Check the context free optimized code map entry.
-  __ mov(entry, FieldOperand(map, FixedArray::kHeaderSize +
-                                      SharedFunctionInfo::kSharedCodeIndex));
-  __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
-  __ JumpIfSmi(entry, &try_shared);
-
-  // Store code entry in the closure.
-  __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
-  __ jmp(&install_optimized_code_and_tailcall);
-
   __ bind(&try_shared);
+  __ pop(closure);
   __ pop(new_target);
   __ pop(argument_count);
-  // Is the full code valid?
   __ mov(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  // Is the shared function marked for tier up?
+  __ test_b(FieldOperand(entry, SharedFunctionInfo::kMarkedForTierUpByteOffset),
+            Immediate(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
+  __ j(not_zero, &gotta_call_runtime_no_stack);
+  // Is the full code valid?
   __ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
   __ mov(ebx, FieldOperand(entry, Code::kFlagsOffset));
   __ and_(ebx, Code::KindField::kMask);
@@ -1956,7 +1944,7 @@
     __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
     __ jmp(&done, Label::kNear);
     __ bind(&no_arguments);
-    __ Move(ebx, Smi::FromInt(0));
+    __ Move(ebx, Smi::kZero);
     __ bind(&done);
   }
 
@@ -2509,8 +2497,8 @@
         __ Push(edi);
         __ mov(eax, ecx);
         __ Push(esi);
-        ToObjectStub stub(masm->isolate());
-        __ CallStub(&stub);
+        __ Call(masm->isolate()->builtins()->ToObject(),
+                RelocInfo::CODE_TARGET);
         __ Pop(esi);
         __ mov(ecx, eax);
         __ Pop(edi);
@@ -2861,7 +2849,7 @@
   __ PopReturnAddressTo(ecx);
   __ Push(edx);
   __ PushReturnAddressFrom(ecx);
-  __ Move(esi, Smi::FromInt(0));
+  __ Move(esi, Smi::kZero);
   __ TailCallRuntime(Runtime::kAllocateInNewSpace);
 }
 
@@ -2876,7 +2864,7 @@
   __ Push(edx);
   __ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
   __ PushReturnAddressFrom(ecx);
-  __ Move(esi, Smi::FromInt(0));
+  __ Move(esi, Smi::kZero);
   __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
 }
 
@@ -2889,7 +2877,7 @@
   __ PopReturnAddressTo(ecx);
   __ Push(edx);
   __ PushReturnAddressFrom(ecx);
-  __ Move(esi, Smi::FromInt(0));
+  __ Move(esi, Smi::kZero);
   __ TailCallRuntime(Runtime::kAbort);
 }
 
diff --git a/src/cancelable-task.cc b/src/cancelable-task.cc
index defbb44..ea351f8 100644
--- a/src/cancelable-task.cc
+++ b/src/cancelable-task.cc
@@ -26,13 +26,15 @@
   }
 }
 
-CancelableTaskManager::CancelableTaskManager() : task_id_counter_(0) {}
+CancelableTaskManager::CancelableTaskManager()
+    : task_id_counter_(0), canceled_(false) {}
 
 uint32_t CancelableTaskManager::Register(Cancelable* task) {
   base::LockGuard<base::Mutex> guard(&mutex_);
   uint32_t id = ++task_id_counter_;
   // The loop below is just used when task_id_counter_ overflows.
   while (cancelable_tasks_.count(id) > 0) ++id;
+  CHECK(!canceled_);
   cancelable_tasks_[id] = task;
   return id;
 }
@@ -42,12 +44,12 @@
   base::LockGuard<base::Mutex> guard(&mutex_);
   size_t removed = cancelable_tasks_.erase(id);
   USE(removed);
-  DCHECK_NE(0, removed);
+  DCHECK_NE(0u, removed);
   cancelable_tasks_barrier_.NotifyOne();
 }
 
-
-bool CancelableTaskManager::TryAbort(uint32_t id) {
+CancelableTaskManager::TryAbortResult CancelableTaskManager::TryAbort(
+    uint32_t id) {
   base::LockGuard<base::Mutex> guard(&mutex_);
   auto entry = cancelable_tasks_.find(id);
   if (entry != cancelable_tasks_.end()) {
@@ -56,10 +58,12 @@
       // Cannot call RemoveFinishedTask here because of recursive locking.
       cancelable_tasks_.erase(entry);
       cancelable_tasks_barrier_.NotifyOne();
-      return true;
+      return kTaskAborted;
+    } else {
+      return kTaskRunning;
     }
   }
-  return false;
+  return kTaskRemoved;
 }
 
 
@@ -69,6 +73,7 @@
   // of canceling we wait for the background tasks that have already been
   // started.
   base::LockGuard<base::Mutex> guard(&mutex_);
+  canceled_ = true;
 
   // Cancelable tasks could be running or could potentially register new
   // tasks, requiring a loop here.
diff --git a/src/cancelable-task.h b/src/cancelable-task.h
index b1d62aa..65f98e7 100644
--- a/src/cancelable-task.h
+++ b/src/cancelable-task.h
@@ -11,6 +11,7 @@
 #include "src/base/atomic-utils.h"
 #include "src/base/macros.h"
 #include "src/base/platform/condition-variable.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -21,26 +22,27 @@
 
 // Keeps track of cancelable tasks. It is possible to register and remove tasks
 // from any fore- and background task/thread.
-class CancelableTaskManager {
+class V8_EXPORT_PRIVATE CancelableTaskManager {
  public:
   CancelableTaskManager();
 
   // Registers a new cancelable {task}. Returns the unique {id} of the task that
   // can be used to try to abort a task by calling {Abort}.
+  // Must not be called after CancelAndWait.
   uint32_t Register(Cancelable* task);
 
   // Try to abort running a task identified by {id}. The possible outcomes are:
-  // (1) The task is already finished running and thus has been removed from
-  //     the manager.
+  // (1) The task is already finished running or was canceled before and
+  //     thus has been removed from the manager.
   // (2) The task is currently running and cannot be canceled anymore.
   // (3) The task is not yet running (or finished) so it is canceled and
   //     removed.
   //
-  // Returns {false} for (1) and (2), and {true} for (3).
-  bool TryAbort(uint32_t id);
+  enum TryAbortResult { kTaskRemoved, kTaskRunning, kTaskAborted };
+  TryAbortResult TryAbort(uint32_t id);
 
   // Cancels all remaining registered tasks and waits for tasks that are
-  // already running.
+  // already running. This disallows subsequent Register calls.
   void CancelAndWait();
 
  private:
@@ -59,13 +61,14 @@
   base::ConditionVariable cancelable_tasks_barrier_;
   base::Mutex mutex_;
 
+  bool canceled_;
+
   friend class Cancelable;
 
   DISALLOW_COPY_AND_ASSIGN(CancelableTaskManager);
 };
 
-
-class Cancelable {
+class V8_EXPORT_PRIVATE Cancelable {
  public:
   explicit Cancelable(CancelableTaskManager* parent);
   virtual ~Cancelable();
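
Since TryAbort no longer collapses its three outcomes into a bool, callers can now react to each case individually. A minimal caller sketch (the manager pointer and task_id are assumed to come from an earlier Register call):

    // Hypothetical call site; 'manager' and 'task_id' are assumptions.
    switch (manager->TryAbort(task_id)) {
      case CancelableTaskManager::kTaskAborted:
        // Not yet started: canceled and removed, safe to clean up now.
        break;
      case CancelableTaskManager::kTaskRunning:
        // Already running: cannot cancel; wait (e.g. via CancelAndWait).
        break;
      case CancelableTaskManager::kTaskRemoved:
        // Finished or canceled earlier; nothing left to do.
        break;
    }
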
diff --git a/src/char-predicates.h b/src/char-predicates.h
index 3161ae4..966b2a5 100644
--- a/src/char-predicates.h
+++ b/src/char-predicates.h
@@ -5,6 +5,7 @@
 #ifndef V8_CHAR_PREDICATES_H_
 #define V8_CHAR_PREDICATES_H_
 
+#include "src/globals.h"
 #include "src/unicode.h"
 
 namespace v8 {
@@ -25,8 +26,7 @@
 inline bool IsRegExpWord(uc32 c);
 inline bool IsRegExpNewline(uc32 c);
 
-
-struct SupplementaryPlanes {
+struct V8_EXPORT_PRIVATE SupplementaryPlanes {
   static bool IsIDStart(uc32 c);
   static bool IsIDPart(uc32 c);
 };
diff --git a/src/code-factory.cc b/src/code-factory.cc
index 7448591..128c709 100644
--- a/src/code-factory.cc
+++ b/src/code-factory.cc
@@ -23,10 +23,6 @@
 
 // static
 Callable CodeFactory::LoadIC(Isolate* isolate) {
-  if (FLAG_tf_load_ic_stub) {
-    LoadICTrampolineTFStub stub(isolate);
-    return make_callable(stub);
-  }
   LoadICTrampolineStub stub(isolate);
   return make_callable(stub);
 }
@@ -39,10 +35,6 @@
 
 // static
 Callable CodeFactory::LoadICInOptimizedCode(Isolate* isolate) {
-  if (FLAG_tf_load_ic_stub) {
-    LoadICTFStub stub(isolate);
-    return make_callable(stub);
-  }
   LoadICStub stub(isolate);
   return make_callable(stub);
 }
@@ -62,56 +54,39 @@
 
 // static
 Callable CodeFactory::KeyedLoadIC(Isolate* isolate) {
-  if (FLAG_tf_load_ic_stub) {
-    KeyedLoadICTrampolineTFStub stub(isolate);
-    return make_callable(stub);
-  }
-  KeyedLoadICTrampolineStub stub(isolate);
+  KeyedLoadICTrampolineTFStub stub(isolate);
   return make_callable(stub);
 }
 
 // static
 Callable CodeFactory::KeyedLoadICInOptimizedCode(Isolate* isolate) {
-  if (FLAG_tf_load_ic_stub) {
-    KeyedLoadICTFStub stub(isolate);
-    return make_callable(stub);
-  }
-  KeyedLoadICStub stub(isolate);
+  KeyedLoadICTFStub stub(isolate);
   return make_callable(stub);
 }
 
 // static
 Callable CodeFactory::KeyedLoadIC_Megamorphic(Isolate* isolate) {
-  if (FLAG_tf_load_ic_stub) {
-    return Callable(isolate->builtins()->KeyedLoadIC_Megamorphic_TF(),
-                    LoadWithVectorDescriptor(isolate));
-  }
-  return Callable(isolate->builtins()->KeyedLoadIC_Megamorphic(),
+  return Callable(isolate->builtins()->KeyedLoadIC_Megamorphic_TF(),
                   LoadWithVectorDescriptor(isolate));
 }
 
 // static
-Callable CodeFactory::CallIC(Isolate* isolate, int argc,
-                             ConvertReceiverMode mode,
+Callable CodeFactory::CallIC(Isolate* isolate, ConvertReceiverMode mode,
                              TailCallMode tail_call_mode) {
-  CallICTrampolineStub stub(isolate, CallICState(argc, mode, tail_call_mode));
+  CallICTrampolineStub stub(isolate, CallICState(mode, tail_call_mode));
   return make_callable(stub);
 }
 
 // static
-Callable CodeFactory::CallICInOptimizedCode(Isolate* isolate, int argc,
+Callable CodeFactory::CallICInOptimizedCode(Isolate* isolate,
                                             ConvertReceiverMode mode,
                                             TailCallMode tail_call_mode) {
-  CallICStub stub(isolate, CallICState(argc, mode, tail_call_mode));
+  CallICStub stub(isolate, CallICState(mode, tail_call_mode));
   return make_callable(stub);
 }
 
 // static
 Callable CodeFactory::StoreIC(Isolate* isolate, LanguageMode language_mode) {
-  if (FLAG_tf_store_ic_stub) {
-    StoreICTrampolineTFStub stub(isolate, StoreICState(language_mode));
-    return make_callable(stub);
-  }
   StoreICTrampolineStub stub(isolate, StoreICState(language_mode));
   return make_callable(stub);
 }
@@ -119,10 +94,6 @@
 // static
 Callable CodeFactory::StoreICInOptimizedCode(Isolate* isolate,
                                              LanguageMode language_mode) {
-  if (FLAG_tf_store_ic_stub) {
-    StoreICTFStub stub(isolate, StoreICState(language_mode));
-    return make_callable(stub);
-  }
   StoreICStub stub(isolate, StoreICState(language_mode));
   return make_callable(stub);
 }
@@ -130,6 +101,10 @@
 // static
 Callable CodeFactory::KeyedStoreIC(Isolate* isolate,
                                    LanguageMode language_mode) {
+  if (FLAG_tf_store_ic_stub) {
+    KeyedStoreICTrampolineTFStub stub(isolate, StoreICState(language_mode));
+    return make_callable(stub);
+  }
   KeyedStoreICTrampolineStub stub(isolate, StoreICState(language_mode));
   return make_callable(stub);
 }
@@ -137,11 +112,31 @@
 // static
 Callable CodeFactory::KeyedStoreICInOptimizedCode(Isolate* isolate,
                                                   LanguageMode language_mode) {
+  if (FLAG_tf_store_ic_stub) {
+    KeyedStoreICTFStub stub(isolate, StoreICState(language_mode));
+    return make_callable(stub);
+  }
   KeyedStoreICStub stub(isolate, StoreICState(language_mode));
   return make_callable(stub);
 }
 
 // static
+Callable CodeFactory::KeyedStoreIC_Megamorphic(Isolate* isolate,
+                                               LanguageMode language_mode) {
+  if (FLAG_tf_store_ic_stub) {
+    return Callable(
+        language_mode == STRICT
+            ? isolate->builtins()->KeyedStoreIC_Megamorphic_Strict_TF()
+            : isolate->builtins()->KeyedStoreIC_Megamorphic_TF(),
+        StoreWithVectorDescriptor(isolate));
+  }
+  return Callable(language_mode == STRICT
+                      ? isolate->builtins()->KeyedStoreIC_Megamorphic_Strict()
+                      : isolate->builtins()->KeyedStoreIC_Megamorphic(),
+                  StoreWithVectorDescriptor(isolate));
+}
+
+// static
 Callable CodeFactory::CompareIC(Isolate* isolate, Token::Value op) {
   CompareICStub stub(isolate, op);
   return make_callable(stub);
@@ -154,12 +149,6 @@
 }
 
 // static
-Callable CodeFactory::InstanceOf(Isolate* isolate) {
-  InstanceOfStub stub(isolate);
-  return make_callable(stub);
-}
-
-// static
 Callable CodeFactory::GetProperty(Isolate* isolate) {
   GetPropertyStub stub(isolate);
   return make_callable(stub);
@@ -190,36 +179,12 @@
 }
 
 // static
-Callable CodeFactory::ToString(Isolate* isolate) {
-  return Callable(isolate->builtins()->ToString(),
-                  TypeConversionDescriptor(isolate));
-}
-
-// static
 Callable CodeFactory::ToName(Isolate* isolate) {
   return Callable(isolate->builtins()->ToName(),
                   TypeConversionDescriptor(isolate));
 }
 
 // static
-Callable CodeFactory::ToInteger(Isolate* isolate) {
-  ToIntegerStub stub(isolate);
-  return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::ToLength(Isolate* isolate) {
-  ToLengthStub stub(isolate);
-  return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::ToObject(Isolate* isolate) {
-  ToObjectStub stub(isolate);
-  return make_callable(stub);
-}
-
-// static
 Callable CodeFactory::NonPrimitiveToPrimitive(Isolate* isolate,
                                               ToPrimitiveHint hint) {
   return Callable(isolate->builtins()->NonPrimitiveToPrimitive(hint),
@@ -240,88 +205,59 @@
 }
 
 // static
-Callable CodeFactory::OrdinaryHasInstance(Isolate* isolate) {
-  return Callable(isolate->builtins()->OrdinaryHasInstance(),
-                  CompareDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::RegExpConstructResult(Isolate* isolate) {
-  RegExpConstructResultStub stub(isolate);
-  return make_callable(stub);
-}
-
-// static
 Callable CodeFactory::RegExpExec(Isolate* isolate) {
   RegExpExecStub stub(isolate);
   return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
 }
 
 // static
-Callable CodeFactory::Add(Isolate* isolate) {
-  AddStub stub(isolate);
-  return make_callable(stub);
+Callable CodeFactory::StringFromCharCode(Isolate* isolate) {
+  Handle<Code> code(isolate->builtins()->StringFromCharCode());
+  return Callable(code, BuiltinDescriptor(isolate));
 }
 
-// static
-Callable CodeFactory::Subtract(Isolate* isolate) {
-  SubtractStub stub(isolate);
-  return make_callable(stub);
-}
+#define DECLARE_TFS(Name, Kind, Extra, InterfaceDescriptor) \
+  typedef InterfaceDescriptor##Descriptor Name##Descriptor;
+BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, DECLARE_TFS,
+             IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
+#undef DECLARE_TFS
 
-// static
-Callable CodeFactory::Multiply(Isolate* isolate) {
-  MultiplyStub stub(isolate);
-  return make_callable(stub);
-}
+#define TFS_BUILTIN(Name)                             \
+  Callable CodeFactory::Name(Isolate* isolate) {      \
+    Handle<Code> code(isolate->builtins()->Name());   \
+    return Callable(code, Name##Descriptor(isolate)); \
+  }
 
-// static
-Callable CodeFactory::Divide(Isolate* isolate) {
-  DivideStub stub(isolate);
-  return make_callable(stub);
-}
+TFS_BUILTIN(ToString)
+TFS_BUILTIN(Add)
+TFS_BUILTIN(Subtract)
+TFS_BUILTIN(Multiply)
+TFS_BUILTIN(Divide)
+TFS_BUILTIN(Modulus)
+TFS_BUILTIN(BitwiseAnd)
+TFS_BUILTIN(BitwiseOr)
+TFS_BUILTIN(BitwiseXor)
+TFS_BUILTIN(ShiftLeft)
+TFS_BUILTIN(ShiftRight)
+TFS_BUILTIN(ShiftRightLogical)
+TFS_BUILTIN(LessThan)
+TFS_BUILTIN(LessThanOrEqual)
+TFS_BUILTIN(GreaterThan)
+TFS_BUILTIN(GreaterThanOrEqual)
+TFS_BUILTIN(Equal)
+TFS_BUILTIN(NotEqual)
+TFS_BUILTIN(StrictEqual)
+TFS_BUILTIN(StrictNotEqual)
+TFS_BUILTIN(HasProperty)
+TFS_BUILTIN(ToInteger)
+TFS_BUILTIN(ToLength)
+TFS_BUILTIN(ToObject)
+TFS_BUILTIN(Typeof)
+TFS_BUILTIN(InstanceOf)
+TFS_BUILTIN(OrdinaryHasInstance)
+TFS_BUILTIN(ForInFilter)
 
-// static
-Callable CodeFactory::Modulus(Isolate* isolate) {
-  ModulusStub stub(isolate);
-  return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::ShiftRight(Isolate* isolate) {
-  ShiftRightStub stub(isolate);
-  return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::ShiftRightLogical(Isolate* isolate) {
-  ShiftRightLogicalStub stub(isolate);
-  return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::ShiftLeft(Isolate* isolate) {
-  ShiftLeftStub stub(isolate);
-  return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::BitwiseAnd(Isolate* isolate) {
-  BitwiseAndStub stub(isolate);
-  return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::BitwiseOr(Isolate* isolate) {
-  BitwiseOrStub stub(isolate);
-  return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::BitwiseXor(Isolate* isolate) {
-  BitwiseXorStub stub(isolate);
-  return make_callable(stub);
-}
+#undef TFS_BUILTIN
 
 // static
 Callable CodeFactory::Inc(Isolate* isolate) {
@@ -336,54 +272,6 @@
 }
 
 // static
-Callable CodeFactory::LessThan(Isolate* isolate) {
-  LessThanStub stub(isolate);
-  return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::LessThanOrEqual(Isolate* isolate) {
-  LessThanOrEqualStub stub(isolate);
-  return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::GreaterThan(Isolate* isolate) {
-  GreaterThanStub stub(isolate);
-  return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::GreaterThanOrEqual(Isolate* isolate) {
-  GreaterThanOrEqualStub stub(isolate);
-  return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::Equal(Isolate* isolate) {
-  EqualStub stub(isolate);
-  return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::NotEqual(Isolate* isolate) {
-  NotEqualStub stub(isolate);
-  return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::StrictEqual(Isolate* isolate) {
-  StrictEqualStub stub(isolate);
-  return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::StrictNotEqual(Isolate* isolate) {
-  StrictNotEqualStub stub(isolate);
-  return make_callable(stub);
-}
-
-// static
 Callable CodeFactory::StringAdd(Isolate* isolate, StringAddFlags flags,
                                 PretenureFlag pretenure_flag) {
   StringAddStub stub(isolate, flags, pretenure_flag);
@@ -463,12 +351,6 @@
 }
 
 // static
-Callable CodeFactory::Typeof(Isolate* isolate) {
-  TypeofStub stub(isolate);
-  return make_callable(stub);
-}
-
-// static
 Callable CodeFactory::FastCloneRegExp(Isolate* isolate) {
   FastCloneRegExpStub stub(isolate);
   return make_callable(stub);
@@ -591,18 +473,6 @@
 }
 
 // static
-Callable CodeFactory::HasProperty(Isolate* isolate) {
-  HasPropertyStub stub(isolate);
-  return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::ForInFilter(Isolate* isolate) {
-  ForInFilterStub stub(isolate);
-  return make_callable(stub);
-}
-
-// static
 Callable CodeFactory::InterpreterPushArgsAndCall(Isolate* isolate,
                                                  TailCallMode tail_call_mode,
                                                  CallableType function_type) {
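
The DECLARE_TFS/TFS_BUILTIN pair above replaces the long list of hand-written accessors: DECLARE_TFS typedefs each TurboFan-stub builtin's interface descriptor under the builtin's own name, and TFS_BUILTIN then stamps out one Callable factory per name. For example, TFS_BUILTIN(Add) expands to exactly:

    Callable CodeFactory::Add(Isolate* isolate) {
      Handle<Code> code(isolate->builtins()->Add());
      return Callable(code, AddDescriptor(isolate));
    }
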
diff --git a/src/code-factory.h b/src/code-factory.h
index 59f069e..033e5d5 100644
--- a/src/code-factory.h
+++ b/src/code-factory.h
@@ -28,8 +28,7 @@
   const CallInterfaceDescriptor descriptor_;
 };
 
-
-class CodeFactory final {
+class V8_EXPORT_PRIVATE CodeFactory final {
  public:
   // Initial states for ICs.
   static Callable LoadIC(Isolate* isolate);
@@ -40,18 +39,18 @@
   static Callable KeyedLoadIC(Isolate* isolate);
   static Callable KeyedLoadICInOptimizedCode(Isolate* isolate);
   static Callable KeyedLoadIC_Megamorphic(Isolate* isolate);
-  static Callable CallIC(Isolate* isolate, int argc,
+  static Callable CallIC(Isolate* isolate,
                          ConvertReceiverMode mode = ConvertReceiverMode::kAny,
                          TailCallMode tail_call_mode = TailCallMode::kDisallow);
   static Callable CallICInOptimizedCode(
-      Isolate* isolate, int argc,
-      ConvertReceiverMode mode = ConvertReceiverMode::kAny,
+      Isolate* isolate, ConvertReceiverMode mode = ConvertReceiverMode::kAny,
       TailCallMode tail_call_mode = TailCallMode::kDisallow);
   static Callable StoreIC(Isolate* isolate, LanguageMode mode);
   static Callable StoreICInOptimizedCode(Isolate* isolate, LanguageMode mode);
   static Callable KeyedStoreIC(Isolate* isolate, LanguageMode mode);
   static Callable KeyedStoreICInOptimizedCode(Isolate* isolate,
                                               LanguageMode mode);
+  static Callable KeyedStoreIC_Megamorphic(Isolate* isolate, LanguageMode mode);
 
   static Callable ResumeGenerator(Isolate* isolate);
 
@@ -65,6 +64,9 @@
   // Code stubs. Add methods here as needed to reduce dependency on
   // code-stubs.h.
   static Callable InstanceOf(Isolate* isolate);
+  static Callable OrdinaryHasInstance(Isolate* isolate);
+
+  static Callable StringFromCharCode(Isolate* isolate);
 
   static Callable GetProperty(Isolate* isolate);
 
@@ -84,9 +86,6 @@
                                       OrdinaryToPrimitiveHint hint);
   static Callable NumberToString(Isolate* isolate);
 
-  static Callable OrdinaryHasInstance(Isolate* isolate);
-
-  static Callable RegExpConstructResult(Isolate* isolate);
   static Callable RegExpExec(Isolate* isolate);
 
   static Callable Add(Isolate* isolate);
diff --git a/src/code-stub-assembler.cc b/src/code-stub-assembler.cc
index 016814c..b1ed2f1 100644
--- a/src/code-stub-assembler.cc
+++ b/src/code-stub-assembler.cc
@@ -1,7 +1,6 @@
 // Copyright 2016 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
-
 #include "src/code-stub-assembler.h"
 #include "src/code-factory.h"
 #include "src/frames-inl.h"
@@ -26,11 +25,32 @@
                                      const char* name)
     : compiler::CodeAssembler(isolate, zone, parameter_count, flags, name) {}
 
-void CodeStubAssembler::Assert(Node* condition) {
+void CodeStubAssembler::Assert(ConditionBody condition_body, const char* message,
+                               const char* file, int line) {
 #if defined(DEBUG)
   Label ok(this);
-  Comment("[ Assert");
-  GotoIf(condition, &ok);
+  Label not_ok(this, Label::kDeferred);
+  if (message != nullptr && FLAG_code_comments) {
+    Comment("[ Assert: %s", message);
+  } else {
+    Comment("[ Assert");
+  }
+  Node* condition = condition_body();
+  DCHECK_NOT_NULL(condition);
+  Branch(condition, &ok, &not_ok);
+  Bind(&not_ok);
+  if (message != nullptr) {
+    char chars[1024];
+    Vector<char> buffer(chars);
+    if (file != nullptr) {
+      SNPrintF(buffer, "CSA_ASSERT failed: %s [%s:%d]\n", message, file, line);
+    } else {
+      SNPrintF(buffer, "CSA_ASSERT failed: %s\n", message);
+    }
+    CallRuntime(
+        Runtime::kGlobalPrint, SmiConstant(Smi::kZero),
+        HeapConstant(factory()->NewStringFromAsciiChecked(&(buffer[0]))));
+  }
   DebugBreak();
   Goto(&ok);
   Bind(&ok);
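
Assert now takes the condition as a deferred ConditionBody callback plus message/file/line, so release builds never construct the condition sub-graph and debug builds can print which assertion failed before hitting DebugBreak. The CSA_ASSERT and CSA_SLOW_ASSERT call sites later in this file presumably wrap the expression in a lambda; a plausible shape of the macro (its definition lives in code-stub-assembler.h, outside this patch excerpt) is:

    // Assumed macro shape; not part of this diff.
    #define CSA_ASSERT(csa, x) \
      (csa)->Assert([&] { return (x); }, #x, __FILE__, __LINE__)
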
@@ -38,9 +58,7 @@
 #endif
 }
 
-Node* CodeStubAssembler::NoContextConstant() {
-  return SmiConstant(Smi::FromInt(0));
-}
+Node* CodeStubAssembler::NoContextConstant() { return NumberConstant(0); }
 
 #define HEAP_CONSTANT_ACCESSOR(rootName, name)     \
   Node* CodeStubAssembler::name##Constant() {      \
@@ -73,6 +91,62 @@
   }
 }
 
+Node* CodeStubAssembler::IntPtrAddFoldConstants(Node* left, Node* right) {
+  int32_t left_constant;
+  bool is_left_constant = ToInt32Constant(left, left_constant);
+  int32_t right_constant;
+  bool is_right_constant = ToInt32Constant(right, right_constant);
+  if (is_left_constant) {
+    if (is_right_constant) {
+      return IntPtrConstant(left_constant + right_constant);
+    }
+    if (left_constant == 0) {
+      return right;
+    }
+  } else if (is_right_constant) {
+    if (right_constant == 0) {
+      return left;
+    }
+  }
+  return IntPtrAdd(left, right);
+}
+
+Node* CodeStubAssembler::IntPtrSubFoldConstants(Node* left, Node* right) {
+  int32_t left_constant;
+  bool is_left_constant = ToInt32Constant(left, left_constant);
+  int32_t right_constant;
+  bool is_right_constant = ToInt32Constant(right, right_constant);
+  if (is_left_constant) {
+    if (is_right_constant) {
+      return IntPtrConstant(left_constant - right_constant);
+    }
+  } else if (is_right_constant) {
+    if (right_constant == 0) {
+      return left;
+    }
+  }
+  return IntPtrSub(left, right);
+}
+
+Node* CodeStubAssembler::IntPtrRoundUpToPowerOfTwo32(Node* value) {
+  Comment("IntPtrRoundUpToPowerOfTwo32");
+  CSA_ASSERT(this, UintPtrLessThanOrEqual(value, IntPtrConstant(0x80000000u)));
+  value = IntPtrSub(value, IntPtrConstant(1));
+  for (int i = 1; i <= 16; i *= 2) {
+    value = WordOr(value, WordShr(value, IntPtrConstant(i)));
+  }
+  return IntPtrAdd(value, IntPtrConstant(1));
+}
+
+Node* CodeStubAssembler::WordIsPowerOfTwo(Node* value) {
+  // value && !(value & (value - 1))
+  return WordEqual(
+      Select(WordEqual(value, IntPtrConstant(0)), IntPtrConstant(1),
+             WordAnd(value, IntPtrSub(value, IntPtrConstant(1))),
+             MachineType::PointerRepresentation()),
+      IntPtrConstant(0));
+}
+
 Node* CodeStubAssembler::Float64Round(Node* x) {
   Node* one = Float64Constant(1.0);
   Node* one_half = Float64Constant(0.5);
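
IntPtrRoundUpToPowerOfTwo32 above uses the classic bit-smearing trick: subtract one, OR the value with ever-wider right-shifted copies of itself until every bit below the leading one is set, then add one. The same computation as standalone C++:

    #include <cstdint>

    // Round v up to the next power of two; valid for v <= 2^31,
    // matching the CSA_ASSERT in the graph version above.
    uint32_t RoundUpToPowerOfTwo32(uint32_t v) {
      v -= 1;
      for (int i = 1; i <= 16; i *= 2) {
        v |= v >> i;  // smear the highest set bit downwards
      }
      return v + 1;
    }
    // RoundUpToPowerOfTwo32(17) == 32, RoundUpToPowerOfTwo32(32) == 32.
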
@@ -198,6 +272,37 @@
   return var_x.value();
 }
 
+Node* CodeStubAssembler::Float64RoundToEven(Node* x) {
+  if (IsFloat64RoundTiesEvenSupported()) {
+    return Float64RoundTiesEven(x);
+  }
+  // See ES#sec-touint8clamp for details.
+  Node* f = Float64Floor(x);
+  Node* f_and_half = Float64Add(f, Float64Constant(0.5));
+
+  Variable var_result(this, MachineRepresentation::kFloat64);
+  Label return_f(this), return_f_plus_one(this), done(this);
+
+  GotoIf(Float64LessThan(f_and_half, x), &return_f_plus_one);
+  GotoIf(Float64LessThan(x, f_and_half), &return_f);
+  {
+    Node* f_mod_2 = Float64Mod(f, Float64Constant(2.0));
+    Branch(Float64Equal(f_mod_2, Float64Constant(0.0)), &return_f,
+           &return_f_plus_one);
+  }
+
+  Bind(&return_f);
+  var_result.Bind(f);
+  Goto(&done);
+
+  Bind(&return_f_plus_one);
+  var_result.Bind(Float64Add(f, Float64Constant(1.0)));
+  Goto(&done);
+
+  Bind(&done);
+  return var_result.value();
+}
+
 Node* CodeStubAssembler::Float64Trunc(Node* x) {
   if (IsFloat64RoundTruncateSupported()) {
     return Float64RoundTruncate(x);
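
The fallback path of Float64RoundToEven is banker's rounding built from floor: with f = floor(x) and the midpoint f + 0.5, values above the midpoint round up, values below round down, and an exact tie picks whichever of f and f + 1 is even. The same logic in plain C++:

    #include <cmath>

    // Round-half-to-even fallback, mirroring the CSA graph above.
    double RoundTiesEven(double x) {
      double f = std::floor(x);
      double mid = f + 0.5;
      if (mid < x) return f + 1.0;  // above the midpoint
      if (x < mid) return f;        // below the midpoint
      return std::fmod(f, 2.0) == 0.0 ? f : f + 1.0;  // tie: take the even one
    }
    // RoundTiesEven(2.5) == 2.0, RoundTiesEven(3.5) == 4.0.
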
@@ -293,38 +398,39 @@
   return ChangeInt32ToFloat64(SmiToWord32(value));
 }
 
-Node* CodeStubAssembler::SmiAdd(Node* a, Node* b) { return IntPtrAdd(a, b); }
-
-Node* CodeStubAssembler::SmiAddWithOverflow(Node* a, Node* b) {
-  return IntPtrAddWithOverflow(a, b);
+Node* CodeStubAssembler::SmiAdd(Node* a, Node* b) {
+  return BitcastWordToTaggedSigned(
+      IntPtrAdd(BitcastTaggedToWord(a), BitcastTaggedToWord(b)));
 }
 
-Node* CodeStubAssembler::SmiSub(Node* a, Node* b) { return IntPtrSub(a, b); }
-
-Node* CodeStubAssembler::SmiSubWithOverflow(Node* a, Node* b) {
-  return IntPtrSubWithOverflow(a, b);
+Node* CodeStubAssembler::SmiSub(Node* a, Node* b) {
+  return BitcastWordToTaggedSigned(
+      IntPtrSub(BitcastTaggedToWord(a), BitcastTaggedToWord(b)));
 }
 
-Node* CodeStubAssembler::SmiEqual(Node* a, Node* b) { return WordEqual(a, b); }
+Node* CodeStubAssembler::SmiEqual(Node* a, Node* b) {
+  return WordEqual(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
+}
 
 Node* CodeStubAssembler::SmiAbove(Node* a, Node* b) {
-  return UintPtrGreaterThan(a, b);
+  return UintPtrGreaterThan(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
 }
 
 Node* CodeStubAssembler::SmiAboveOrEqual(Node* a, Node* b) {
-  return UintPtrGreaterThanOrEqual(a, b);
+  return UintPtrGreaterThanOrEqual(BitcastTaggedToWord(a),
+                                   BitcastTaggedToWord(b));
 }
 
 Node* CodeStubAssembler::SmiBelow(Node* a, Node* b) {
-  return UintPtrLessThan(a, b);
+  return UintPtrLessThan(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
 }
 
 Node* CodeStubAssembler::SmiLessThan(Node* a, Node* b) {
-  return IntPtrLessThan(a, b);
+  return IntPtrLessThan(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
 }
 
 Node* CodeStubAssembler::SmiLessThanOrEqual(Node* a, Node* b) {
-  return IntPtrLessThanOrEqual(a, b);
+  return IntPtrLessThanOrEqual(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
 }
 
 Node* CodeStubAssembler::SmiMax(Node* a, Node* b) {
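
The Smi helpers above now funnel their operands through BitcastTaggedToWord (and results back through BitcastWordToTaggedSigned) rather than operating on tagged values directly; the arithmetic is unchanged, because a Smi is just the integer shifted above the tag bits, but the casts make the tagged/word transitions explicit to the compiler. A scalar model of SmiAdd under that representation (the names and the 32-bit shift sketch a 64-bit target and are not V8's exact definitions):

    #include <cstdint>

    using Tagged = intptr_t;
    constexpr int kSmiShift = 32;  // assumption: 64-bit target layout

    Tagged SmiTag(int32_t value) { return static_cast<Tagged>(value) << kSmiShift; }
    int32_t SmiUntag(Tagged smi) { return static_cast<int32_t>(smi >> kSmiShift); }

    // SmiAdd as in the diff: bitcast to word, add, bitcast back. The tag
    // bits of both operands are zero, so word addition preserves the tag.
    Tagged SmiAdd(Tagged a, Tagged b) { return a + b; }
    // SmiUntag(SmiAdd(SmiTag(2), SmiTag(3))) == 5
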
@@ -450,7 +556,7 @@
     var_lhs_float64.Bind(SmiToFloat64(a));
     var_rhs_float64.Bind(SmiToFloat64(b));
     Node* value = Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
-    Node* result = ChangeFloat64ToTagged(value);
+    Node* result = AllocateHeapNumberWithValue(value);
     var_result.Bind(result);
     Goto(&return_result);
   }
@@ -459,8 +565,9 @@
   return var_result.value();
 }
 
-Node* CodeStubAssembler::WordIsSmi(Node* a) {
-  return WordEqual(WordAnd(a, IntPtrConstant(kSmiTagMask)), IntPtrConstant(0));
+Node* CodeStubAssembler::TaggedIsSmi(Node* a) {
+  return WordEqual(WordAnd(BitcastTaggedToWord(a), IntPtrConstant(kSmiTagMask)),
+                   IntPtrConstant(0));
 }
 
 Node* CodeStubAssembler::WordIsPositiveSmi(Node* a) {
@@ -468,6 +575,11 @@
                    IntPtrConstant(0));
 }
 
+Node* CodeStubAssembler::WordIsWordAligned(Node* word) {
+  return WordEqual(IntPtrConstant(0),
+                   WordAnd(word, IntPtrConstant((1 << kPointerSizeLog2) - 1)));
+}
+
 void CodeStubAssembler::BranchIfSimd128Equal(Node* lhs, Node* lhs_map,
                                              Node* rhs, Node* rhs_map,
                                              Label* if_equal,
@@ -568,10 +680,28 @@
   }
 }
 
+void CodeStubAssembler::BranchIfJSReceiver(Node* object, Label* if_true,
+                                           Label* if_false) {
+  GotoIf(TaggedIsSmi(object), if_false);
+  STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+  Branch(Int32GreaterThanOrEqual(LoadInstanceType(object),
+                                 Int32Constant(FIRST_JS_RECEIVER_TYPE)),
+         if_true, if_false);
+}
+
+void CodeStubAssembler::BranchIfJSObject(Node* object, Label* if_true,
+                                         Label* if_false) {
+  GotoIf(TaggedIsSmi(object), if_false);
+  STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+  Branch(Int32GreaterThanOrEqual(LoadInstanceType(object),
+                                 Int32Constant(FIRST_JS_OBJECT_TYPE)),
+         if_true, if_false);
+}
+
 void CodeStubAssembler::BranchIfFastJSArray(Node* object, Node* context,
                                             Label* if_true, Label* if_false) {
   // Bailout if receiver is a Smi.
-  GotoIf(WordIsSmi(object), if_false);
+  GotoIf(TaggedIsSmi(object), if_false);
 
   Node* map = LoadMap(object);
 
@@ -579,20 +709,14 @@
   GotoIf(WordNotEqual(LoadMapInstanceType(map), Int32Constant(JS_ARRAY_TYPE)),
          if_false);
 
-  Node* bit_field2 = LoadMapBitField2(map);
-  Node* elements_kind = BitFieldDecode<Map::ElementsKindBits>(bit_field2);
+  Node* elements_kind = LoadMapElementsKind(map);
 
   // Bailout if receiver has slow elements.
-  GotoIf(
-      Int32GreaterThan(elements_kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
-      if_false);
+  GotoUnless(IsFastElementsKind(elements_kind), if_false);
 
   // Check prototype chain if receiver does not have packed elements.
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == (FAST_SMI_ELEMENTS | 1));
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == (FAST_ELEMENTS | 1));
-  STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == (FAST_DOUBLE_ELEMENTS | 1));
-  Node* holey_elements = Word32And(elements_kind, Int32Constant(1));
-  GotoIf(Word32Equal(holey_elements, Int32Constant(0)), if_true);
+  GotoUnless(IsHoleyFastElementsKind(elements_kind), if_true);
+
   BranchIfPrototypesHaveNoElements(map, if_true, if_false);
 }
 
@@ -613,19 +737,17 @@
          &no_runtime_call);
 
   Bind(&runtime_call);
-  // AllocateInTargetSpace does not use the context.
-  Node* context = SmiConstant(Smi::FromInt(0));
-
   Node* runtime_result;
   if (flags & kPretenured) {
     Node* runtime_flags = SmiConstant(
         Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
                      AllocateTargetSpace::encode(AllocationSpace::OLD_SPACE)));
-    runtime_result = CallRuntime(Runtime::kAllocateInTargetSpace, context,
-                                 SmiTag(size_in_bytes), runtime_flags);
+    runtime_result =
+        CallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(),
+                    SmiTag(size_in_bytes), runtime_flags);
   } else {
-    runtime_result = CallRuntime(Runtime::kAllocateInNewSpace, context,
-                                 SmiTag(size_in_bytes));
+    runtime_result = CallRuntime(Runtime::kAllocateInNewSpace,
+                                 NoContextConstant(), SmiTag(size_in_bytes));
   }
   result.Bind(runtime_result);
   Goto(&merge_runtime);
@@ -699,6 +821,7 @@
 }
 
 Node* CodeStubAssembler::Allocate(Node* size_in_bytes, AllocationFlags flags) {
+  Comment("Allocate");
   bool const new_space = !(flags & kPretenured);
   Node* top_address = ExternalConstant(
       new_space
@@ -730,6 +853,11 @@
   return InnerAllocate(previous, IntPtrConstant(offset));
 }
 
+Node* CodeStubAssembler::IsRegularHeapObjectSize(Node* size) {
+  return UintPtrLessThanOrEqual(size,
+                                IntPtrConstant(kMaxRegularHeapObjectSize));
+}
+
 void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
                                                 Label* if_false) {
   Label if_valueissmi(this), if_valueisnotsmi(this), if_valueisstring(this),
@@ -740,7 +868,7 @@
   GotoIf(WordEqual(value, BooleanConstant(false)), if_false);
 
   // Check if {value} is a Smi or a HeapObject.
-  Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+  Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
 
   Bind(&if_valueissmi);
   {
@@ -779,9 +907,8 @@
                                           MachineType::Float64());
 
       // Check if the floating point {value} is neither 0.0, -0.0 nor NaN.
-      Node* zero = Float64Constant(0.0);
-      GotoIf(Float64LessThan(zero, value_value), if_true);
-      BranchIfFloat64LessThan(value_value, zero, if_true, if_false);
+      Branch(Float64LessThan(Float64Constant(0.0), Float64Abs(value_value)),
+             if_true, if_false);
     }
 
     Bind(&if_valueisother);
@@ -796,8 +923,8 @@
           value_map_bitfield, Int32Constant(1 << Map::kIsUndetectable));
 
       // Check if the {value} is undetectable.
-      BranchIfWord32Equal(value_map_undetectable, Int32Constant(0), if_true,
-                          if_false);
+      Branch(Word32Equal(value_map_undetectable, Int32Constant(0)), if_true,
+             if_false);
     }
   }
 }
@@ -895,9 +1022,9 @@
   return LoadMapInstanceType(LoadMap(object));
 }
 
-void CodeStubAssembler::AssertInstanceType(Node* object,
-                                           InstanceType instance_type) {
-  Assert(Word32Equal(LoadInstanceType(object), Int32Constant(instance_type)));
+Node* CodeStubAssembler::HasInstanceType(Node* object,
+                                         InstanceType instance_type) {
+  return Word32Equal(LoadInstanceType(object), Int32Constant(instance_type));
 }
 
 Node* CodeStubAssembler::LoadProperties(Node* object) {
@@ -908,11 +1035,12 @@
   return LoadObjectField(object, JSObject::kElementsOffset);
 }
 
-Node* CodeStubAssembler::LoadJSArrayLength(compiler::Node* array) {
+Node* CodeStubAssembler::LoadJSArrayLength(Node* array) {
+  CSA_ASSERT(this, IsJSArray(array));
   return LoadObjectField(array, JSArray::kLengthOffset);
 }
 
-Node* CodeStubAssembler::LoadFixedArrayBaseLength(compiler::Node* array) {
+Node* CodeStubAssembler::LoadFixedArrayBaseLength(Node* array) {
   return LoadObjectField(array, FixedArrayBase::kLengthOffset);
 }
 
@@ -921,14 +1049,17 @@
 }
 
 Node* CodeStubAssembler::LoadMapBitField(Node* map) {
+  CSA_SLOW_ASSERT(this, IsMap(map));
   return LoadObjectField(map, Map::kBitFieldOffset, MachineType::Uint8());
 }
 
 Node* CodeStubAssembler::LoadMapBitField2(Node* map) {
+  CSA_SLOW_ASSERT(this, IsMap(map));
   return LoadObjectField(map, Map::kBitField2Offset, MachineType::Uint8());
 }
 
 Node* CodeStubAssembler::LoadMapBitField3(Node* map) {
+  CSA_SLOW_ASSERT(this, IsMap(map));
   return LoadObjectField(map, Map::kBitField3Offset, MachineType::Uint32());
 }
 
@@ -937,44 +1068,64 @@
 }
 
 Node* CodeStubAssembler::LoadMapElementsKind(Node* map) {
+  CSA_SLOW_ASSERT(this, IsMap(map));
   Node* bit_field2 = LoadMapBitField2(map);
-  return BitFieldDecode<Map::ElementsKindBits>(bit_field2);
+  return DecodeWord32<Map::ElementsKindBits>(bit_field2);
 }
 
 Node* CodeStubAssembler::LoadMapDescriptors(Node* map) {
+  CSA_SLOW_ASSERT(this, IsMap(map));
   return LoadObjectField(map, Map::kDescriptorsOffset);
 }
 
 Node* CodeStubAssembler::LoadMapPrototype(Node* map) {
+  CSA_SLOW_ASSERT(this, IsMap(map));
   return LoadObjectField(map, Map::kPrototypeOffset);
 }
 
+Node* CodeStubAssembler::LoadMapPrototypeInfo(Node* map,
+                                              Label* if_no_proto_info) {
+  CSA_ASSERT(this, IsMap(map));
+  Node* prototype_info =
+      LoadObjectField(map, Map::kTransitionsOrPrototypeInfoOffset);
+  GotoIf(TaggedIsSmi(prototype_info), if_no_proto_info);
+  GotoUnless(WordEqual(LoadMap(prototype_info),
+                       LoadRoot(Heap::kPrototypeInfoMapRootIndex)),
+             if_no_proto_info);
+  return prototype_info;
+}
+
 Node* CodeStubAssembler::LoadMapInstanceSize(Node* map) {
+  CSA_SLOW_ASSERT(this, IsMap(map));
   return ChangeUint32ToWord(
       LoadObjectField(map, Map::kInstanceSizeOffset, MachineType::Uint8()));
 }
 
 Node* CodeStubAssembler::LoadMapInobjectProperties(Node* map) {
+  CSA_SLOW_ASSERT(this, IsMap(map));
   // See Map::GetInObjectProperties() for details.
   STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
-  Assert(Int32GreaterThanOrEqual(LoadMapInstanceType(map),
-                                 Int32Constant(FIRST_JS_OBJECT_TYPE)));
+  CSA_ASSERT(this,
+             Int32GreaterThanOrEqual(LoadMapInstanceType(map),
+                                     Int32Constant(FIRST_JS_OBJECT_TYPE)));
   return ChangeUint32ToWord(LoadObjectField(
       map, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset,
       MachineType::Uint8()));
 }
 
 Node* CodeStubAssembler::LoadMapConstructorFunctionIndex(Node* map) {
+  CSA_SLOW_ASSERT(this, IsMap(map));
   // See Map::GetConstructorFunctionIndex() for details.
   STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
-  Assert(Int32LessThanOrEqual(LoadMapInstanceType(map),
-                              Int32Constant(LAST_PRIMITIVE_TYPE)));
+  CSA_ASSERT(this, Int32LessThanOrEqual(LoadMapInstanceType(map),
+                                        Int32Constant(LAST_PRIMITIVE_TYPE)));
   return ChangeUint32ToWord(LoadObjectField(
       map, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset,
       MachineType::Uint8()));
 }
 
 Node* CodeStubAssembler::LoadMapConstructor(Node* map) {
+  CSA_SLOW_ASSERT(this, IsMap(map));
   Variable result(this, MachineRepresentation::kTagged);
   result.Bind(LoadObjectField(map, Map::kConstructorOrBackPointerOffset));
 
@@ -982,7 +1133,7 @@
   Goto(&loop);
   Bind(&loop);
   {
-    GotoIf(WordIsSmi(result.value()), &done);
+    GotoIf(TaggedIsSmi(result.value()), &done);
     Node* is_map_type =
         Word32Equal(LoadInstanceType(result.value()), Int32Constant(MAP_TYPE));
     GotoUnless(is_map_type, &done);
@@ -995,6 +1146,7 @@
 }
 
 Node* CodeStubAssembler::LoadNameHashField(Node* name) {
+  CSA_ASSERT(this, IsName(name));
   return LoadObjectField(name, Name::kHashFieldOffset, MachineType::Uint32());
 }
 
@@ -1010,15 +1162,23 @@
 }
 
 Node* CodeStubAssembler::LoadStringLength(Node* object) {
+  CSA_ASSERT(this, IsString(object));
   return LoadObjectField(object, String::kLengthOffset);
 }
 
 Node* CodeStubAssembler::LoadJSValueValue(Node* object) {
+  CSA_ASSERT(this, IsJSValue(object));
   return LoadObjectField(object, JSValue::kValueOffset);
 }
 
+Node* CodeStubAssembler::LoadWeakCellValueUnchecked(Node* weak_cell) {
+  // TODO(ishell): fix callers.
+  return LoadObjectField(weak_cell, WeakCell::kValueOffset);
+}
+
 Node* CodeStubAssembler::LoadWeakCellValue(Node* weak_cell, Label* if_cleared) {
-  Node* value = LoadObjectField(weak_cell, WeakCell::kValueOffset);
+  CSA_ASSERT(this, IsWeakCell(weak_cell));
+  Node* value = LoadWeakCellValueUnchecked(weak_cell);
   if (if_cleared != nullptr) {
     GotoIf(WordEqual(value, IntPtrConstant(0)), if_cleared);
   }
@@ -1035,6 +1195,44 @@
   return Load(MachineType::AnyTagged(), object, offset);
 }
 
+Node* CodeStubAssembler::LoadFixedTypedArrayElement(
+    Node* data_pointer, Node* index_node, ElementsKind elements_kind,
+    ParameterMode parameter_mode) {
+  Node* offset =
+      ElementOffsetFromIndex(index_node, elements_kind, parameter_mode, 0);
+  MachineType type;
+  switch (elements_kind) {
+    case UINT8_ELEMENTS: /* fall through */
+    case UINT8_CLAMPED_ELEMENTS:
+      type = MachineType::Uint8();
+      break;
+    case INT8_ELEMENTS:
+      type = MachineType::Int8();
+      break;
+    case UINT16_ELEMENTS:
+      type = MachineType::Uint16();
+      break;
+    case INT16_ELEMENTS:
+      type = MachineType::Int16();
+      break;
+    case UINT32_ELEMENTS:
+      type = MachineType::Uint32();
+      break;
+    case INT32_ELEMENTS:
+      type = MachineType::Int32();
+      break;
+    case FLOAT32_ELEMENTS:
+      type = MachineType::Float32();
+      break;
+    case FLOAT64_ELEMENTS:
+      type = MachineType::Float64();
+      break;
+    default:
+      UNREACHABLE();
+  }
+  return Load(type, data_pointer, offset);
+}
+
 Node* CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
     Node* object, Node* index_node, int additional_offset,
     ParameterMode parameter_mode) {
@@ -1057,6 +1255,7 @@
 Node* CodeStubAssembler::LoadFixedDoubleArrayElement(
     Node* object, Node* index_node, MachineType machine_type,
     int additional_offset, ParameterMode parameter_mode, Label* if_hole) {
+  CSA_ASSERT(this, IsFixedDoubleArray(object));
   int32_t header_size =
       FixedDoubleArray::kHeaderSize + additional_offset - kHeapObjectTag;
   Node* offset = ElementOffsetFromIndex(index_node, FAST_HOLEY_DOUBLE_ELEMENTS,
@@ -1094,12 +1293,35 @@
   return Load(MachineType::AnyTagged(), context, IntPtrConstant(offset));
 }
 
+Node* CodeStubAssembler::LoadContextElement(Node* context, Node* slot_index) {
+  Node* offset =
+      IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
+                IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
+  return Load(MachineType::AnyTagged(), context, offset);
+}
+
+Node* CodeStubAssembler::StoreContextElement(Node* context, int slot_index,
+                                             Node* value) {
+  int offset = Context::SlotOffset(slot_index);
+  return Store(MachineRepresentation::kTagged, context, IntPtrConstant(offset),
+               value);
+}
+
+Node* CodeStubAssembler::StoreContextElement(Node* context, Node* slot_index,
+                                             Node* value) {
+  Node* offset =
+      IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
+                IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
+  return Store(MachineRepresentation::kTagged, context, offset, value);
+}
+
 Node* CodeStubAssembler::LoadNativeContext(Node* context) {
   return LoadContextElement(context, Context::NATIVE_CONTEXT_INDEX);
 }
 
 Node* CodeStubAssembler::LoadJSArrayElementsMap(ElementsKind kind,
                                                 Node* native_context) {
+  CSA_ASSERT(this, IsNativeContext(native_context));
   return LoadFixedArrayElement(native_context,
                                IntPtrConstant(Context::ArrayMapIndex(kind)));
 }
@@ -1175,6 +1397,7 @@
 
 Node* CodeStubAssembler::StoreFixedDoubleArrayElement(
     Node* object, Node* index_node, Node* value, ParameterMode parameter_mode) {
+  CSA_ASSERT(this, IsFixedDoubleArray(object));
   Node* offset =
       ElementOffsetFromIndex(index_node, FAST_DOUBLE_ELEMENTS, parameter_mode,
                              FixedArray::kHeaderSize - kHeapObjectTag);
@@ -1199,8 +1422,11 @@
   return result;
 }
 
-Node* CodeStubAssembler::AllocateSeqOneByteString(int length) {
-  Node* result = Allocate(SeqOneByteString::SizeFor(length));
+Node* CodeStubAssembler::AllocateSeqOneByteString(int length,
+                                                  AllocationFlags flags) {
+  Comment("AllocateSeqOneByteString");
+  Node* result = Allocate(SeqOneByteString::SizeFor(length), flags);
+  DCHECK(Heap::RootIsImmortalImmovable(Heap::kOneByteStringMapRootIndex));
   StoreMapNoWriteBarrier(result, LoadRoot(Heap::kOneByteStringMapRootIndex));
   StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
                                  SmiConstant(Smi::FromInt(length)));
@@ -1210,27 +1436,31 @@
   return result;
 }
 
-Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context, Node* length) {
+Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context, Node* length,
+                                                  ParameterMode mode,
+                                                  AllocationFlags flags) {
+  Comment("AllocateSeqOneByteString");
   Variable var_result(this, MachineRepresentation::kTagged);
 
   // Compute the SeqOneByteString size and check if it fits into new space.
   Label if_sizeissmall(this), if_notsizeissmall(this, Label::kDeferred),
       if_join(this);
-  Node* size = WordAnd(
-      IntPtrAdd(
-          IntPtrAdd(length, IntPtrConstant(SeqOneByteString::kHeaderSize)),
-          IntPtrConstant(kObjectAlignmentMask)),
-      IntPtrConstant(~kObjectAlignmentMask));
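+  // GetArrayAllocationSize folds the alignment slack into the header size, so
+  // masking below rounds the total allocation up to object alignment.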
+  Node* raw_size = GetArrayAllocationSize(
+      length, UINT8_ELEMENTS, mode,
+      SeqOneByteString::kHeaderSize + kObjectAlignmentMask);
+  Node* size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
   Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
          &if_sizeissmall, &if_notsizeissmall);
 
   Bind(&if_sizeissmall);
   {
     // Just allocate the SeqOneByteString in new space.
-    Node* result = Allocate(size);
+    Node* result = Allocate(size, flags);
+    DCHECK(Heap::RootIsImmortalImmovable(Heap::kOneByteStringMapRootIndex));
     StoreMapNoWriteBarrier(result, LoadRoot(Heap::kOneByteStringMapRootIndex));
-    StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
-                                   SmiFromWord(length));
+    StoreObjectFieldNoWriteBarrier(
+        result, SeqOneByteString::kLengthOffset,
+        mode == SMI_PARAMETERS ? length : SmiFromWord(length));
     StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldOffset,
                                    IntPtrConstant(String::kEmptyHashField),
                                    MachineRepresentation::kWord32);
@@ -1241,8 +1471,9 @@
   Bind(&if_notsizeissmall);
   {
     // We might need to allocate in large object space, go to the runtime.
-    Node* result = CallRuntime(Runtime::kAllocateSeqOneByteString, context,
-                               SmiFromWord(length));
+    Node* result =
+        CallRuntime(Runtime::kAllocateSeqOneByteString, context,
+                    mode == SMI_PARAMETERS ? length : SmiFromWord(length));
     var_result.Bind(result);
     Goto(&if_join);
   }
@@ -1251,8 +1482,11 @@
   return var_result.value();
 }
 
-Node* CodeStubAssembler::AllocateSeqTwoByteString(int length) {
-  Node* result = Allocate(SeqTwoByteString::SizeFor(length));
+Node* CodeStubAssembler::AllocateSeqTwoByteString(int length,
+                                                  AllocationFlags flags) {
+  Comment("AllocateSeqTwoByteString");
+  Node* result = Allocate(SeqTwoByteString::SizeFor(length), flags);
+  DCHECK(Heap::RootIsImmortalImmovable(Heap::kStringMapRootIndex));
   StoreMapNoWriteBarrier(result, LoadRoot(Heap::kStringMapRootIndex));
   StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
                                  SmiConstant(Smi::FromInt(length)));
@@ -1262,27 +1496,31 @@
   return result;
 }
 
-Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context, Node* length) {
+Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context, Node* length,
+                                                  ParameterMode mode,
+                                                  AllocationFlags flags) {
+  Comment("AllocateSeqTwoByteString");
   Variable var_result(this, MachineRepresentation::kTagged);
 
   // Compute the SeqTwoByteString size and check if it fits into new space.
   Label if_sizeissmall(this), if_notsizeissmall(this, Label::kDeferred),
       if_join(this);
-  Node* size = WordAnd(
-      IntPtrAdd(IntPtrAdd(WordShl(length, 1),
-                          IntPtrConstant(SeqTwoByteString::kHeaderSize)),
-                IntPtrConstant(kObjectAlignmentMask)),
-      IntPtrConstant(~kObjectAlignmentMask));
+  Node* raw_size = GetArrayAllocationSize(
+      length, UINT16_ELEMENTS, mode,
+      SeqTwoByteString::kHeaderSize + kObjectAlignmentMask);
+  Node* size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
   Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
          &if_sizeissmall, &if_notsizeissmall);
 
   Bind(&if_sizeissmall);
   {
     // Just allocate the SeqTwoByteString in new space.
-    Node* result = Allocate(size);
+    Node* result = Allocate(size, flags);
+    DCHECK(Heap::RootIsImmortalImmovable(Heap::kStringMapRootIndex));
     StoreMapNoWriteBarrier(result, LoadRoot(Heap::kStringMapRootIndex));
-    StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
-                                   SmiFromWord(length));
+    StoreObjectFieldNoWriteBarrier(
+        result, SeqTwoByteString::kLengthOffset,
+        mode == SMI_PARAMETERS ? length : SmiFromWord(length));
     StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldOffset,
                                    IntPtrConstant(String::kEmptyHashField),
                                    MachineRepresentation::kWord32);
@@ -1293,8 +1531,9 @@
   Bind(&if_notsizeissmall);
   {
     // We might need to allocate in large object space, go to the runtime.
-    Node* result = CallRuntime(Runtime::kAllocateSeqTwoByteString, context,
-                               SmiFromWord(length));
+    Node* result =
+        CallRuntime(Runtime::kAllocateSeqTwoByteString, context,
+                    mode == SMI_PARAMETERS ? length : SmiFromWord(length));
     var_result.Bind(result);
     Goto(&if_join);
   }
@@ -1303,10 +1542,13 @@
   return var_result.value();
 }
 
-Node* CodeStubAssembler::AllocateSlicedOneByteString(Node* length, Node* parent,
-                                                     Node* offset) {
+Node* CodeStubAssembler::AllocateSlicedString(
+    Heap::RootListIndex map_root_index, Node* length, Node* parent,
+    Node* offset) {
+  CSA_ASSERT(this, TaggedIsSmi(length));
   Node* result = Allocate(SlicedString::kSize);
-  Node* map = LoadRoot(Heap::kSlicedOneByteStringMapRootIndex);
+  Node* map = LoadRoot(map_root_index);
+  DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
   StoreMapNoWriteBarrier(result, map);
   StoreObjectFieldNoWriteBarrier(result, SlicedString::kLengthOffset, length,
                                  MachineRepresentation::kTagged);
@@ -1320,28 +1562,118 @@
   return result;
 }
 
+Node* CodeStubAssembler::AllocateSlicedOneByteString(Node* length, Node* parent,
+                                                     Node* offset) {
+  return AllocateSlicedString(Heap::kSlicedOneByteStringMapRootIndex, length,
+                              parent, offset);
+}
+
 Node* CodeStubAssembler::AllocateSlicedTwoByteString(Node* length, Node* parent,
                                                      Node* offset) {
-  Node* result = Allocate(SlicedString::kSize);
-  Node* map = LoadRoot(Heap::kSlicedStringMapRootIndex);
+  return AllocateSlicedString(Heap::kSlicedStringMapRootIndex, length, parent,
+                              offset);
+}
+
+Node* CodeStubAssembler::AllocateConsString(Heap::RootListIndex map_root_index,
+                                            Node* length, Node* first,
+                                            Node* second,
+                                            AllocationFlags flags) {
+  CSA_ASSERT(this, TaggedIsSmi(length));
+  Node* result = Allocate(ConsString::kSize, flags);
+  Node* map = LoadRoot(map_root_index);
+  DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
   StoreMapNoWriteBarrier(result, map);
-  StoreObjectFieldNoWriteBarrier(result, SlicedString::kLengthOffset, length,
+  StoreObjectFieldNoWriteBarrier(result, ConsString::kLengthOffset, length,
                                  MachineRepresentation::kTagged);
-  StoreObjectFieldNoWriteBarrier(result, SlicedString::kHashFieldOffset,
+  StoreObjectFieldNoWriteBarrier(result, ConsString::kHashFieldOffset,
                                  Int32Constant(String::kEmptyHashField),
                                  MachineRepresentation::kWord32);
-  StoreObjectFieldNoWriteBarrier(result, SlicedString::kParentOffset, parent,
-                                 MachineRepresentation::kTagged);
-  StoreObjectFieldNoWriteBarrier(result, SlicedString::kOffsetOffset, offset,
-                                 MachineRepresentation::kTagged);
+  bool const new_space = !(flags & kPretenured);
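+  // Stores into new-space objects can always skip the write barrier;
+  // pretenured allocations may live in old space and must keep it.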
+  if (new_space) {
+    StoreObjectFieldNoWriteBarrier(result, ConsString::kFirstOffset, first,
+                                   MachineRepresentation::kTagged);
+    StoreObjectFieldNoWriteBarrier(result, ConsString::kSecondOffset, second,
+                                   MachineRepresentation::kTagged);
+  } else {
+    StoreObjectField(result, ConsString::kFirstOffset, first);
+    StoreObjectField(result, ConsString::kSecondOffset, second);
+  }
   return result;
 }
 
+Node* CodeStubAssembler::AllocateOneByteConsString(Node* length, Node* first,
+                                                   Node* second,
+                                                   AllocationFlags flags) {
+  return AllocateConsString(Heap::kConsOneByteStringMapRootIndex, length, first,
+                            second, flags);
+}
+
+Node* CodeStubAssembler::AllocateTwoByteConsString(Node* length, Node* first,
+                                                   Node* second,
+                                                   AllocationFlags flags) {
+  return AllocateConsString(Heap::kConsStringMapRootIndex, length, first,
+                            second, flags);
+}
+
+Node* CodeStubAssembler::NewConsString(Node* context, Node* length, Node* left,
+                                       Node* right, AllocationFlags flags) {
+  CSA_ASSERT(this, TaggedIsSmi(length));
+  // The result of the string addition can be a cons string.
+  Comment("Allocating ConsString");
+  Node* left_instance_type = LoadInstanceType(left);
+  Node* right_instance_type = LoadInstanceType(right);
+
+  // Compute intersection and difference of instance types.
+  Node* anded_instance_types = WordAnd(left_instance_type, right_instance_type);
+  Node* xored_instance_types = WordXor(left_instance_type, right_instance_type);
+
+  // We create a one-byte cons string if
+  // 1. both strings are one-byte, or
+  // 2. at least one of the strings is two-byte, but happens to contain only
+  //    one-byte characters.
+  // To do this, we check
+  // 1. if both strings are one-byte, or if the one-byte data hint is set in
+  //    both strings, or
+  // 2. if one of the strings has the one-byte data hint set and the other
+  //    string is one-byte.
+  STATIC_ASSERT(kOneByteStringTag != 0);
+  STATIC_ASSERT(kOneByteDataHintTag != 0);
+  Label one_byte_map(this);
+  Label two_byte_map(this);
+  Variable result(this, MachineRepresentation::kTagged);
+  Label done(this, &result);
+  GotoIf(WordNotEqual(
+             WordAnd(anded_instance_types,
+                     IntPtrConstant(kStringEncodingMask | kOneByteDataHintTag)),
+             IntPtrConstant(0)),
+         &one_byte_map);
+  Branch(WordNotEqual(WordAnd(xored_instance_types,
+                              IntPtrConstant(kStringEncodingMask |
+                                             kOneByteDataHintMask)),
+                      IntPtrConstant(kOneByteStringTag | kOneByteDataHintTag)),
+         &two_byte_map, &one_byte_map);
+
+  Bind(&one_byte_map);
+  Comment("One-byte ConsString");
+  result.Bind(AllocateOneByteConsString(length, left, right, flags));
+  Goto(&done);
+
+  Bind(&two_byte_map);
+  Comment("Two-byte ConsString");
+  result.Bind(AllocateTwoByteConsString(length, left, right, flags));
+  Goto(&done);
+
+  Bind(&done);
+
+  return result.value();
+}
+
 Node* CodeStubAssembler::AllocateRegExpResult(Node* context, Node* length,
                                               Node* index, Node* input) {
   Node* const max_length =
       SmiConstant(Smi::FromInt(JSArray::kInitialMaxFastElementArray));
-  Assert(SmiLessThanOrEqual(length, max_length));
+  CSA_ASSERT(this, SmiLessThanOrEqual(length, max_length));
+  USE(max_length);
 
   // Allocate the JSRegExpResult.
   // TODO(jgruber): Fold JSArray and FixedArray allocations, then remove
@@ -1381,6 +1713,120 @@
   return result;
 }
 
+Node* CodeStubAssembler::AllocateNameDictionary(int at_least_space_for) {
+  return AllocateNameDictionary(IntPtrConstant(at_least_space_for));
+}
+
+Node* CodeStubAssembler::AllocateNameDictionary(Node* at_least_space_for) {
+  CSA_ASSERT(this, UintPtrLessThanOrEqual(
+                       at_least_space_for,
+                       IntPtrConstant(NameDictionary::kMaxCapacity)));
+
+  Node* capacity = HashTableComputeCapacity(at_least_space_for);
+  CSA_ASSERT(this, WordIsPowerOfTwo(capacity));
+
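+  // EntryToIndex translates the entry count into the backing FixedArray
+  // length, accounting for the hash-table header fields.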
+  Node* length = EntryToIndex<NameDictionary>(capacity);
+  Node* store_size =
+      IntPtrAddFoldConstants(WordShl(length, IntPtrConstant(kPointerSizeLog2)),
+                             IntPtrConstant(NameDictionary::kHeaderSize));
+
+  Node* result = Allocate(store_size);
+  Comment("Initialize NameDictionary");
+  // Initialize FixedArray fields.
+  StoreObjectFieldRoot(result, FixedArray::kMapOffset,
+                       Heap::kHashTableMapRootIndex);
+  StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset,
+                                 SmiFromWord(length));
+  // Initialize HashTable fields.
+  Node* zero = SmiConstant(0);
+  StoreFixedArrayElement(result, NameDictionary::kNumberOfElementsIndex, zero,
+                         SKIP_WRITE_BARRIER);
+  StoreFixedArrayElement(result, NameDictionary::kNumberOfDeletedElementsIndex,
+                         zero, SKIP_WRITE_BARRIER);
+  StoreFixedArrayElement(result, NameDictionary::kCapacityIndex,
+                         SmiTag(capacity), SKIP_WRITE_BARRIER);
+  // Initialize Dictionary fields.
+  Node* filler = LoadRoot(Heap::kUndefinedValueRootIndex);
+  StoreFixedArrayElement(result, NameDictionary::kMaxNumberKeyIndex, filler,
+                         SKIP_WRITE_BARRIER);
+  StoreFixedArrayElement(result, NameDictionary::kNextEnumerationIndexIndex,
+                         SmiConstant(PropertyDetails::kInitialIndex),
+                         SKIP_WRITE_BARRIER);
+
+  // Initialize NameDictionary elements.
+  result = BitcastTaggedToWord(result);
+  Node* start_address = IntPtrAdd(
+      result, IntPtrConstant(NameDictionary::OffsetOfElementAt(
+                                 NameDictionary::kElementsStartIndex) -
+                             kHeapObjectTag));
+  Node* end_address = IntPtrAdd(
+      result,
+      IntPtrSubFoldConstants(store_size, IntPtrConstant(kHeapObjectTag)));
+  StoreFieldsNoWriteBarrier(start_address, end_address, filler);
+  return result;
+}
+
+Node* CodeStubAssembler::AllocateJSObjectFromMap(Node* map, Node* properties,
+                                                 Node* elements) {
+  CSA_ASSERT(this, IsMap(map));
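+  // The map stores the instance size in words; scale it to bytes.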
+  Node* size =
+      IntPtrMul(LoadMapInstanceSize(map), IntPtrConstant(kPointerSize));
+  CSA_ASSERT(this, IsRegularHeapObjectSize(size));
+  Node* object = Allocate(size);
+  StoreMapNoWriteBarrier(object, map);
+  InitializeJSObjectFromMap(object, map, size, properties, elements);
+  return object;
+}
+
+void CodeStubAssembler::InitializeJSObjectFromMap(Node* object, Node* map,
+                                                  Node* size, Node* properties,
+                                                  Node* elements) {
+  // This helper assumes that the object is in new-space, as guarded by the
+  // check in AllocateJSObjectFromMap.
+  if (properties == nullptr) {
+    CSA_ASSERT(this, Word32BinaryNot(IsDictionaryMap((map))));
+    StoreObjectFieldRoot(object, JSObject::kPropertiesOffset,
+                         Heap::kEmptyFixedArrayRootIndex);
+  } else {
+    StoreObjectFieldNoWriteBarrier(object, JSObject::kPropertiesOffset,
+                                   properties);
+  }
+  if (elements == nullptr) {
+    StoreObjectFieldRoot(object, JSObject::kElementsOffset,
+                         Heap::kEmptyFixedArrayRootIndex);
+  } else {
+    StoreObjectFieldNoWriteBarrier(object, JSObject::kElementsOffset, elements);
+  }
+  InitializeJSObjectBody(object, map, size, JSObject::kHeaderSize);
+}
+
+void CodeStubAssembler::InitializeJSObjectBody(Node* object, Node* map,
+                                               Node* size, int start_offset) {
+  // TODO(cbruni): activate in-object slack tracking machinery.
+  Comment("InitializeJSObjectBody");
+  Node* filler = LoadRoot(Heap::kUndefinedValueRootIndex);
+  // Calculate the untagged field addresses.
+  Node* start_address =
+      IntPtrAdd(object, IntPtrConstant(start_offset - kHeapObjectTag));
+  Node* end_address =
+      IntPtrSub(IntPtrAdd(object, size), IntPtrConstant(kHeapObjectTag));
+  StoreFieldsNoWriteBarrier(start_address, end_address, filler);
+}
+
+void CodeStubAssembler::StoreFieldsNoWriteBarrier(Node* start_address,
+                                                  Node* end_address,
+                                                  Node* value) {
+  Comment("StoreFieldsNoWriteBarrier");
+  CSA_ASSERT(this, WordIsWordAligned(start_address));
+  CSA_ASSERT(this, WordIsWordAligned(end_address));
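+  // Fill the half-open range [start_address, end_address) one word at a time.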
+  BuildFastLoop(
+      MachineType::PointerRepresentation(), start_address, end_address,
+      [value](CodeStubAssembler* a, Node* current) {
+        a->StoreNoWriteBarrier(MachineRepresentation::kTagged, current, value);
+      },
+      kPointerSize, IndexAdvanceMode::kPost);
+}
+
 Node* CodeStubAssembler::AllocateUninitializedJSArrayWithoutElements(
     ElementsKind kind, Node* array_map, Node* length, Node* allocation_site) {
   Comment("begin allocation of JSArray without elements");
@@ -1415,7 +1861,8 @@
   Node* array = AllocateUninitializedJSArray(kind, array_map, length,
                                              allocation_site, size);
 
-  Node* elements = InnerAllocate(array, elements_offset);
+  // The bitcast here is safe because InnerAllocate doesn't actually allocate.
+  Node* elements = InnerAllocate(BitcastTaggedToWord(array), elements_offset);
   StoreObjectField(array, JSObject::kElementsOffset, elements);
 
   return {array, elements};
@@ -1461,8 +1908,10 @@
                                  TagParameter(capacity, capacity_mode));
 
   // Fill in the elements with holes.
-  FillFixedArrayWithValue(kind, elements, IntPtrConstant(0), capacity,
-                          Heap::kTheHoleValueRootIndex, capacity_mode);
+  FillFixedArrayWithValue(
+      kind, elements, capacity_mode == SMI_PARAMETERS ? SmiConstant(Smi::kZero)
+                                                      : IntPtrConstant(0),
+      capacity, Heap::kTheHoleValueRootIndex, capacity_mode);
 
   return array;
 }
@@ -1471,6 +1920,8 @@
                                             Node* capacity_node,
                                             ParameterMode mode,
                                             AllocationFlags flags) {
+  CSA_ASSERT(this,
+             IntPtrGreaterThan(capacity_node, IntPtrOrSmiConstant(0, mode)));
   Node* total_size = GetFixedArrayAllocationSize(capacity_node, kind, mode);
 
   // Allocate both array and elements object, and initialize the JSArray.
@@ -1501,86 +1952,37 @@
       Is64() ? Int64Constant(kHoleNanInt64) : Int32Constant(kHoleNanLower32);
   Node* value = LoadRoot(value_root_index);
 
-  const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
-  int32_t to;
-  bool constant_to = ToInt32Constant(to_node, to);
-  int32_t from;
-  bool constant_from = ToInt32Constant(from_node, from);
-  if (constant_to && constant_from &&
-      (to - from) <= kElementLoopUnrollThreshold) {
-    for (int i = from; i < to; ++i) {
-      Node* index = IntPtrConstant(i);
-      if (is_double) {
-        Node* offset = ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
-                                              first_element_offset);
-        // Don't use doubles to store the hole double, since manipulating the
-        // signaling NaN used for the hole in C++, e.g. with bit_cast, will
-        // change its value on ia32 (the x87 stack is used to return values
-        // and stores to the stack silently clear the signalling bit).
-        //
-        // TODO(danno): When we have a Float32/Float64 wrapper class that
-        // preserves double bits during manipulation, remove this code/change
-        // this to an indexed Float64 store.
-        if (Is64()) {
-          StoreNoWriteBarrier(MachineRepresentation::kWord64, array, offset,
-                              double_hole);
+  BuildFastFixedArrayForEach(
+      array, kind, from_node, to_node,
+      [value, is_double, double_hole](CodeStubAssembler* assembler, Node* array,
+                                      Node* offset) {
+        if (is_double) {
+          // Don't use doubles to store the hole double, since manipulating the
+          // signaling NaN used for the hole in C++, e.g. with bit_cast, will
+          // change its value on ia32 (the x87 stack is used to return values
+          // and stores to the stack silently clear the signalling bit).
+          //
+          // TODO(danno): When we have a Float32/Float64 wrapper class that
+          // preserves double bits during manipulation, remove this code/change
+          // this to an indexed Float64 store.
+          if (assembler->Is64()) {
+            assembler->StoreNoWriteBarrier(MachineRepresentation::kWord64,
+                                           array, offset, double_hole);
+          } else {
+            assembler->StoreNoWriteBarrier(MachineRepresentation::kWord32,
+                                           array, offset, double_hole);
+            assembler->StoreNoWriteBarrier(
+                MachineRepresentation::kWord32, array,
+                assembler->IntPtrAdd(offset,
+                                     assembler->IntPtrConstant(kPointerSize)),
+                double_hole);
+          }
         } else {
-          StoreNoWriteBarrier(MachineRepresentation::kWord32, array, offset,
-                              double_hole);
-          offset = ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
-                                          first_element_offset + kPointerSize);
-          StoreNoWriteBarrier(MachineRepresentation::kWord32, array, offset,
-                              double_hole);
+          assembler->StoreNoWriteBarrier(MachineRepresentation::kTagged, array,
+                                         offset, value);
         }
-      } else {
-        StoreFixedArrayElement(array, index, value, SKIP_WRITE_BARRIER,
-                               INTPTR_PARAMETERS);
-      }
-    }
-  } else {
-    Variable current(this, MachineRepresentation::kTagged);
-    Label test(this);
-    Label decrement(this, &current);
-    Label done(this);
-    Node* limit =
-        IntPtrAdd(array, ElementOffsetFromIndex(from_node, kind, mode));
-    current.Bind(IntPtrAdd(array, ElementOffsetFromIndex(to_node, kind, mode)));
-
-    Branch(WordEqual(current.value(), limit), &done, &decrement);
-
-    Bind(&decrement);
-    current.Bind(IntPtrSub(
-        current.value(),
-        IntPtrConstant(IsFastDoubleElementsKind(kind) ? kDoubleSize
-                                                      : kPointerSize)));
-    if (is_double) {
-      // Don't use doubles to store the hole double, since manipulating the
-      // signaling NaN used for the hole in C++, e.g. with bit_cast, will
-      // change its value on ia32 (the x87 stack is used to return values
-      // and stores to the stack silently clear the signalling bit).
-      //
-      // TODO(danno): When we have a Float32/Float64 wrapper class that
-      // preserves double bits during manipulation, remove this code/change
-      // this to an indexed Float64 store.
-      if (Is64()) {
-        StoreNoWriteBarrier(MachineRepresentation::kWord64, current.value(),
-                            Int64Constant(first_element_offset), double_hole);
-      } else {
-        StoreNoWriteBarrier(MachineRepresentation::kWord32, current.value(),
-                            Int32Constant(first_element_offset), double_hole);
-        StoreNoWriteBarrier(MachineRepresentation::kWord32, current.value(),
-                            Int32Constant(kPointerSize + first_element_offset),
-                            double_hole);
-      }
-    } else {
-      StoreNoWriteBarrier(MachineType::PointerRepresentation(), current.value(),
-                          IntPtrConstant(first_element_offset), value);
-    }
-    Node* compare = WordNotEqual(current.value(), limit);
-    Branch(compare, &decrement, &done);
-
-    Bind(&done);
-  }
+      },
+      mode);
 }
 
 void CodeStubAssembler::CopyFixedArrayElements(
@@ -1679,8 +2081,8 @@
       StoreNoWriteBarrier(MachineRepresentation::kFloat64, to_array, to_offset,
                           value);
     } else {
-      StoreNoWriteBarrier(MachineType::PointerRepresentation(), to_array,
-                          to_offset, value);
+      StoreNoWriteBarrier(MachineRepresentation::kTagged, to_array, to_offset,
+                          value);
     }
     Goto(&next_iter);
 
@@ -1717,73 +2119,66 @@
   Comment("] CopyFixedArrayElements");
 }
 
-void CodeStubAssembler::CopyStringCharacters(compiler::Node* from_string,
-                                             compiler::Node* to_string,
-                                             compiler::Node* from_index,
-                                             compiler::Node* character_count,
-                                             String::Encoding encoding) {
-  Label out(this);
+void CodeStubAssembler::CopyStringCharacters(
+    compiler::Node* from_string, compiler::Node* to_string,
+    compiler::Node* from_index, compiler::Node* to_index,
+    compiler::Node* character_count, String::Encoding from_encoding,
+    String::Encoding to_encoding, ParameterMode mode) {
+  bool from_one_byte = from_encoding == String::ONE_BYTE_ENCODING;
+  bool to_one_byte = to_encoding == String::ONE_BYTE_ENCODING;
+  DCHECK_IMPLIES(to_one_byte, from_one_byte);
+  Comment("CopyStringCharacters %s -> %s",
+          from_one_byte ? "ONE_BYTE_ENCODING" : "TWO_BYTE_ENCODING",
+          to_one_byte ? "ONE_BYTE_ENCODING" : "TWO_BYTE_ENCODING");
 
-  // Nothing to do for zero characters.
+  ElementsKind from_kind = from_one_byte ? UINT8_ELEMENTS : UINT16_ELEMENTS;
+  ElementsKind to_kind = to_one_byte ? UINT8_ELEMENTS : UINT16_ELEMENTS;
+  STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+  int header_size = SeqOneByteString::kHeaderSize - kHeapObjectTag;
+  Node* from_offset =
+      ElementOffsetFromIndex(from_index, from_kind, mode, header_size);
+  Node* to_offset =
+      ElementOffsetFromIndex(to_index, to_kind, mode, header_size);
+  Node* byte_count = ElementOffsetFromIndex(character_count, from_kind, mode);
+  Node* limit_offset = IntPtrAddFoldConstants(from_offset, byte_count);
 
-  GotoIf(SmiLessThanOrEqual(character_count, SmiConstant(Smi::FromInt(0))),
-         &out);
+  // Prepare the fast loop.
+  MachineType type =
+      from_one_byte ? MachineType::Uint8() : MachineType::Uint16();
+  MachineRepresentation rep = to_one_byte ? MachineRepresentation::kWord8
+                                          : MachineRepresentation::kWord16;
+  int from_increment = 1 << ElementsKindToShiftSize(from_kind);
+  int to_increment = 1 << ElementsKindToShiftSize(to_kind);
 
-  // Calculate offsets into the strings.
-
-  Node* from_offset;
-  Node* limit_offset;
-  Node* to_offset;
-
-  {
-    Node* byte_count = SmiUntag(character_count);
-    Node* from_byte_index = SmiUntag(from_index);
-    if (encoding == String::ONE_BYTE_ENCODING) {
-      const int offset = SeqOneByteString::kHeaderSize - kHeapObjectTag;
-      from_offset = IntPtrAdd(IntPtrConstant(offset), from_byte_index);
-      limit_offset = IntPtrAdd(from_offset, byte_count);
-      to_offset = IntPtrConstant(offset);
-    } else {
-      STATIC_ASSERT(2 == sizeof(uc16));
-      byte_count = WordShl(byte_count, 1);
-      from_byte_index = WordShl(from_byte_index, 1);
-
-      const int offset = SeqTwoByteString::kHeaderSize - kHeapObjectTag;
-      from_offset = IntPtrAdd(IntPtrConstant(offset), from_byte_index);
-      limit_offset = IntPtrAdd(from_offset, byte_count);
-      to_offset = IntPtrConstant(offset);
-    }
-  }
-
-  Variable var_from_offset(this, MachineType::PointerRepresentation());
-  Variable var_to_offset(this, MachineType::PointerRepresentation());
-
-  var_from_offset.Bind(from_offset);
-  var_to_offset.Bind(to_offset);
-
-  Variable* vars[] = {&var_from_offset, &var_to_offset};
-  Label decrement(this, 2, vars);
-
-  Label loop(this, 2, vars);
-  Goto(&loop);
-  Bind(&loop);
-  {
-    from_offset = var_from_offset.value();
-    to_offset = var_to_offset.value();
-
-    // TODO(jgruber): We could make this faster through larger copy unit sizes.
-    Node* value = Load(MachineType::Uint8(), from_string, from_offset);
-    StoreNoWriteBarrier(MachineRepresentation::kWord8, to_string, to_offset,
-                        value);
-
-    Node* new_from_offset = IntPtrAdd(from_offset, IntPtrConstant(1));
-    var_from_offset.Bind(new_from_offset);
-    var_to_offset.Bind(IntPtrAdd(to_offset, IntPtrConstant(1)));
-
-    Branch(WordNotEqual(new_from_offset, limit_offset), &loop, &out);
-  }
-
-  Bind(&out);
+  Variable current_to_offset(this, MachineType::PointerRepresentation());
+  VariableList vars({&current_to_offset}, zone());
+  current_to_offset.Bind(to_offset);
+  int to_index_constant = 0, from_index_constant = 0;
+  Smi* to_index_smi = nullptr;
+  Smi* from_index_smi = nullptr;
+  bool index_same = (from_encoding == to_encoding) &&
+                    (from_index == to_index ||
+                     (ToInt32Constant(from_index, from_index_constant) &&
+                      ToInt32Constant(to_index, to_index_constant) &&
+                      from_index_constant == to_index_constant) ||
+                     (ToSmiConstant(from_index, from_index_smi) &&
+                      ToSmiConstant(to_index, to_index_smi) &&
+                      to_index_smi == from_index_smi));
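+  // If both strings use the same encoding and start at the same index, the
+  // source and destination offsets advance in lockstep and a single loop
+  // index suffices.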
+  BuildFastLoop(vars, MachineType::PointerRepresentation(), from_offset,
+                limit_offset,
+                [from_string, to_string, &current_to_offset, to_increment, type,
+                 rep, index_same](CodeStubAssembler* assembler, Node* offset) {
+                  Node* value = assembler->Load(type, from_string, offset);
+                  assembler->StoreNoWriteBarrier(
+                      rep, to_string,
+                      index_same ? offset : current_to_offset.value(), value);
+                  if (!index_same) {
+                    current_to_offset.Bind(assembler->IntPtrAdd(
+                        current_to_offset.value(),
+                        assembler->IntPtrConstant(to_increment)));
+                  }
+                },
+                from_increment, IndexAdvanceMode::kPost);
 }
 
 Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array,
@@ -1800,7 +2195,7 @@
     return value;
 
   } else {
-    Node* value = Load(MachineType::Pointer(), array, offset);
+    Node* value = Load(MachineType::AnyTagged(), array, offset);
     if (if_hole) {
       GotoIf(WordEqual(value, TheHoleConstant()), if_hole);
     }
@@ -1876,10 +2271,6 @@
   // Allocate the new backing store.
   Node* new_elements = AllocateFixedArray(to_kind, new_capacity, mode);
 
-  // Fill in the added capacity in the new store with holes.
-  FillFixedArrayWithValue(to_kind, new_elements, capacity, new_capacity,
-                          Heap::kTheHoleValueRootIndex, mode);
-
   // Copy the elements from the old elements store to the new.
   // The size-check above guarantees that the |new_elements| is allocated
   // in new space so we can skip the write barrier.
@@ -1904,13 +2295,47 @@
   if (FLAG_allocation_site_pretenuring) {
     Node* count = LoadObjectField(allocation_site,
                                   AllocationSite::kPretenureCreateCountOffset);
-    Node* incremented_count = IntPtrAdd(count, SmiConstant(Smi::FromInt(1)));
+    Node* incremented_count = SmiAdd(count, SmiConstant(Smi::FromInt(1)));
     StoreObjectFieldNoWriteBarrier(allocation_site,
                                    AllocationSite::kPretenureCreateCountOffset,
                                    incremented_count);
   }
 }
 
+Node* CodeStubAssembler::TryTaggedToFloat64(Node* value,
+                                            Label* if_valueisnotnumber) {
+  Label out(this);
+  Variable var_result(this, MachineRepresentation::kFloat64);
+
+  // Check if the {value} is a Smi or a HeapObject.
+  Label if_valueissmi(this), if_valueisnotsmi(this);
+  Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+
+  Bind(&if_valueissmi);
+  {
+    // Convert the Smi {value}.
+    var_result.Bind(SmiToFloat64(value));
+    Goto(&out);
+  }
+
+  Bind(&if_valueisnotsmi);
+  {
+    // Check if {value} is a HeapNumber.
+    Label if_valueisheapnumber(this);
+    Branch(IsHeapNumberMap(LoadMap(value)), &if_valueisheapnumber,
+           if_valueisnotnumber);
+
+    Bind(&if_valueisheapnumber);
+    {
+      // Load the floating point value.
+      var_result.Bind(LoadHeapNumberValue(value));
+      Goto(&out);
+    }
+  }
+  Bind(&out);
+  return var_result.value();
+}
+
 Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) {
   // We might need to loop once due to ToNumber conversion.
   Variable var_value(this, MachineRepresentation::kTagged),
@@ -1920,42 +2345,23 @@
   Goto(&loop);
   Bind(&loop);
   {
+    Label if_valueisnotnumber(this, Label::kDeferred);
+
     // Load the current {value}.
     value = var_value.value();
 
-    // Check if the {value} is a Smi or a HeapObject.
-    Label if_valueissmi(this), if_valueisnotsmi(this);
-    Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+    // Convert {value} to Float64 if it is a number; otherwise it is first
+    // converted to a number below and we loop again.
+    Node* const result = TryTaggedToFloat64(value, &if_valueisnotnumber);
+    var_result.Bind(result);
+    Goto(&done_loop);
 
-    Bind(&if_valueissmi);
+    Bind(&if_valueisnotnumber);
     {
-      // Convert the Smi {value}.
-      var_result.Bind(SmiToFloat64(value));
-      Goto(&done_loop);
-    }
-
-    Bind(&if_valueisnotsmi);
-    {
-      // Check if {value} is a HeapNumber.
-      Label if_valueisheapnumber(this),
-          if_valueisnotheapnumber(this, Label::kDeferred);
-      Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
-             &if_valueisheapnumber, &if_valueisnotheapnumber);
-
-      Bind(&if_valueisheapnumber);
-      {
-        // Load the floating point value.
-        var_result.Bind(LoadHeapNumberValue(value));
-        Goto(&done_loop);
-      }
-
-      Bind(&if_valueisnotheapnumber);
-      {
-        // Convert the {value} to a Number first.
-        Callable callable = CodeFactory::NonNumberToNumber(isolate());
-        var_value.Bind(CallStub(callable, context, value));
-        Goto(&loop);
-      }
+      // Convert the {value} to a Number first.
+      Callable callable = CodeFactory::NonNumberToNumber(isolate());
+      var_value.Bind(CallStub(callable, context, value));
+      Goto(&loop);
     }
   }
   Bind(&done_loop);
@@ -1976,7 +2382,7 @@
 
     // Check if the {value} is a Smi or a HeapObject.
     Label if_valueissmi(this), if_valueisnotsmi(this);
-    Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+    Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
 
     Bind(&if_valueissmi);
     {
@@ -2029,8 +2435,8 @@
   Bind(&if_valueisequal);
   {
     GotoUnless(Word32Equal(value32, Int32Constant(0)), &if_valueisint32);
-    BranchIfInt32LessThan(Float64ExtractHighWord32(value), Int32Constant(0),
-                          &if_valueisheapnumber, &if_valueisint32);
+    Branch(Int32LessThan(Float64ExtractHighWord32(value), Int32Constant(0)),
+           &if_valueisheapnumber, &if_valueisint32);
   }
   Bind(&if_valueisnotequal);
   Goto(&if_valueisheapnumber);
@@ -2138,7 +2544,7 @@
   // Check if the {value} is a Smi or a HeapObject.
   Label if_valueissmi(this, Label::kDeferred), if_valueisnotsmi(this),
       if_valueisstring(this);
-  Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+  Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
   Bind(&if_valueisnotsmi);
   {
     // Load the instance type of the {value}.
@@ -2206,9 +2612,9 @@
     value = var_value.value();
 
     // Check if the {value} is a Smi or a HeapObject.
-    GotoIf(WordIsSmi(value), (primitive_type == PrimitiveType::kNumber)
-                                 ? &done_loop
-                                 : &done_throw);
+    GotoIf(TaggedIsSmi(value), (primitive_type == PrimitiveType::kNumber)
+                                   ? &done_loop
+                                   : &done_throw);
 
     // Load the map of the {value}.
     Node* value_map = LoadMap(value);
@@ -2270,7 +2676,7 @@
   Label out(this), throw_exception(this, Label::kDeferred);
   Variable var_value_map(this, MachineRepresentation::kTagged);
 
-  GotoIf(WordIsSmi(value), &throw_exception);
+  GotoIf(TaggedIsSmi(value), &throw_exception);
 
   // Load the instance type of the {value}.
   var_value_map.Bind(LoadMap(value));
@@ -2292,6 +2698,37 @@
   return var_value_map.value();
 }
 
+Node* CodeStubAssembler::IsSpecialReceiverMap(Node* map) {
+  Node* is_special = IsSpecialReceiverInstanceType(LoadMapInstanceType(map));
+  uint32_t mask =
+      1 << Map::kHasNamedInterceptor | 1 << Map::kIsAccessCheckNeeded;
+  USE(mask);
+  // Interceptors or access checks imply special receiver.
+  CSA_ASSERT(this, Select(IsSetWord32(LoadMapBitField(map), mask), is_special,
+                          Int32Constant(1), MachineRepresentation::kWord32));
+  return is_special;
+}
+
+Node* CodeStubAssembler::IsDictionaryMap(Node* map) {
+  CSA_SLOW_ASSERT(this, IsMap(map));
+  Node* bit_field3 = LoadMapBitField3(map);
+  return Word32NotEqual(IsSetWord32<Map::DictionaryMap>(bit_field3),
+                        Int32Constant(0));
+}
+
+Node* CodeStubAssembler::IsCallableMap(Node* map) {
+  CSA_ASSERT(this, IsMap(map));
+  return Word32NotEqual(
+      Word32And(LoadMapBitField(map), Int32Constant(1 << Map::kIsCallable)),
+      Int32Constant(0));
+}
+
+Node* CodeStubAssembler::IsSpecialReceiverInstanceType(Node* instance_type) {
+  STATIC_ASSERT(JS_GLOBAL_OBJECT_TYPE <= LAST_SPECIAL_RECEIVER_TYPE);
+  return Int32LessThanOrEqual(instance_type,
+                              Int32Constant(LAST_SPECIAL_RECEIVER_TYPE));
+}
+
 Node* CodeStubAssembler::IsStringInstanceType(Node* instance_type) {
   STATIC_ASSERT(INTERNALIZED_STRING_TYPE == FIRST_TYPE);
   return Int32LessThan(instance_type, Int32Constant(FIRST_NONSTRING_TYPE));
@@ -2303,7 +2740,71 @@
                                  Int32Constant(FIRST_JS_RECEIVER_TYPE));
 }
 
+Node* CodeStubAssembler::IsJSReceiver(Node* object) {
+  STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+  return IsJSReceiverInstanceType(LoadInstanceType(object));
+}
+
+Node* CodeStubAssembler::IsJSObject(Node* object) {
+  STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+  return Int32GreaterThanOrEqual(LoadInstanceType(object),
+                                 Int32Constant(FIRST_JS_OBJECT_TYPE));
+}
+
+Node* CodeStubAssembler::IsJSGlobalProxy(Node* object) {
+  return Word32Equal(LoadInstanceType(object),
+                     Int32Constant(JS_GLOBAL_PROXY_TYPE));
+}
+
+Node* CodeStubAssembler::IsMap(Node* map) {
+  return HasInstanceType(map, MAP_TYPE);
+}
+
+Node* CodeStubAssembler::IsJSValue(Node* map) {
+  return HasInstanceType(map, JS_VALUE_TYPE);
+}
+
+Node* CodeStubAssembler::IsJSArray(Node* object) {
+  return HasInstanceType(object, JS_ARRAY_TYPE);
+}
+
+Node* CodeStubAssembler::IsWeakCell(Node* object) {
+  return HasInstanceType(object, WEAK_CELL_TYPE);
+}
+
+Node* CodeStubAssembler::IsName(Node* object) {
+  return Int32LessThanOrEqual(LoadInstanceType(object),
+                              Int32Constant(LAST_NAME_TYPE));
+}
+
+Node* CodeStubAssembler::IsString(Node* object) {
+  return Int32LessThan(LoadInstanceType(object),
+                       Int32Constant(FIRST_NONSTRING_TYPE));
+}
+
+Node* CodeStubAssembler::IsNativeContext(Node* object) {
+  return WordEqual(LoadMap(object), LoadRoot(Heap::kNativeContextMapRootIndex));
+}
+
+Node* CodeStubAssembler::IsFixedDoubleArray(Node* object) {
+  return WordEqual(LoadMap(object), FixedDoubleArrayMapConstant());
+}
+
+Node* CodeStubAssembler::IsHashTable(Node* object) {
+  return WordEqual(LoadMap(object), LoadRoot(Heap::kHashTableMapRootIndex));
+}
+
+Node* CodeStubAssembler::IsDictionary(Node* object) {
+  return WordOr(IsHashTable(object), IsUnseededNumberDictionary(object));
+}
+
+Node* CodeStubAssembler::IsUnseededNumberDictionary(Node* object) {
+  return WordEqual(LoadMap(object),
+                   LoadRoot(Heap::kUnseededNumberDictionaryMapRootIndex));
+}
+
 Node* CodeStubAssembler::StringCharCodeAt(Node* string, Node* index) {
+  CSA_ASSERT(this, IsString(string));
   // Translate the {index} into a Word.
   index = SmiToWord(index);
 
@@ -2549,6 +3050,8 @@
   Label end(a), two_byte_sequential(a);
   Variable var_result(a, MachineRepresentation::kTagged);
 
+  Node* const smi_zero = a->SmiConstant(Smi::kZero);
+
   STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
   a->GotoIf(a->Word32Equal(a->Word32And(from_instance_type,
                                         a->Int32Constant(kStringEncodingMask)),
@@ -2559,8 +3062,10 @@
   {
     Node* result =
         a->AllocateSeqOneByteString(context, a->SmiToWord(character_count));
-    a->CopyStringCharacters(from, result, from_index, character_count,
-                            String::ONE_BYTE_ENCODING);
+    a->CopyStringCharacters(from, result, from_index, smi_zero, character_count,
+                            String::ONE_BYTE_ENCODING,
+                            String::ONE_BYTE_ENCODING,
+                            CodeStubAssembler::SMI_PARAMETERS);
     var_result.Bind(result);
 
     a->Goto(&end);
@@ -2571,8 +3076,10 @@
   {
     Node* result =
         a->AllocateSeqTwoByteString(context, a->SmiToWord(character_count));
-    a->CopyStringCharacters(from, result, from_index, character_count,
-                            String::TWO_BYTE_ENCODING);
+    a->CopyStringCharacters(from, result, from_index, smi_zero, character_count,
+                            String::TWO_BYTE_ENCODING,
+                            String::TWO_BYTE_ENCODING,
+                            CodeStubAssembler::SMI_PARAMETERS);
     var_result.Bind(result);
 
     a->Goto(&end);
@@ -2601,7 +3108,7 @@
   // Make sure first argument is a string.
 
   // Bailout if receiver is a Smi.
-  GotoIf(WordIsSmi(string), &runtime);
+  GotoIf(TaggedIsSmi(string), &runtime);
 
   // Load the instance type of the {string}.
   Node* const instance_type = LoadInstanceType(string);
@@ -2783,7 +3290,7 @@
     GotoIf(SmiAbove(substr_length, string_length), &runtime);
 
     // Equal length - check if {from, to} == {0, str.length}.
-    GotoIf(SmiAbove(from, SmiConstant(Smi::FromInt(0))), &runtime);
+    GotoIf(SmiAbove(from, SmiConstant(Smi::kZero)), &runtime);
 
     // Return the original string (substr_length == string_length).
 
@@ -2806,6 +3313,178 @@
   return var_result.value();
 }
 
+Node* CodeStubAssembler::StringAdd(Node* context, Node* left, Node* right,
+                                   AllocationFlags flags) {
+  Label check_right(this);
+  Label runtime(this, Label::kDeferred);
+  Label cons(this);
+  Label non_cons(this);
+  Variable result(this, MachineRepresentation::kTagged);
+  Label done(this, &result);
+  Label done_native(this, &result);
+  Counters* counters = isolate()->counters();
+
+  Node* left_length = LoadStringLength(left);
+  GotoIf(WordNotEqual(IntPtrConstant(0), left_length), &check_right);
+  result.Bind(right);
+  Goto(&done_native);
+
+  Bind(&check_right);
+  Node* right_length = LoadStringLength(right);
+  GotoIf(WordNotEqual(IntPtrConstant(0), right_length), &cons);
+  result.Bind(left);
+  Goto(&done_native);
+
+  Bind(&cons);
+  CSA_ASSERT(this, TaggedIsSmi(left_length));
+  CSA_ASSERT(this, TaggedIsSmi(right_length));
+  Node* new_length = SmiAdd(left_length, right_length);
+  GotoIf(UintPtrGreaterThanOrEqual(
+             new_length, SmiConstant(Smi::FromInt(String::kMaxLength))),
+         &runtime);
+
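+  // Strings shorter than ConsString::kMinLength are concatenated eagerly into
+  // a sequential string below instead of building a ConsString.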
+  GotoIf(IntPtrLessThan(new_length,
+                        SmiConstant(Smi::FromInt(ConsString::kMinLength))),
+         &non_cons);
+
+  result.Bind(NewConsString(context, new_length, left, right, flags));
+  Goto(&done_native);
+
+  Bind(&non_cons);
+
+  Comment("Full string concatenate");
+  Node* left_instance_type = LoadInstanceType(left);
+  Node* right_instance_type = LoadInstanceType(right);
+  // Compute intersection and difference of instance types.
+
+  Node* ored_instance_types = WordOr(left_instance_type, right_instance_type);
+  Node* xored_instance_types = WordXor(left_instance_type, right_instance_type);
+
+  // Check if both strings have the same encoding and both are sequential.
+  GotoIf(WordNotEqual(
+             WordAnd(xored_instance_types, IntPtrConstant(kStringEncodingMask)),
+             IntPtrConstant(0)),
+         &runtime);
+  GotoIf(WordNotEqual(WordAnd(ored_instance_types,
+                              IntPtrConstant(kStringRepresentationMask)),
+                      IntPtrConstant(0)),
+         &runtime);
+
+  Label two_byte(this);
+  GotoIf(WordEqual(
+             WordAnd(ored_instance_types, IntPtrConstant(kStringEncodingMask)),
+             IntPtrConstant(kTwoByteStringTag)),
+         &two_byte);
+  // One-byte sequential string case
+  Node* new_string =
+      AllocateSeqOneByteString(context, new_length, SMI_PARAMETERS);
+  CopyStringCharacters(left, new_string, SmiConstant(Smi::kZero),
+                       SmiConstant(Smi::kZero), left_length,
+                       String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING,
+                       SMI_PARAMETERS);
+  CopyStringCharacters(right, new_string, SmiConstant(Smi::kZero), left_length,
+                       right_length, String::ONE_BYTE_ENCODING,
+                       String::ONE_BYTE_ENCODING, SMI_PARAMETERS);
+  result.Bind(new_string);
+  Goto(&done_native);
+
+  Bind(&two_byte);
+  {
+    // Two-byte sequential string case
+    new_string = AllocateSeqTwoByteString(context, new_length, SMI_PARAMETERS);
+    CopyStringCharacters(left, new_string, SmiConstant(Smi::kZero),
+                         SmiConstant(Smi::kZero), left_length,
+                         String::TWO_BYTE_ENCODING, String::TWO_BYTE_ENCODING,
+                         SMI_PARAMETERS);
+    CopyStringCharacters(right, new_string, SmiConstant(Smi::kZero),
+                         left_length, right_length, String::TWO_BYTE_ENCODING,
+                         String::TWO_BYTE_ENCODING, SMI_PARAMETERS);
+    result.Bind(new_string);
+    Goto(&done_native);
+  }
+
+  Bind(&runtime);
+  {
+    result.Bind(CallRuntime(Runtime::kStringAdd, context, left, right));
+    Goto(&done);
+  }
+
+  Bind(&done_native);
+  {
+    IncrementCounter(counters->string_add_native(), 1);
+    Goto(&done);
+  }
+
+  Bind(&done);
+  return result.value();
+}
+
+Node* CodeStubAssembler::StringIndexOfChar(Node* context, Node* string,
+                                           Node* needle_char, Node* from) {
+  CSA_ASSERT(this, IsString(string));
+  Variable var_result(this, MachineRepresentation::kTagged);
+
+  Label out(this), runtime(this, Label::kDeferred);
+
+  // Let runtime handle non-one-byte {needle_char}.
+
+  Node* const one_byte_char_mask = IntPtrConstant(0xFF);
+  GotoUnless(WordEqual(WordAnd(needle_char, one_byte_char_mask), needle_char),
+             &runtime);
+
+  // TODO(jgruber): Handle external and two-byte strings.
+
+  Node* const one_byte_seq_mask = Int32Constant(
+      kIsIndirectStringMask | kExternalStringTag | kStringEncodingMask);
+  Node* const expected_masked = Int32Constant(kOneByteStringTag);
+
+  Node* const string_instance_type = LoadInstanceType(string);
+  GotoUnless(Word32Equal(Word32And(string_instance_type, one_byte_seq_mask),
+                         expected_masked),
+             &runtime);
+
+  // If we reach this, {string} is a non-indirect, non-external one-byte string.
+
+  Node* const length = LoadStringLength(string);
+  Node* const search_range_length = SmiUntag(SmiSub(length, from));
+
+  const int offset = SeqOneByteString::kHeaderSize - kHeapObjectTag;
+  Node* const begin = IntPtrConstant(offset);
+  Node* const cursor = IntPtrAdd(begin, SmiUntag(from));
+  Node* const end = IntPtrAdd(cursor, search_range_length);
+
+  var_result.Bind(SmiConstant(Smi::FromInt(-1)));
+
+  BuildFastLoop(MachineType::PointerRepresentation(), cursor, end,
+                [string, needle_char, begin, &var_result, &out](
+                    CodeStubAssembler* csa, Node* cursor) {
+                  Label next(csa);
+                  Node* value = csa->Load(MachineType::Uint8(), string, cursor);
+                  csa->GotoUnless(csa->WordEqual(value, needle_char), &next);
+
+                  // Found a match.
+                  Node* index = csa->SmiTag(csa->IntPtrSub(cursor, begin));
+                  var_result.Bind(index);
+                  csa->Goto(&out);
+
+                  csa->Bind(&next);
+                },
+                1, IndexAdvanceMode::kPost);
+  Goto(&out);
+
+  Bind(&runtime);
+  {
+    Node* const pattern = StringFromCharCode(needle_char);
+    Node* const result =
+        CallRuntime(Runtime::kStringIndexOf, context, string, pattern, from);
+    var_result.Bind(result);
+    Goto(&out);
+  }
+
+  Bind(&out);
+  return var_result.value();
+}
+
 Node* CodeStubAssembler::StringFromCodePoint(compiler::Node* codepoint,
                                              UnicodeEncoding encoding) {
   Variable var_result(this, MachineRepresentation::kTagged);
@@ -2870,7 +3549,8 @@
       Word32And(hash, Int32Constant(String::kContainsCachedArrayIndexMask));
   GotoIf(Word32NotEqual(bit, Int32Constant(0)), &runtime);
 
-  var_result.Bind(SmiTag(BitFieldDecode<String::ArrayIndexValueBits>(hash)));
+  var_result.Bind(
+      SmiTag(DecodeWordFromWord32<String::ArrayIndexValueBits>(hash)));
   Goto(&end);
 
   Bind(&runtime);
@@ -2883,6 +3563,85 @@
   return var_result.value();
 }
 
+Node* CodeStubAssembler::NumberToString(compiler::Node* context,
+                                        compiler::Node* argument) {
+  Variable result(this, MachineRepresentation::kTagged);
+  Label runtime(this, Label::kDeferred);
+  Label smi(this);
+  Label done(this, &result);
+
+  // Load the number string cache.
+  Node* number_string_cache = LoadRoot(Heap::kNumberStringCacheRootIndex);
+
+  // Make the hash mask from the length of the number string cache. It
+  // contains two elements (number and string) for each cache entry.
+  Node* mask = LoadFixedArrayBaseLength(number_string_cache);
+  Node* one = IntPtrConstant(1);
+  mask = IntPtrSub(mask, one);
+
+  GotoIf(TaggedIsSmi(argument), &smi);
+
+  // The argument isn't a smi; check whether it's a heap number.
+  Node* map = LoadMap(argument);
+  GotoUnless(WordEqual(map, HeapNumberMapConstant()), &runtime);
+
+  // Make a hash from the two 32-bit values of the double.
+  Node* low =
+      LoadObjectField(argument, HeapNumber::kValueOffset, MachineType::Int32());
+  Node* high = LoadObjectField(argument, HeapNumber::kValueOffset + kIntSize,
+                               MachineType::Int32());
+  Node* hash = Word32Xor(low, high);
+  if (Is64()) hash = ChangeInt32ToInt64(hash);
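+  // Scale the hash by two: each cache entry spans a (number, string) pair of
+  // slots, so the masked index always points at a key slot.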
+  hash = WordShl(hash, one);
+  Node* index = WordAnd(hash, SmiToWord(mask));
+
+  // Cache entry's key must be a heap number.
+  Node* number_key =
+      LoadFixedArrayElement(number_string_cache, index, 0, INTPTR_PARAMETERS);
+  GotoIf(TaggedIsSmi(number_key), &runtime);
+  map = LoadMap(number_key);
+  GotoUnless(WordEqual(map, HeapNumberMapConstant()), &runtime);
+
+  // Cache entry's key must match the heap number value we're looking for.
+  Node* low_compare = LoadObjectField(number_key, HeapNumber::kValueOffset,
+                                      MachineType::Int32());
+  Node* high_compare = LoadObjectField(
+      number_key, HeapNumber::kValueOffset + kIntSize, MachineType::Int32());
+  GotoUnless(WordEqual(low, low_compare), &runtime);
+  GotoUnless(WordEqual(high, high_compare), &runtime);
+
+  // Heap number match, return value from cache entry.
+  IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
+  result.Bind(LoadFixedArrayElement(number_string_cache, index, kPointerSize,
+                                    INTPTR_PARAMETERS));
+  Goto(&done);
+
+  Bind(&runtime);
+  {
+    // No cache entry, go to the runtime.
+    result.Bind(CallRuntime(Runtime::kNumberToString, context, argument));
+  }
+  Goto(&done);
+
+  Bind(&smi);
+  {
+    // Load the smi key, make sure it matches the smi we're looking for.
+    Node* smi_index = WordAnd(WordShl(argument, one), mask);
+    Node* smi_key = LoadFixedArrayElement(number_string_cache, smi_index, 0,
+                                          SMI_PARAMETERS);
+    GotoIf(WordNotEqual(smi_key, argument), &runtime);
+
+    // Smi match, return value from cache entry.
+    IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
+    result.Bind(LoadFixedArrayElement(number_string_cache, smi_index,
+                                      kPointerSize, SMI_PARAMETERS));
+    Goto(&done);
+  }
+
+  Bind(&done);
+  return result.value();
+}
+
 Node* CodeStubAssembler::ToName(Node* context, Node* value) {
   typedef CodeStubAssembler::Label Label;
   typedef CodeStubAssembler::Variable Variable;
@@ -2891,7 +3650,7 @@
   Variable var_result(this, MachineRepresentation::kTagged);
 
   Label is_number(this);
-  GotoIf(WordIsSmi(value), &is_number);
+  GotoIf(TaggedIsSmi(value), &is_number);
 
   Label not_name(this);
   Node* value_instance_type = LoadInstanceType(value);
@@ -2934,8 +3693,8 @@
 
 Node* CodeStubAssembler::NonNumberToNumber(Node* context, Node* input) {
   // Assert input is a HeapObject (not smi or heap number)
-  Assert(Word32BinaryNot(WordIsSmi(input)));
-  Assert(Word32NotEqual(LoadMap(input), HeapNumberMapConstant()));
+  CSA_ASSERT(this, Word32BinaryNot(TaggedIsSmi(input)));
+  CSA_ASSERT(this, Word32NotEqual(LoadMap(input), HeapNumberMapConstant()));
 
   // We might need to loop once here due to ToPrimitive conversions.
   Variable var_input(this, MachineRepresentation::kTagged);
@@ -2984,7 +3743,7 @@
 
       // Check if the {result} is already a Number.
       Label if_resultisnumber(this), if_resultisnotnumber(this);
-      GotoIf(WordIsSmi(result), &if_resultisnumber);
+      GotoIf(TaggedIsSmi(result), &if_resultisnumber);
       Node* result_map = LoadMap(result);
       Branch(WordEqual(result_map, HeapNumberMapConstant()), &if_resultisnumber,
              &if_resultisnotnumber);
@@ -3026,7 +3785,7 @@
   Label end(this);
 
   Label not_smi(this, Label::kDeferred);
-  GotoUnless(WordIsSmi(input), &not_smi);
+  GotoUnless(TaggedIsSmi(input), &not_smi);
   var_result.Bind(input);
   Goto(&end);
 
@@ -3051,6 +3810,110 @@
   return var_result.value();
 }
 
+Node* CodeStubAssembler::ToString(Node* context, Node* input) {
+  Label is_number(this);
+  Label runtime(this, Label::kDeferred);
+  Variable result(this, MachineRepresentation::kTagged);
+  Label done(this, &result);
+
+  GotoIf(TaggedIsSmi(input), &is_number);
+
+  Node* input_map = LoadMap(input);
+  Node* input_instance_type = LoadMapInstanceType(input_map);
+
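+  // Strings convert to themselves; numbers take the NumberToString fast path
+  // and oddballs are read from their to_string field below.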
+  result.Bind(input);
+  GotoIf(IsStringInstanceType(input_instance_type), &done);
+
+  Label not_heap_number(this);
+  Branch(WordNotEqual(input_map, HeapNumberMapConstant()), &not_heap_number,
+         &is_number);
+
+  Bind(&is_number);
+  result.Bind(NumberToString(context, input));
+  Goto(&done);
+
+  Bind(&not_heap_number);
+  {
+    GotoIf(Word32NotEqual(input_instance_type, Int32Constant(ODDBALL_TYPE)),
+           &runtime);
+    result.Bind(LoadObjectField(input, Oddball::kToStringOffset));
+    Goto(&done);
+  }
+
+  Bind(&runtime);
+  {
+    result.Bind(CallRuntime(Runtime::kToString, context, input));
+    Goto(&done);
+  }
+
+  Bind(&done);
+  return result.value();
+}
+
+Node* CodeStubAssembler::FlattenString(Node* string) {
+  CSA_ASSERT(this, IsString(string));
+  Variable var_result(this, MachineRepresentation::kTagged);
+  var_result.Bind(string);
+
+  Node* instance_type = LoadInstanceType(string);
+
+  // Check if the {string} is not a ConsString (i.e. already flat).
+  Label is_cons(this, Label::kDeferred), is_flat_in_cons(this), end(this);
+  {
+    GotoUnless(Word32Equal(Word32And(instance_type,
+                                     Int32Constant(kStringRepresentationMask)),
+                           Int32Constant(kConsStringTag)),
+               &end);
+
+    // Check whether the right hand side is the empty string (i.e. if
+    // this is really a flat string in a cons string).
+    Node* rhs = LoadObjectField(string, ConsString::kSecondOffset);
+    Branch(WordEqual(rhs, EmptyStringConstant()), &is_flat_in_cons, &is_cons);
+  }
+
+  // Bail out to the runtime.
+  Bind(&is_cons);
+  {
+    var_result.Bind(
+        CallRuntime(Runtime::kFlattenString, NoContextConstant(), string));
+    Goto(&end);
+  }
+
+  Bind(&is_flat_in_cons);
+  {
+    var_result.Bind(LoadObjectField(string, ConsString::kFirstOffset));
+    Goto(&end);
+  }
+
+  Bind(&end);
+  return var_result.value();
+}
+
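The fast path above rests on an invariant of cons strings: a ConsString whose second part is the empty string is already flat, and its first part is the flattened result. A toy model of that check follows; the node layout is an assumption for illustration, not ConsString's real shape.

    #include <memory>
    #include <string>

    // Toy cons-string node; leaf nodes carry characters directly.
    struct Str {
      std::shared_ptr<Str> first, second;  // both null for a leaf
      std::string chars;                   // leaf payload
      bool IsLeaf() const { return first == nullptr; }
      bool IsEmpty() const { return IsLeaf() && chars.empty(); }
    };

    std::shared_ptr<Str> Flatten(const std::shared_ptr<Str>& s) {
      if (s->IsLeaf()) return s;                  // already flat
      if (s->second->IsEmpty()) return s->first;  // flat-in-cons fast path
      // Otherwise characters must actually be copied; that is the
      // Runtime::kFlattenString path, elided in this sketch.
      return s;
    }
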
+Node* CodeStubAssembler::JSReceiverToPrimitive(Node* context, Node* input) {
+  Label if_isreceiver(this, Label::kDeferred), if_isnotreceiver(this);
+  Variable result(this, MachineRepresentation::kTagged);
+  Label done(this, &result);
+
+  BranchIfJSReceiver(input, &if_isreceiver, &if_isnotreceiver);
+
+  Bind(&if_isreceiver);
+  {
+    // Convert {input} to a primitive first, passing the Number hint.
+    Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
+    result.Bind(CallStub(callable, context, input));
+    Goto(&done);
+  }
+
+  Bind(&if_isnotreceiver);
+  {
+    result.Bind(input);
+    Goto(&done);
+  }
+
+  Bind(&done);
+  return result.value();
+}
+
 Node* CodeStubAssembler::ToInteger(Node* context, Node* input,
                                    ToIntegerTruncationMode mode) {
   // We might need to loop once for ToNumber conversion.
@@ -3067,7 +3930,7 @@
     Node* arg = var_arg.value();
 
     // Check if {arg} is a Smi.
-    GotoIf(WordIsSmi(arg), &out);
+    GotoIf(TaggedIsSmi(arg), &out);
 
     // Check if {arg} is a HeapNumber.
     Label if_argisheapnumber(this),
@@ -3104,7 +3967,7 @@
     }
 
     Bind(&return_zero);
-    var_arg.Bind(SmiConstant(Smi::FromInt(0)));
+    var_arg.Bind(SmiConstant(Smi::kZero));
     Goto(&out);
   }
 
@@ -3112,12 +3975,16 @@
   return var_arg.value();
 }
 
-Node* CodeStubAssembler::BitFieldDecode(Node* word32, uint32_t shift,
-                                        uint32_t mask) {
+Node* CodeStubAssembler::DecodeWord32(Node* word32, uint32_t shift,
+                                      uint32_t mask) {
   return Word32Shr(Word32And(word32, Int32Constant(mask)),
                    static_cast<int>(shift));
 }
 
+Node* CodeStubAssembler::DecodeWord(Node* word, uint32_t shift, uint32_t mask) {
+  return WordShr(WordAnd(word, IntPtrConstant(mask)), static_cast<int>(shift));
+}
+
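Both decoders compute (word & mask) >> shift: isolate the bit field, then normalize it down to bit 0. DecodeWord does the same on pointer-sized words. A self-contained version of the arithmetic:

    #include <cstdint>

    constexpr uint32_t Decode(uint32_t word, uint32_t shift, uint32_t mask) {
      return (word & mask) >> shift;
    }
    // Extract bits 4..7 of 0xAB: the mask keeps 0xA0, shifting yields 0xA.
    static_assert(Decode(0xAB, 4, 0xF0) == 0xA, "");
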
 void CodeStubAssembler::SetCounter(StatsCounter* counter, int value) {
   if (FLAG_native_code_counters && counter->Enabled()) {
     Node* counter_address = ExternalConstant(ExternalReference(counter));
@@ -3187,7 +4054,7 @@
   Goto(if_keyisunique);
 
   Bind(&if_hascachedindex);
-  var_index->Bind(BitFieldDecode<Name::ArrayIndexValueBits>(hash));
+  var_index->Bind(DecodeWordFromWord32<Name::ArrayIndexValueBits>(hash));
   Goto(if_keyisindex);
 }
 
@@ -3198,12 +4065,27 @@
                                                field_index));
 }
 
+template Node* CodeStubAssembler::EntryToIndex<NameDictionary>(Node*, int);
+template Node* CodeStubAssembler::EntryToIndex<GlobalDictionary>(Node*, int);
+
+Node* CodeStubAssembler::HashTableComputeCapacity(Node* at_least_space_for) {
+  Node* capacity = IntPtrRoundUpToPowerOfTwo32(
+      WordShl(at_least_space_for, IntPtrConstant(1)));
+  return IntPtrMax(capacity, IntPtrConstant(HashTableBase::kMinCapacity));
+}
+
+Node* CodeStubAssembler::IntPtrMax(Node* left, Node* right) {
+  return Select(IntPtrGreaterThanOrEqual(left, right), left, right,
+                MachineType::PointerRepresentation());
+}
+
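HashTableComputeCapacity doubles the requested element count, rounds up to the next power of two, and clamps to the table's minimum capacity via IntPtrMax. A sketch, assuming kMinCapacity == 4 (its value in HashTableBase at this revision):

    #include <cstddef>

    constexpr size_t RoundUpToPowerOfTwo(size_t x) {
      size_t p = 1;
      while (p < x) p <<= 1;
      return p;
    }

    constexpr size_t ComputeCapacity(size_t at_least_space_for) {
      size_t capacity = RoundUpToPowerOfTwo(2 * at_least_space_for);
      return capacity < 4 ? 4 : capacity;  // IntPtrMax(capacity, kMinCapacity)
    }

    static_assert(ComputeCapacity(3) == 8, "2 * 3 = 6 rounds up to 8");
    static_assert(ComputeCapacity(1) == 4, "clamped to kMinCapacity");
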
 template <typename Dictionary>
 void CodeStubAssembler::NameDictionaryLookup(Node* dictionary,
                                              Node* unique_name, Label* if_found,
                                              Variable* var_name_index,
                                              Label* if_not_found,
                                              int inlined_probes) {
+  CSA_ASSERT(this, IsDictionary(dictionary));
   DCHECK_EQ(MachineType::PointerRepresentation(), var_name_index->rep());
   Comment("NameDictionaryLookup");
 
@@ -3288,6 +4170,7 @@
                                                Label* if_found,
                                                Variable* var_entry,
                                                Label* if_not_found) {
+  CSA_ASSERT(this, IsDictionary(dictionary));
   DCHECK_EQ(MachineType::PointerRepresentation(), var_entry->rep());
   Comment("NumberDictionaryLookup");
 
@@ -3330,7 +4213,7 @@
     Label next_probe(this);
     {
       Label if_currentissmi(this), if_currentisnotsmi(this);
-      Branch(WordIsSmi(current), &if_currentissmi, &if_currentisnotsmi);
+      Branch(TaggedIsSmi(current), &if_currentissmi, &if_currentisnotsmi);
       Bind(&if_currentissmi);
       {
         Node* current_value = SmiUntag(current);
@@ -3362,25 +4245,22 @@
                                                Label* if_found,
                                                Variable* var_name_index,
                                                Label* if_not_found) {
-  Variable var_descriptor(this, MachineType::PointerRepresentation());
-  Label loop(this, &var_descriptor);
-  var_descriptor.Bind(IntPtrConstant(0));
-  Goto(&loop);
+  Node* first_inclusive = IntPtrConstant(DescriptorArray::ToKeyIndex(0));
+  Node* factor = IntPtrConstant(DescriptorArray::kDescriptorSize);
+  Node* last_exclusive = IntPtrAdd(first_inclusive, IntPtrMul(nof, factor));
 
-  Bind(&loop);
-  {
-    Node* index = var_descriptor.value();
-    Node* name_offset = IntPtrConstant(DescriptorArray::ToKeyIndex(0));
-    Node* factor = IntPtrConstant(DescriptorArray::kDescriptorSize);
-    GotoIf(WordEqual(index, nof), if_not_found);
-    Node* name_index = IntPtrAdd(name_offset, IntPtrMul(index, factor));
-    Node* candidate_name =
-        LoadFixedArrayElement(descriptors, name_index, 0, INTPTR_PARAMETERS);
-    var_name_index->Bind(name_index);
-    GotoIf(WordEqual(candidate_name, unique_name), if_found);
-    var_descriptor.Bind(IntPtrAdd(index, IntPtrConstant(1)));
-    Goto(&loop);
-  }
+  BuildFastLoop(
+      MachineType::PointerRepresentation(), last_exclusive, first_inclusive,
+      [descriptors, unique_name, if_found, var_name_index](
+          CodeStubAssembler* assembler, Node* name_index) {
+        Node* candidate_name = assembler->LoadFixedArrayElement(
+            descriptors, name_index, 0, INTPTR_PARAMETERS);
+        var_name_index->Bind(name_index);
+        assembler->GotoIf(assembler->WordEqual(candidate_name, unique_name),
+                          if_found);
+      },
+      -DescriptorArray::kDescriptorSize, IndexAdvanceMode::kPre);
+  Goto(if_not_found);
 }
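
The rewrite above replaces the hand-rolled descriptor loop with BuildFastLoop, scanning key slots backwards from the last entry to the first. In flat-array terms it is an ordinary reverse linear scan; the header and entry sizes below are illustrative stand-ins for DescriptorArray::ToKeyIndex(0) and kDescriptorSize:

    // Reverse linear scan over a flattened descriptor array; each entry is
    // kEntrySize slots wide and its key lives in the entry's first slot.
    const int kFirstKeyIndex = 2;  // stand-in for ToKeyIndex(0)
    const int kEntrySize = 3;      // stand-in for kDescriptorSize

    int LookupNameIndex(const int* array, int nof, int unique_name) {
      int last_exclusive = kFirstKeyIndex + nof * kEntrySize;
      // Step -kEntrySize with IndexAdvanceMode::kPre: advance first, then
      // run the body, until the first key slot has been visited.
      for (int i = last_exclusive - kEntrySize; i >= kFirstKeyIndex;
           i -= kEntrySize) {
        if (array[i] == unique_name) return i;  // if_found
      }
      return -1;  // if_not_found
    }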
 
 void CodeStubAssembler::TryLookupProperty(
@@ -3397,19 +4277,20 @@
                               Int32Constant(LAST_SPECIAL_RECEIVER_TYPE)),
          &if_objectisspecial);
 
-  Node* bit_field = LoadMapBitField(map);
-  Node* mask = Int32Constant(1 << Map::kHasNamedInterceptor |
-                             1 << Map::kIsAccessCheckNeeded);
-  Assert(Word32Equal(Word32And(bit_field, mask), Int32Constant(0)));
+  uint32_t mask =
+      1 << Map::kHasNamedInterceptor | 1 << Map::kIsAccessCheckNeeded;
+  CSA_ASSERT(this, Word32BinaryNot(IsSetWord32(LoadMapBitField(map), mask)));
+  USE(mask);
 
   Node* bit_field3 = LoadMapBitField3(map);
-  Node* bit = BitFieldDecode<Map::DictionaryMap>(bit_field3);
   Label if_isfastmap(this), if_isslowmap(this);
-  Branch(Word32Equal(bit, Int32Constant(0)), &if_isfastmap, &if_isslowmap);
+  Branch(IsSetWord32<Map::DictionaryMap>(bit_field3), &if_isslowmap,
+         &if_isfastmap);
   Bind(&if_isfastmap);
   {
     Comment("DescriptorArrayLookup");
-    Node* nof = BitFieldDecodeWord<Map::NumberOfOwnDescriptorsBits>(bit_field3);
+    Node* nof =
+        DecodeWordFromWord32<Map::NumberOfOwnDescriptorsBits>(bit_field3);
     // Bail out to the runtime for large numbers of own descriptors. The stub
     // only does linear search, which becomes too expensive in that case.
     {
@@ -3497,7 +4378,7 @@
                                                         name_to_details_offset);
   var_details->Bind(details);
 
-  Node* location = BitFieldDecode<PropertyDetails::LocationField>(details);
+  Node* location = DecodeWord32<PropertyDetails::LocationField>(details);
 
   Label if_in_field(this), if_in_descriptor(this), done(this);
   Branch(Word32Equal(location, Int32Constant(kField)), &if_in_field,
@@ -3505,17 +4386,17 @@
   Bind(&if_in_field);
   {
     Node* field_index =
-        BitFieldDecodeWord<PropertyDetails::FieldIndexField>(details);
+        DecodeWordFromWord32<PropertyDetails::FieldIndexField>(details);
     Node* representation =
-        BitFieldDecode<PropertyDetails::RepresentationField>(details);
+        DecodeWord32<PropertyDetails::RepresentationField>(details);
 
     Node* inobject_properties = LoadMapInobjectProperties(map);
 
     Label if_inobject(this), if_backing_store(this);
     Variable var_double_value(this, MachineRepresentation::kFloat64);
     Label rebox_double(this, &var_double_value);
-    BranchIfUintPtrLessThan(field_index, inobject_properties, &if_inobject,
-                            &if_backing_store);
+    Branch(UintPtrLessThan(field_index, inobject_properties), &if_inobject,
+           &if_backing_store);
     Bind(&if_inobject);
     {
       Comment("if_inobject");
@@ -3525,9 +4406,9 @@
                     IntPtrConstant(kPointerSize));
 
       Label if_double(this), if_tagged(this);
-      BranchIfWord32NotEqual(representation,
-                             Int32Constant(Representation::kDouble), &if_tagged,
-                             &if_double);
+      Branch(Word32NotEqual(representation,
+                            Int32Constant(Representation::kDouble)),
+             &if_tagged, &if_double);
       Bind(&if_tagged);
       {
         var_value->Bind(LoadObjectField(object, field_offset));
@@ -3553,9 +4434,9 @@
       Node* value = LoadFixedArrayElement(properties, field_index);
 
       Label if_double(this), if_tagged(this);
-      BranchIfWord32NotEqual(representation,
-                             Int32Constant(Representation::kDouble), &if_tagged,
-                             &if_double);
+      Branch(Word32NotEqual(representation,
+                            Int32Constant(Representation::kDouble)),
+             &if_tagged, &if_double);
       Bind(&if_tagged);
       {
         var_value->Bind(value);
@@ -3592,7 +4473,7 @@
                                                        Variable* var_details,
                                                        Variable* var_value) {
   Comment("LoadPropertyFromNameDictionary");
-
+  CSA_ASSERT(this, IsDictionary(dictionary));
   const int name_to_details_offset =
       (NameDictionary::kEntryDetailsIndex - NameDictionary::kEntryKeyIndex) *
       kPointerSize;
@@ -3616,6 +4497,7 @@
                                                          Variable* var_value,
                                                          Label* if_deleted) {
   Comment("[ LoadPropertyFromGlobalDictionary");
+  CSA_ASSERT(this, IsDictionary(dictionary));
 
   const int name_to_value_offset =
       (GlobalDictionary::kEntryValueIndex - GlobalDictionary::kEntryKeyIndex) *
@@ -3646,7 +4528,7 @@
   var_value.Bind(value);
   Label done(this);
 
-  Node* kind = BitFieldDecode<PropertyDetails::KindField>(details);
+  Node* kind = DecodeWord32<PropertyDetails::KindField>(details);
   GotoIf(Word32Equal(kind, Int32Constant(kData)), &done);
 
   // Accessor case.
@@ -3655,7 +4537,7 @@
     GotoIf(Word32Equal(LoadInstanceType(accessor_pair),
                        Int32Constant(ACCESSOR_INFO_TYPE)),
            if_bailout);
-    AssertInstanceType(accessor_pair, ACCESSOR_PAIR_TYPE);
+    CSA_ASSERT(this, HasInstanceType(accessor_pair, ACCESSOR_PAIR_TYPE));
     Node* getter = LoadObjectField(accessor_pair, AccessorPair::kGetterOffset);
     Node* getter_map = LoadMap(getter);
     Node* instance_type = LoadMapInstanceType(getter_map);
@@ -3666,10 +4548,7 @@
 
     // Return undefined if the {getter} is not callable.
     var_value.Bind(UndefinedConstant());
-    GotoIf(Word32Equal(Word32And(LoadMapBitField(getter_map),
-                                 Int32Constant(1 << Map::kIsCallable)),
-                       Int32Constant(0)),
-           &done);
+    GotoUnless(IsCallableMap(getter_map), &done);
 
     // Call the accessor.
     Callable callable = CodeFactory::Call(isolate());
@@ -3806,6 +4685,9 @@
   }
   Bind(&if_isdictionary);
   {
+    // Negative keys must be converted to property names.
+    GotoIf(IntPtrLessThan(intptr_index, IntPtrConstant(0)), if_bailout);
+
     Variable var_entry(this, MachineType::PointerRepresentation());
     Node* elements = LoadElements(object);
     NumberDictionaryLookup<SeededNumberDictionary>(
@@ -3813,18 +4695,18 @@
   }
   Bind(&if_isfaststringwrapper);
   {
-    AssertInstanceType(object, JS_VALUE_TYPE);
+    CSA_ASSERT(this, HasInstanceType(object, JS_VALUE_TYPE));
     Node* string = LoadJSValueValue(object);
-    Assert(IsStringInstanceType(LoadInstanceType(string)));
+    CSA_ASSERT(this, IsStringInstanceType(LoadInstanceType(string)));
     Node* length = LoadStringLength(string);
     GotoIf(UintPtrLessThan(intptr_index, SmiUntag(length)), if_found);
     Goto(&if_isobjectorsmi);
   }
   Bind(&if_isslowstringwrapper);
   {
-    AssertInstanceType(object, JS_VALUE_TYPE);
+    CSA_ASSERT(this, HasInstanceType(object, JS_VALUE_TYPE));
     Node* string = LoadJSValueValue(object);
-    Assert(IsStringInstanceType(LoadInstanceType(string)));
+    CSA_ASSERT(this, IsStringInstanceType(LoadInstanceType(string)));
     Node* length = LoadStringLength(string);
     GotoIf(UintPtrLessThan(intptr_index, SmiUntag(length)), if_found);
     Goto(&if_isdictionary);
@@ -3850,7 +4732,7 @@
     Label* if_bailout) {
   // Ensure receiver is JSReceiver, otherwise bailout.
   Label if_objectisnotsmi(this);
-  Branch(WordIsSmi(receiver), if_bailout, &if_objectisnotsmi);
+  Branch(TaggedIsSmi(receiver), if_bailout, &if_objectisnotsmi);
   Bind(&if_objectisnotsmi);
 
   Node* map = LoadMap(receiver);
@@ -3960,7 +4842,7 @@
       return_runtime(this, Label::kDeferred), return_result(this);
 
   // Goto runtime if {object} is a Smi.
-  GotoIf(WordIsSmi(object), &return_runtime);
+  GotoIf(TaggedIsSmi(object), &return_runtime);
 
   // Load map of {object}.
   Node* object_map = LoadMap(object);
@@ -3983,7 +4865,7 @@
   }
 
   // Goto runtime if {callable} is a Smi.
-  GotoIf(WordIsSmi(callable), &return_runtime);
+  GotoIf(TaggedIsSmi(callable), &return_runtime);
 
   // Load map of {callable}.
   Node* callable_map = LoadMap(callable);
@@ -4100,8 +4982,10 @@
   bool constant_index = false;
   if (mode == SMI_PARAMETERS) {
     element_size_shift -= kSmiShiftBits;
-    constant_index = ToIntPtrConstant(index_node, index);
-    index = index >> kSmiShiftBits;
+    Smi* smi_index;
+    constant_index = ToSmiConstant(index_node, smi_index);
+    if (constant_index) index = smi_index->value();
+    index_node = BitcastTaggedToWord(index_node);
   } else if (mode == INTEGER_PARAMETERS) {
     int32_t temp = 0;
     constant_index = ToInt32Constant(index_node, temp);
@@ -4116,16 +5000,14 @@
   if (Is64() && mode == INTEGER_PARAMETERS) {
     index_node = ChangeInt32ToInt64(index_node);
   }
-  if (base_size == 0) {
-    return (element_size_shift >= 0)
-               ? WordShl(index_node, IntPtrConstant(element_size_shift))
-               : WordShr(index_node, IntPtrConstant(-element_size_shift));
-  }
-  return IntPtrAdd(
-      IntPtrConstant(base_size),
-      (element_size_shift >= 0)
-          ? WordShl(index_node, IntPtrConstant(element_size_shift))
-          : WordShr(index_node, IntPtrConstant(-element_size_shift)));
+
+  Node* shifted_index =
+      (element_size_shift == 0)
+          ? index_node
+          : ((element_size_shift > 0)
+                 ? WordShl(index_node, IntPtrConstant(element_size_shift))
+                 : WordShr(index_node, IntPtrConstant(-element_size_shift)));
+  return IntPtrAddFoldConstants(IntPtrConstant(base_size), shifted_index);
 }
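
The restructured tail shifts the index once and folds the base size in with IntPtrAddFoldConstants, instead of duplicating the shift in two branches. The arithmetic itself, assuming a 64-bit target (kPointerSize == 8, 16-byte FixedArray header):

    #include <cstdint>

    constexpr intptr_t ElementOffset(intptr_t index, int element_size_shift,
                                     intptr_t base_size) {
      intptr_t shifted =
          element_size_shift == 0
              ? index
              : element_size_shift > 0 ? index << element_size_shift
                                       : index >> -element_size_shift;
      return base_size + shifted;
    }

    // FixedArray element 5 on x64: 16-byte header plus 5 * 8 bytes.
    static_assert(ElementOffset(5, 3, 16) == 56, "");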
 
 compiler::Node* CodeStubAssembler::LoadTypeFeedbackVectorForStub() {
@@ -4152,11 +5034,10 @@
 
 compiler::Node* CodeStubAssembler::LoadReceiverMap(compiler::Node* receiver) {
   Variable var_receiver_map(this, MachineRepresentation::kTagged);
-  // TODO(ishell): defer blocks when it works.
-  Label load_smi_map(this /*, Label::kDeferred*/), load_receiver_map(this),
+  Label load_smi_map(this, Label::kDeferred), load_receiver_map(this),
       if_result(this);
 
-  Branch(WordIsSmi(receiver), &load_smi_map, &load_receiver_map);
+  Branch(TaggedIsSmi(receiver), &load_smi_map, &load_receiver_map);
   Bind(&load_smi_map);
   {
     var_receiver_map.Bind(LoadRoot(Heap::kHeapNumberMapRootIndex));
@@ -4174,22 +5055,29 @@
 compiler::Node* CodeStubAssembler::TryMonomorphicCase(
     compiler::Node* slot, compiler::Node* vector, compiler::Node* receiver_map,
     Label* if_handler, Variable* var_handler, Label* if_miss) {
+  Comment("TryMonomorphicCase");
   DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
 
   // TODO(ishell): add helper class that hides offset computations for a series
   // of loads.
   int32_t header_size = FixedArray::kHeaderSize - kHeapObjectTag;
-  Node* offset = ElementOffsetFromIndex(slot, FAST_HOLEY_ELEMENTS,
-                                        SMI_PARAMETERS, header_size);
-  Node* feedback = Load(MachineType::AnyTagged(), vector, offset);
+  // Adding |header_size| with a separate IntPtrAdd rather than passing it
+  // into ElementOffsetFromIndex() allows it to be folded into a single
+  // [base, index, offset] indirect memory access on x64.
+  Node* offset =
+      ElementOffsetFromIndex(slot, FAST_HOLEY_ELEMENTS, SMI_PARAMETERS);
+  Node* feedback = Load(MachineType::AnyTagged(), vector,
+                        IntPtrAdd(offset, IntPtrConstant(header_size)));
 
   // Try to quickly handle the monomorphic case without knowing for sure
   // if we have a weak cell in feedback. We do know it's safe to look
   // at WeakCell::kValueOffset.
-  GotoUnless(WordEqual(receiver_map, LoadWeakCellValue(feedback)), if_miss);
+  GotoIf(WordNotEqual(receiver_map, LoadWeakCellValueUnchecked(feedback)),
+         if_miss);
 
-  Node* handler = Load(MachineType::AnyTagged(), vector,
-                       IntPtrAdd(offset, IntPtrConstant(kPointerSize)));
+  Node* handler =
+      Load(MachineType::AnyTagged(), vector,
+           IntPtrAdd(offset, IntPtrConstant(header_size + kPointerSize)));
 
   var_handler->Bind(handler);
   Goto(if_handler);
@@ -4199,6 +5087,7 @@
 void CodeStubAssembler::HandlePolymorphicCase(
     compiler::Node* receiver_map, compiler::Node* feedback, Label* if_handler,
     Variable* var_handler, Label* if_miss, int unroll_count) {
+  Comment("HandlePolymorphicCase");
   DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
 
   // Iterate {feedback} array.
@@ -4218,34 +5107,70 @@
 
     Bind(&next_entry);
   }
-  Node* length = LoadAndUntagFixedArrayBaseLength(feedback);
 
   // Loop from {unroll_count}*kEntrySize to {length}.
-  Variable var_index(this, MachineType::PointerRepresentation());
-  Label loop(this, &var_index);
-  var_index.Bind(IntPtrConstant(unroll_count * kEntrySize));
-  Goto(&loop);
-  Bind(&loop);
-  {
-    Node* index = var_index.value();
-    GotoIf(UintPtrGreaterThanOrEqual(index, length), if_miss);
+  Node* init = IntPtrConstant(unroll_count * kEntrySize);
+  Node* length = LoadAndUntagFixedArrayBaseLength(feedback);
+  BuildFastLoop(
+      MachineType::PointerRepresentation(), init, length,
+      [receiver_map, feedback, if_handler, var_handler](CodeStubAssembler* csa,
+                                                        Node* index) {
+        Node* cached_map = csa->LoadWeakCellValue(
+            csa->LoadFixedArrayElement(feedback, index, 0, INTPTR_PARAMETERS));
 
-    Node* cached_map = LoadWeakCellValue(
-        LoadFixedArrayElement(feedback, index, 0, INTPTR_PARAMETERS));
+        Label next_entry(csa);
+        csa->GotoIf(csa->WordNotEqual(receiver_map, cached_map), &next_entry);
 
-    Label next_entry(this);
-    GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);
+        // Found, now call handler.
+        Node* handler = csa->LoadFixedArrayElement(
+            feedback, index, kPointerSize, INTPTR_PARAMETERS);
+        var_handler->Bind(handler);
+        csa->Goto(if_handler);
 
-    // Found, now call handler.
-    Node* handler =
-        LoadFixedArrayElement(feedback, index, kPointerSize, INTPTR_PARAMETERS);
-    var_handler->Bind(handler);
-    Goto(if_handler);
+        csa->Bind(&next_entry);
+      },
+      kEntrySize, IndexAdvanceMode::kPost);
+  // The loop falls through if no handler was found.
+  Goto(if_miss);
+}
 
-    Bind(&next_entry);
-    var_index.Bind(IntPtrAdd(index, IntPtrConstant(kEntrySize)));
-    Goto(&loop);
-  }
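
The polymorphic feedback array scanned above holds (weak map cell, handler) pairs, hence kEntrySize == 2: the first unroll_count entries get straight-line checks, the remainder go through BuildFastLoop. Ignoring the weak-cell indirection, the scan is equivalent to this sketch:

    // Pair scan over polymorphic feedback (weak-cell loads elided).
    void* FindHandler(void* const* feedback, int length, void* receiver_map) {
      for (int i = 0; i < length; i += 2) {
        if (feedback[i] == receiver_map) return feedback[i + 1];  // if_handler
      }
      return nullptr;  // falls through to if_miss
    }
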
+void CodeStubAssembler::HandleKeyedStorePolymorphicCase(
+    compiler::Node* receiver_map, compiler::Node* feedback, Label* if_handler,
+    Variable* var_handler, Label* if_transition_handler,
+    Variable* var_transition_map_cell, Label* if_miss) {
+  DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
+  DCHECK_EQ(MachineRepresentation::kTagged, var_transition_map_cell->rep());
+
+  const int kEntrySize = 3;
+
+  Node* init = IntPtrConstant(0);
+  Node* length = LoadAndUntagFixedArrayBaseLength(feedback);
+  BuildFastLoop(
+      MachineType::PointerRepresentation(), init, length,
+      [receiver_map, feedback, if_handler, var_handler, if_transition_handler,
+       var_transition_map_cell](CodeStubAssembler* csa, Node* index) {
+        Node* cached_map = csa->LoadWeakCellValue(
+            csa->LoadFixedArrayElement(feedback, index, 0, INTPTR_PARAMETERS));
+        Label next_entry(csa);
+        csa->GotoIf(csa->WordNotEqual(receiver_map, cached_map), &next_entry);
+
+        Node* maybe_transition_map_cell = csa->LoadFixedArrayElement(
+            feedback, index, kPointerSize, INTPTR_PARAMETERS);
+
+        var_handler->Bind(csa->LoadFixedArrayElement(
+            feedback, index, 2 * kPointerSize, INTPTR_PARAMETERS));
+        csa->GotoIf(
+            csa->WordEqual(maybe_transition_map_cell,
+                           csa->LoadRoot(Heap::kUndefinedValueRootIndex)),
+            if_handler);
+        var_transition_map_cell->Bind(maybe_transition_map_cell);
+        csa->Goto(if_transition_handler);
+
+        csa->Bind(&next_entry);
+      },
+      kEntrySize, IndexAdvanceMode::kPost);
+  // The loop falls through if no handler was found.
+  Goto(if_miss);
 }
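
HandleKeyedStorePolymorphicCase scans the same kind of array but with three-slot entries: the expected map, an optional transition-map cell, and the handler. An undefined transition slot means a plain store handler; otherwise the transition cell is handed back through var_transition_map_cell. As plain data (field names are illustrative):

    // Three-slot feedback entry, kEntrySize == 3.
    struct KeyedStoreEntry {
      void* weak_map_cell;        // expected receiver map
      void* transition_map_cell;  // undefined means no transition
      void* handler;              // code object or smi-encoded handler
    };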
 
 compiler::Node* CodeStubAssembler::StubCachePrimaryOffset(compiler::Node* name,
@@ -4254,9 +5179,10 @@
   STATIC_ASSERT(StubCache::kCacheIndexShift == Name::kHashShift);
   // Compute the hash of the name (use entire hash field).
   Node* hash_field = LoadNameHashField(name);
-  Assert(Word32Equal(
-      Word32And(hash_field, Int32Constant(Name::kHashNotComputedMask)),
-      Int32Constant(0)));
+  CSA_ASSERT(this,
+             Word32Equal(Word32And(hash_field,
+                                   Int32Constant(Name::kHashNotComputedMask)),
+                         Int32Constant(0)));
 
   // Using only the low bits in 64-bit mode is unlikely to increase the
   // risk of collision even if the heap is spread over an area larger than
@@ -4321,11 +5247,11 @@
 
   DCHECK_EQ(kPointerSize, stub_cache->value_reference(table).address() -
                               stub_cache->key_reference(table).address());
-  Node* code = Load(MachineType::Pointer(), key_base,
-                    IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize)));
+  Node* handler = Load(MachineType::TaggedPointer(), key_base,
+                       IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize)));
 
   // We found the handler.
-  var_handler->Bind(code);
+  var_handler->Bind(handler);
   Goto(if_handler);
 }
 
@@ -4338,7 +5264,7 @@
   IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
 
   // Check that the {receiver} isn't a smi.
-  GotoIf(WordIsSmi(receiver), &miss);
+  GotoIf(TaggedIsSmi(receiver), &miss);
 
   Node* receiver_map = LoadMap(receiver);
 
@@ -4365,7 +5291,7 @@
 Node* CodeStubAssembler::TryToIntptr(Node* key, Label* miss) {
   Variable var_intptr_key(this, MachineType::PointerRepresentation());
   Label done(this, &var_intptr_key), key_is_smi(this);
-  GotoIf(WordIsSmi(key), &key_is_smi);
+  GotoIf(TaggedIsSmi(key), &key_is_smi);
   // Try to convert a heap number to a Smi.
   GotoUnless(WordEqual(LoadMap(key), HeapNumberMapConstant()), miss);
   {
@@ -4392,6 +5318,7 @@
                                                     Node* is_jsarray_condition,
                                                     Label* miss) {
   Variable var_length(this, MachineType::PointerRepresentation());
+  Comment("Fast elements bounds check");
   Label if_array(this), length_loaded(this, &var_length);
   GotoIf(is_jsarray_condition, &if_array);
   {
@@ -4416,7 +5343,7 @@
                                         Label* out_of_bounds, Label* miss) {
   Label if_typed_array(this), if_fast_packed(this), if_fast_holey(this),
       if_fast_double(this), if_fast_holey_double(this), if_nonfast(this),
-      if_dictionary(this), unreachable(this);
+      if_dictionary(this);
   GotoIf(
       IntPtrGreaterThan(elements_kind, IntPtrConstant(LAST_FAST_ELEMENTS_KIND)),
       &if_nonfast);
@@ -4501,7 +5428,7 @@
         var_entry.value(), SeededNumberDictionary::kEntryDetailsIndex);
     Node* details = SmiToWord32(
         LoadFixedArrayElement(elements, details_index, 0, INTPTR_PARAMETERS));
-    Node* kind = BitFieldDecode<PropertyDetails::KindField>(details);
+    Node* kind = DecodeWord32<PropertyDetails::KindField>(details);
     // TODO(jkummerow): Support accessors without missing?
     GotoUnless(Word32Equal(kind, Int32Constant(kData)), miss);
     // Finally, load the value.
@@ -4545,13 +5472,13 @@
         UINT8_ELEMENTS,  UINT8_CLAMPED_ELEMENTS, INT8_ELEMENTS,
         UINT16_ELEMENTS, INT16_ELEMENTS,         UINT32_ELEMENTS,
         INT32_ELEMENTS,  FLOAT32_ELEMENTS,       FLOAT64_ELEMENTS};
-    const int kTypedElementsKindCount = LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
-                                        FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND +
-                                        1;
+    const size_t kTypedElementsKindCount =
+        LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
+        FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND + 1;
     DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kinds));
     DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kind_labels));
     Switch(elements_kind, miss, elements_kinds, elements_kind_labels,
-           static_cast<size_t>(kTypedElementsKindCount));
+           kTypedElementsKindCount);
     Bind(&uint8_elements);
     {
       Comment("UINT8_ELEMENTS");  // Handles UINT8_CLAMPED_ELEMENTS too.
@@ -4611,114 +5538,370 @@
     const LoadICParameters* p, Node* handler, Label* miss,
     ElementSupport support_elements) {
   Comment("have_handler");
-  Label call_handler(this);
-  GotoUnless(WordIsSmi(handler), &call_handler);
+  Variable var_holder(this, MachineRepresentation::kTagged);
+  var_holder.Bind(p->receiver);
+  Variable var_smi_handler(this, MachineRepresentation::kTagged);
+  var_smi_handler.Bind(handler);
 
-  // |handler| is a Smi, encoding what to do. See handler-configuration.h
+  Variable* vars[] = {&var_holder, &var_smi_handler};
+  Label if_smi_handler(this, 2, vars);
+  Label try_proto_handler(this), call_handler(this);
+
+  Branch(TaggedIsSmi(handler), &if_smi_handler, &try_proto_handler);
+
+  // |handler| is a Smi, encoding what to do. See SmiHandler methods
   // for the encoding format.
+  Bind(&if_smi_handler);
   {
-    Variable var_double_value(this, MachineRepresentation::kFloat64);
-    Label rebox_double(this, &var_double_value);
+    HandleLoadICSmiHandlerCase(p, var_holder.value(), var_smi_handler.value(),
+                               miss, support_elements);
+  }
 
-    Node* handler_word = SmiUntag(handler);
-    if (support_elements == kSupportElements) {
-      Label property(this);
-      Node* handler_type =
-          WordAnd(handler_word, IntPtrConstant(LoadHandlerTypeBit::kMask));
+  Bind(&try_proto_handler);
+  {
+    GotoIf(IsCodeMap(LoadMap(handler)), &call_handler);
+    HandleLoadICProtoHandler(p, handler, &var_holder, &var_smi_handler,
+                             &if_smi_handler, miss);
+  }
+
+  Bind(&call_handler);
+  {
+    typedef LoadWithVectorDescriptor Descriptor;
+    TailCallStub(Descriptor(isolate()), handler, p->context,
+                 Arg(Descriptor::kReceiver, p->receiver),
+                 Arg(Descriptor::kName, p->name),
+                 Arg(Descriptor::kSlot, p->slot),
+                 Arg(Descriptor::kVector, p->vector));
+  }
+}
+
+void CodeStubAssembler::HandleLoadICSmiHandlerCase(
+    const LoadICParameters* p, Node* holder, Node* smi_handler, Label* miss,
+    ElementSupport support_elements) {
+  Variable var_double_value(this, MachineRepresentation::kFloat64);
+  Label rebox_double(this, &var_double_value);
+
+  Node* handler_word = SmiUntag(smi_handler);
+  Node* handler_kind = DecodeWord<LoadHandler::KindBits>(handler_word);
+  if (support_elements == kSupportElements) {
+    Label property(this);
+    GotoUnless(
+        WordEqual(handler_kind, IntPtrConstant(LoadHandler::kForElements)),
+        &property);
+
+    Comment("element_load");
+    Node* intptr_index = TryToIntptr(p->name, miss);
+    Node* elements = LoadElements(holder);
+    Node* is_jsarray_condition =
+        IsSetWord<LoadHandler::IsJsArrayBits>(handler_word);
+    Node* elements_kind =
+        DecodeWord<LoadHandler::ElementsKindBits>(handler_word);
+    Label if_hole(this), unimplemented_elements_kind(this);
+    Label* out_of_bounds = miss;
+    EmitElementLoad(holder, elements, elements_kind, intptr_index,
+                    is_jsarray_condition, &if_hole, &rebox_double,
+                    &var_double_value, &unimplemented_elements_kind,
+                    out_of_bounds, miss);
+
+    Bind(&unimplemented_elements_kind);
+    {
+      // Smi handlers should only be installed for supported elements kinds.
+      // Crash if we get here.
+      DebugBreak();
+      Goto(miss);
+    }
+
+    Bind(&if_hole);
+    {
+      Comment("convert hole");
+      GotoUnless(IsSetWord<LoadHandler::ConvertHoleBits>(handler_word), miss);
+      Node* protector_cell = LoadRoot(Heap::kArrayProtectorRootIndex);
+      DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
       GotoUnless(
-          WordEqual(handler_type, IntPtrConstant(kLoadICHandlerForElements)),
-          &property);
-
-      Comment("element_load");
-      Node* intptr_index = TryToIntptr(p->name, miss);
-      Node* elements = LoadElements(p->receiver);
-      Node* is_jsarray =
-          WordAnd(handler_word, IntPtrConstant(KeyedLoadIsJsArray::kMask));
-      Node* is_jsarray_condition = WordNotEqual(is_jsarray, IntPtrConstant(0));
-      Node* elements_kind = BitFieldDecode<KeyedLoadElementsKind>(handler_word);
-      Label if_hole(this), unimplemented_elements_kind(this);
-      Label* out_of_bounds = miss;
-      EmitElementLoad(p->receiver, elements, elements_kind, intptr_index,
-                      is_jsarray_condition, &if_hole, &rebox_double,
-                      &var_double_value, &unimplemented_elements_kind,
-                      out_of_bounds, miss);
-
-      Bind(&unimplemented_elements_kind);
-      {
-        // Smi handlers should only be installed for supported elements kinds.
-        // Crash if we get here.
-        DebugBreak();
-        Goto(miss);
-      }
-
-      Bind(&if_hole);
-      {
-        Comment("convert hole");
-        Node* convert_hole =
-            WordAnd(handler_word, IntPtrConstant(KeyedLoadConvertHole::kMask));
-        GotoIf(WordEqual(convert_hole, IntPtrConstant(0)), miss);
-        Node* protector_cell = LoadRoot(Heap::kArrayProtectorRootIndex);
-        DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
-        GotoUnless(
-            WordEqual(
-                LoadObjectField(protector_cell, PropertyCell::kValueOffset),
-                SmiConstant(Smi::FromInt(Isolate::kArrayProtectorValid))),
-            miss);
-        Return(UndefinedConstant());
-      }
-
-      Bind(&property);
-      Comment("property_load");
+          WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+                    SmiConstant(Smi::FromInt(Isolate::kProtectorValid))),
+          miss);
+      Return(UndefinedConstant());
     }
 
-    // |handler_word| is a field index as obtained by
-    // FieldIndex.GetLoadByFieldOffset():
-    Label inobject_double(this), out_of_object(this),
-        out_of_object_double(this);
-    Node* inobject_bit =
-        WordAnd(handler_word, IntPtrConstant(FieldOffsetIsInobject::kMask));
-    Node* double_bit =
-        WordAnd(handler_word, IntPtrConstant(FieldOffsetIsDouble::kMask));
-    Node* offset =
-        WordSar(handler_word, IntPtrConstant(FieldOffsetOffset::kShift));
+    Bind(&property);
+    Comment("property_load");
+  }
 
-    GotoIf(WordEqual(inobject_bit, IntPtrConstant(0)), &out_of_object);
+  Label constant(this), field(this);
+  Branch(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kForFields)),
+         &field, &constant);
 
-    GotoUnless(WordEqual(double_bit, IntPtrConstant(0)), &inobject_double);
-    Return(LoadObjectField(p->receiver, offset));
+  Bind(&field);
+  {
+    Comment("field_load");
+    Node* offset = DecodeWord<LoadHandler::FieldOffsetBits>(handler_word);
 
-    Bind(&inobject_double);
-    if (FLAG_unbox_double_fields) {
-      var_double_value.Bind(
-          LoadObjectField(p->receiver, offset, MachineType::Float64()));
-    } else {
-      Node* mutable_heap_number = LoadObjectField(p->receiver, offset);
-      var_double_value.Bind(LoadHeapNumberValue(mutable_heap_number));
+    Label inobject(this), out_of_object(this);
+    Branch(IsSetWord<LoadHandler::IsInobjectBits>(handler_word), &inobject,
+           &out_of_object);
+
+    Bind(&inobject);
+    {
+      Label is_double(this);
+      GotoIf(IsSetWord<LoadHandler::IsDoubleBits>(handler_word), &is_double);
+      Return(LoadObjectField(holder, offset));
+
+      Bind(&is_double);
+      if (FLAG_unbox_double_fields) {
+        var_double_value.Bind(
+            LoadObjectField(holder, offset, MachineType::Float64()));
+      } else {
+        Node* mutable_heap_number = LoadObjectField(holder, offset);
+        var_double_value.Bind(LoadHeapNumberValue(mutable_heap_number));
+      }
+      Goto(&rebox_double);
     }
-    Goto(&rebox_double);
 
     Bind(&out_of_object);
-    Node* properties = LoadProperties(p->receiver);
-    Node* value = LoadObjectField(properties, offset);
-    GotoUnless(WordEqual(double_bit, IntPtrConstant(0)), &out_of_object_double);
-    Return(value);
+    {
+      Label is_double(this);
+      Node* properties = LoadProperties(holder);
+      Node* value = LoadObjectField(properties, offset);
+      GotoIf(IsSetWord<LoadHandler::IsDoubleBits>(handler_word), &is_double);
+      Return(value);
 
-    Bind(&out_of_object_double);
-    var_double_value.Bind(LoadHeapNumberValue(value));
-    Goto(&rebox_double);
+      Bind(&is_double);
+      var_double_value.Bind(LoadHeapNumberValue(value));
+      Goto(&rebox_double);
+    }
 
     Bind(&rebox_double);
     Return(AllocateHeapNumberWithValue(var_double_value.value()));
   }
 
-  // |handler| is a heap object. Must be code, call it.
-  Bind(&call_handler);
-  typedef LoadWithVectorDescriptor Descriptor;
-  TailCallStub(Descriptor(isolate()), handler, p->context,
-               Arg(Descriptor::kReceiver, p->receiver),
-               Arg(Descriptor::kName, p->name),
-               Arg(Descriptor::kSlot, p->slot),
-               Arg(Descriptor::kVector, p->vector));
+  Bind(&constant);
+  {
+    Comment("constant_load");
+    Node* descriptors = LoadMapDescriptors(LoadMap(holder));
+    Node* descriptor =
+        DecodeWord<LoadHandler::DescriptorValueIndexBits>(handler_word);
+    CSA_ASSERT(this,
+               UintPtrLessThan(descriptor,
+                               LoadAndUntagFixedArrayBaseLength(descriptors)));
+    Node* value =
+        LoadFixedArrayElement(descriptors, descriptor, 0, INTPTR_PARAMETERS);
+
+    Label if_accessor_info(this);
+    GotoIf(IsSetWord<LoadHandler::IsAccessorInfoBits>(handler_word),
+           &if_accessor_info);
+    Return(value);
+
+    Bind(&if_accessor_info);
+    Callable callable = CodeFactory::ApiGetter(isolate());
+    TailCallStub(callable, p->context, p->receiver, holder, value);
+  }
+}
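
A load smi-handler packs everything the fast path needs into a single word: a kind, element or field flags, and a field offset or descriptor index, decoded above with DecodeWord<...>. The packing below is purely hypothetical (the real bit positions live in LoadHandler and are not reproduced here); it only demonstrates the encode/decode round trip:

    #include <cstdint>

    // Hypothetical layout: kind in bits 0-1, flags in bits 2-3, field
    // offset in the remaining high bits.
    constexpr uintptr_t kKindMask = 0x3;
    constexpr uintptr_t kIsInobjectBit = uintptr_t{1} << 2;
    constexpr uintptr_t kIsDoubleBit = uintptr_t{1} << 3;
    constexpr int kFieldOffsetShift = 4;

    constexpr uintptr_t MakeFieldHandler(uintptr_t kind, uintptr_t offset,
                                         bool inobject, bool is_double) {
      return kind | (inobject ? kIsInobjectBit : 0) |
             (is_double ? kIsDoubleBit : 0) | (offset << kFieldOffsetShift);
    }

    constexpr uintptr_t kSample =
        MakeFieldHandler(/*kind=*/1, /*offset=*/24, /*inobject=*/true, false);
    static_assert((kSample & kKindMask) == 1, "");
    static_assert((kSample >> kFieldOffsetShift) == 24, "");
    static_assert((kSample & kIsInobjectBit) != 0, "");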
+
+void CodeStubAssembler::HandleLoadICProtoHandler(
+    const LoadICParameters* p, Node* handler, Variable* var_holder,
+    Variable* var_smi_handler, Label* if_smi_handler, Label* miss) {
+  DCHECK_EQ(MachineRepresentation::kTagged, var_holder->rep());
+  DCHECK_EQ(MachineRepresentation::kTagged, var_smi_handler->rep());
+
+  // IC dispatchers rely on these assumptions holding.
+  STATIC_ASSERT(FixedArray::kLengthOffset == LoadHandler::kHolderCellOffset);
+  DCHECK_EQ(FixedArray::OffsetOfElementAt(LoadHandler::kSmiHandlerIndex),
+            LoadHandler::kSmiHandlerOffset);
+  DCHECK_EQ(FixedArray::OffsetOfElementAt(LoadHandler::kValidityCellIndex),
+            LoadHandler::kValidityCellOffset);
+
+  // Both FixedArray and Tuple3 handlers have validity cell at the same offset.
+  Label validity_cell_check_done(this);
+  Node* validity_cell =
+      LoadObjectField(handler, LoadHandler::kValidityCellOffset);
+  GotoIf(WordEqual(validity_cell, IntPtrConstant(0)),
+         &validity_cell_check_done);
+  Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
+  GotoIf(WordNotEqual(cell_value,
+                      SmiConstant(Smi::FromInt(Map::kPrototypeChainValid))),
+         miss);
+  Goto(&validity_cell_check_done);
+
+  Bind(&validity_cell_check_done);
+  Node* smi_handler = LoadObjectField(handler, LoadHandler::kSmiHandlerOffset);
+  CSA_ASSERT(this, TaggedIsSmi(smi_handler));
+  Node* handler_flags = SmiUntag(smi_handler);
+
+  Label check_prototypes(this);
+  GotoUnless(
+      IsSetWord<LoadHandler::DoNegativeLookupOnReceiverBits>(handler_flags),
+      &check_prototypes);
+  {
+    CSA_ASSERT(this, Word32BinaryNot(
+                         HasInstanceType(p->receiver, JS_GLOBAL_OBJECT_TYPE)));
+    // We have a dictionary receiver; do a negative lookup check.
+    NameDictionaryNegativeLookup(p->receiver, p->name, miss);
+    Goto(&check_prototypes);
+  }
+
+  Bind(&check_prototypes);
+  Node* maybe_holder_cell =
+      LoadObjectField(handler, LoadHandler::kHolderCellOffset);
+  Label array_handler(this), tuple_handler(this);
+  Branch(TaggedIsSmi(maybe_holder_cell), &array_handler, &tuple_handler);
+
+  Bind(&tuple_handler);
+  {
+    Label load_existent(this);
+    GotoIf(WordNotEqual(maybe_holder_cell, NullConstant()), &load_existent);
+    // This is a handler for a load of a non-existent value.
+    Return(UndefinedConstant());
+
+    Bind(&load_existent);
+    Node* holder = LoadWeakCellValue(maybe_holder_cell);
+    // The |holder| is guaranteed to be alive at this point since we passed
+    // both the receiver map check and the validity cell check.
+    CSA_ASSERT(this, WordNotEqual(holder, IntPtrConstant(0)));
+
+    var_holder->Bind(holder);
+    var_smi_handler->Bind(smi_handler);
+    Goto(if_smi_handler);
+  }
+
+  Bind(&array_handler);
+  {
+    typedef LoadICProtoArrayDescriptor Descriptor;
+    LoadICProtoArrayStub stub(isolate());
+    Node* target = HeapConstant(stub.GetCode());
+    TailCallStub(Descriptor(isolate()), target, p->context,
+                 Arg(Descriptor::kReceiver, p->receiver),
+                 Arg(Descriptor::kName, p->name),
+                 Arg(Descriptor::kSlot, p->slot),
+                 Arg(Descriptor::kVector, p->vector),
+                 Arg(Descriptor::kHandler, handler));
+  }
+}
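
Both handler shapes begin with the same validity-cell check: a zero cell means the handler can never go stale, otherwise the cell must still hold Map::kPrototypeChainValid, because any mutation of the prototype chain invalidates the cell and forces a miss. As a predicate (Smi unwrapping elided):

    // Sketch of the validity-cell protocol used above.
    bool HandlerStillValid(const int* validity_cell,
                           int prototype_chain_valid) {
      if (validity_cell == nullptr) return true;  // never invalidated
      return *validity_cell == prototype_chain_valid;  // otherwise: miss
    }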
+
+void CodeStubAssembler::LoadICProtoArray(const LoadICParameters* p,
+                                         Node* handler) {
+  Label miss(this);
+  CSA_ASSERT(this, Word32BinaryNot(TaggedIsSmi(handler)));
+  CSA_ASSERT(this, IsFixedArrayMap(LoadMap(handler)));
+
+  Node* smi_handler = LoadObjectField(handler, LoadHandler::kSmiHandlerOffset);
+  Node* handler_flags = SmiUntag(smi_handler);
+
+  Node* handler_length = LoadAndUntagFixedArrayBaseLength(handler);
+
+  Node* holder = EmitLoadICProtoArrayCheck(p, handler, handler_length,
+                                           handler_flags, &miss);
+
+  HandleLoadICSmiHandlerCase(p, holder, smi_handler, &miss, kOnlyProperties);
+
+  Bind(&miss);
+  {
+    TailCallRuntime(Runtime::kLoadIC_Miss, p->context, p->receiver, p->name,
+                    p->slot, p->vector);
+  }
+}
+
+Node* CodeStubAssembler::EmitLoadICProtoArrayCheck(const LoadICParameters* p,
+                                                   Node* handler,
+                                                   Node* handler_length,
+                                                   Node* handler_flags,
+                                                   Label* miss) {
+  Variable start_index(this, MachineType::PointerRepresentation());
+  start_index.Bind(IntPtrConstant(LoadHandler::kFirstPrototypeIndex));
+
+  Label can_access(this);
+  GotoUnless(IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_flags),
+             &can_access);
+  {
+    // Skip the handler entry that holds the expected native context.
+    start_index.Bind(IntPtrConstant(LoadHandler::kFirstPrototypeIndex + 1));
+
+    int offset =
+        FixedArray::OffsetOfElementAt(LoadHandler::kFirstPrototypeIndex);
+    Node* expected_native_context =
+        LoadWeakCellValue(LoadObjectField(handler, offset), miss);
+    CSA_ASSERT(this, IsNativeContext(expected_native_context));
+
+    Node* native_context = LoadNativeContext(p->context);
+    GotoIf(WordEqual(expected_native_context, native_context), &can_access);
+    // If the receiver is not a JSGlobalProxy then we miss.
+    GotoUnless(IsJSGlobalProxy(p->receiver), miss);
+    // For JSGlobalProxy receiver try to compare security tokens of current
+    // and expected native contexts.
+    Node* expected_token = LoadContextElement(expected_native_context,
+                                              Context::SECURITY_TOKEN_INDEX);
+    Node* current_token =
+        LoadContextElement(native_context, Context::SECURITY_TOKEN_INDEX);
+    Branch(WordEqual(expected_token, current_token), &can_access, miss);
+  }
+  Bind(&can_access);
+
+  BuildFastLoop(
+      MachineType::PointerRepresentation(), start_index.value(), handler_length,
+      [this, p, handler, miss](CodeStubAssembler*, Node* current) {
+        Node* prototype_cell =
+            LoadFixedArrayElement(handler, current, 0, INTPTR_PARAMETERS);
+        CheckPrototype(prototype_cell, p->name, miss);
+      },
+      1, IndexAdvanceMode::kPost);
+
+  Node* maybe_holder_cell = LoadFixedArrayElement(
+      handler, IntPtrConstant(LoadHandler::kHolderCellIndex), 0,
+      INTPTR_PARAMETERS);
+  Label load_existent(this);
+  GotoIf(WordNotEqual(maybe_holder_cell, NullConstant()), &load_existent);
+  // This is a handler for a load of a non-existent value.
+  Return(UndefinedConstant());
+
+  Bind(&load_existent);
+  Node* holder = LoadWeakCellValue(maybe_holder_cell);
+  // The |holder| is guaranteed to be alive at this point since we passed
+  // the receiver map check, the validity cell check and the prototype chain
+  // check.
+  CSA_ASSERT(this, WordNotEqual(holder, IntPtrConstant(0)));
+  return holder;
+}
+
+void CodeStubAssembler::CheckPrototype(Node* prototype_cell, Node* name,
+                                       Label* miss) {
+  Node* maybe_prototype = LoadWeakCellValue(prototype_cell, miss);
+
+  Label done(this);
+  Label if_property_cell(this), if_dictionary_object(this);
+
+  // |maybe_prototype| is either a PropertyCell or a slow-mode prototype.
+  Branch(WordEqual(LoadMap(maybe_prototype),
+                   LoadRoot(Heap::kGlobalPropertyCellMapRootIndex)),
+         &if_property_cell, &if_dictionary_object);
+
+  Bind(&if_dictionary_object);
+  {
+    CSA_ASSERT(this, IsDictionaryMap(LoadMap(maybe_prototype)));
+    NameDictionaryNegativeLookup(maybe_prototype, name, miss);
+    Goto(&done);
+  }
+
+  Bind(&if_property_cell);
+  {
+    // Ensure the property cell still contains the hole.
+    Node* value = LoadObjectField(maybe_prototype, PropertyCell::kValueOffset);
+    GotoIf(WordNotEqual(value, LoadRoot(Heap::kTheHoleValueRootIndex)), miss);
+    Goto(&done);
+  }
+
+  Bind(&done);
+}
+
+void CodeStubAssembler::NameDictionaryNegativeLookup(Node* object, Node* name,
+                                                     Label* miss) {
+  CSA_ASSERT(this, IsDictionaryMap(LoadMap(object)));
+  Node* properties = LoadProperties(object);
+  // Ensure the property does not exist in a dictionary-mode object.
+  Variable var_name_index(this, MachineType::PointerRepresentation());
+  Label done(this);
+  NameDictionaryLookup<NameDictionary>(properties, name, miss, &var_name_index,
+                                       &done);
+  Bind(&done);
 }
 
 void CodeStubAssembler::LoadIC(const LoadICParameters* p) {
@@ -4837,7 +6020,7 @@
       if_property_dictionary(this), if_found_on_receiver(this);
 
   Node* receiver = p->receiver;
-  GotoIf(WordIsSmi(receiver), &slow);
+  GotoIf(TaggedIsSmi(receiver), &slow);
   Node* receiver_map = LoadMap(receiver);
   Node* instance_type = LoadMapInstanceType(receiver_map);
   // Receivers requiring non-standard element accesses (interceptors, access
@@ -4912,7 +6095,8 @@
     const int32_t kMaxLinear = 210;
     Label stub_cache(this);
     Node* bitfield3 = LoadMapBitField3(receiver_map);
-    Node* nof = BitFieldDecodeWord<Map::NumberOfOwnDescriptorsBits>(bitfield3);
+    Node* nof =
+        DecodeWordFromWord32<Map::NumberOfOwnDescriptorsBits>(bitfield3);
     GotoIf(UintPtrGreaterThan(nof, IntPtrConstant(kMaxLinear)), &stub_cache);
     Node* descriptors = LoadMapDescriptors(receiver_map);
     Variable var_name_index(this, MachineType::PointerRepresentation());
@@ -4983,6 +6167,262 @@
   }
 }
 
+void CodeStubAssembler::HandleStoreFieldAndReturn(Node* handler_word,
+                                                  Node* holder,
+                                                  Representation representation,
+                                                  Node* value, Node* transition,
+                                                  Label* miss) {
+  bool transition_to_field = transition != nullptr;
+  Node* prepared_value = PrepareValueForWrite(value, representation, miss);
+
+  if (transition_to_field) {
+    Label storage_extended(this);
+    GotoUnless(IsSetWord<StoreHandler::ExtendStorageBits>(handler_word),
+               &storage_extended);
+    Comment("[ Extend storage");
+    ExtendPropertiesBackingStore(holder);
+    Comment("] Extend storage");
+    Goto(&storage_extended);
+
+    Bind(&storage_extended);
+  }
+
+  Node* offset = DecodeWord<StoreHandler::FieldOffsetBits>(handler_word);
+  Label if_inobject(this), if_out_of_object(this);
+  Branch(IsSetWord<StoreHandler::IsInobjectBits>(handler_word), &if_inobject,
+         &if_out_of_object);
+
+  Bind(&if_inobject);
+  {
+    StoreNamedField(holder, offset, true, representation, prepared_value,
+                    transition_to_field);
+    if (transition_to_field) {
+      StoreObjectField(holder, JSObject::kMapOffset, transition);
+    }
+    Return(value);
+  }
+
+  Bind(&if_out_of_object);
+  {
+    StoreNamedField(holder, offset, false, representation, prepared_value,
+                    transition_to_field);
+    if (transition_to_field) {
+      StoreObjectField(holder, JSObject::kMapOffset, transition);
+    }
+    Return(value);
+  }
+}
+
+void CodeStubAssembler::HandleStoreICSmiHandlerCase(Node* handler_word,
+                                                    Node* holder, Node* value,
+                                                    Node* transition,
+                                                    Label* miss) {
+  Comment(transition ? "transitioning field store" : "field store");
+
+#ifdef DEBUG
+  Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
+  if (transition) {
+    CSA_ASSERT(
+        this,
+        WordOr(WordEqual(handler_kind,
+                         IntPtrConstant(StoreHandler::kTransitionToField)),
+               WordEqual(handler_kind,
+                         IntPtrConstant(StoreHandler::kTransitionToConstant))));
+  } else {
+    CSA_ASSERT(this, WordEqual(handler_kind,
+                               IntPtrConstant(StoreHandler::kStoreField)));
+  }
+#endif
+
+  Node* field_representation =
+      DecodeWord<StoreHandler::FieldRepresentationBits>(handler_word);
+
+  Label if_smi_field(this), if_double_field(this), if_heap_object_field(this),
+      if_tagged_field(this);
+
+  GotoIf(WordEqual(field_representation, IntPtrConstant(StoreHandler::kTagged)),
+         &if_tagged_field);
+  GotoIf(WordEqual(field_representation,
+                   IntPtrConstant(StoreHandler::kHeapObject)),
+         &if_heap_object_field);
+  GotoIf(WordEqual(field_representation, IntPtrConstant(StoreHandler::kDouble)),
+         &if_double_field);
+  CSA_ASSERT(this, WordEqual(field_representation,
+                             IntPtrConstant(StoreHandler::kSmi)));
+  Goto(&if_smi_field);
+
+  Bind(&if_tagged_field);
+  {
+    Comment("store tagged field");
+    HandleStoreFieldAndReturn(handler_word, holder, Representation::Tagged(),
+                              value, transition, miss);
+  }
+
+  Bind(&if_double_field);
+  {
+    Comment("store double field");
+    HandleStoreFieldAndReturn(handler_word, holder, Representation::Double(),
+                              value, transition, miss);
+  }
+
+  Bind(&if_heap_object_field);
+  {
+    Comment("store heap object field");
+    // Generate full field type check here and then store value as Tagged.
+    Node* prepared_value =
+        PrepareValueForWrite(value, Representation::HeapObject(), miss);
+    Node* value_index_in_descriptor =
+        DecodeWord<StoreHandler::DescriptorValueIndexBits>(handler_word);
+    Node* descriptors =
+        LoadMapDescriptors(transition ? transition : LoadMap(holder));
+    Node* maybe_field_type = LoadFixedArrayElement(
+        descriptors, value_index_in_descriptor, 0, INTPTR_PARAMETERS);
+    Label do_store(this);
+    GotoIf(TaggedIsSmi(maybe_field_type), &do_store);
+    // Check that value type matches the field type.
+    {
+      Node* field_type = LoadWeakCellValue(maybe_field_type, miss);
+      Branch(WordEqual(LoadMap(prepared_value), field_type), &do_store, miss);
+    }
+    Bind(&do_store);
+    HandleStoreFieldAndReturn(handler_word, holder, Representation::Tagged(),
+                              prepared_value, transition, miss);
+  }
+
+  Bind(&if_smi_field);
+  {
+    Comment("store smi field");
+    HandleStoreFieldAndReturn(handler_word, holder, Representation::Smi(),
+                              value, transition, miss);
+  }
+}
+
+void CodeStubAssembler::HandleStoreICHandlerCase(const StoreICParameters* p,
+                                                 Node* handler, Label* miss) {
+  Label if_smi_handler(this);
+  Label try_proto_handler(this), call_handler(this);
+
+  Branch(TaggedIsSmi(handler), &if_smi_handler, &try_proto_handler);
+
+  // |handler| is a Smi, encoding what to do. See SmiHandler methods
+  // for the encoding format.
+  Bind(&if_smi_handler);
+  {
+    Node* holder = p->receiver;
+    Node* handler_word = SmiUntag(handler);
+
+    // Handle non-transitioning field stores.
+    HandleStoreICSmiHandlerCase(handler_word, holder, p->value, nullptr, miss);
+  }
+
+  Bind(&try_proto_handler);
+  {
+    GotoIf(IsCodeMap(LoadMap(handler)), &call_handler);
+    HandleStoreICProtoHandler(p, handler, miss);
+  }
+
+  // |handler| is a heap object. Must be code, call it.
+  Bind(&call_handler);
+  {
+    StoreWithVectorDescriptor descriptor(isolate());
+    TailCallStub(descriptor, handler, p->context, p->receiver, p->name,
+                 p->value, p->slot, p->vector);
+  }
+}
+
+void CodeStubAssembler::HandleStoreICProtoHandler(const StoreICParameters* p,
+                                                  Node* handler, Label* miss) {
+  // IC dispatchers rely on these assumptions holding.
+  STATIC_ASSERT(FixedArray::kLengthOffset ==
+                StoreHandler::kTransitionCellOffset);
+  DCHECK_EQ(FixedArray::OffsetOfElementAt(StoreHandler::kSmiHandlerIndex),
+            StoreHandler::kSmiHandlerOffset);
+  DCHECK_EQ(FixedArray::OffsetOfElementAt(StoreHandler::kValidityCellIndex),
+            StoreHandler::kValidityCellOffset);
+
+  // Both FixedArray and Tuple3 handlers have validity cell at the same offset.
+  Label validity_cell_check_done(this);
+  Node* validity_cell =
+      LoadObjectField(handler, StoreHandler::kValidityCellOffset);
+  GotoIf(WordEqual(validity_cell, IntPtrConstant(0)),
+         &validity_cell_check_done);
+  Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
+  GotoIf(WordNotEqual(cell_value,
+                      SmiConstant(Smi::FromInt(Map::kPrototypeChainValid))),
+         miss);
+  Goto(&validity_cell_check_done);
+
+  Bind(&validity_cell_check_done);
+  Node* smi_handler = LoadObjectField(handler, StoreHandler::kSmiHandlerOffset);
+  CSA_ASSERT(this, TaggedIsSmi(smi_handler));
+
+  Node* maybe_transition_cell =
+      LoadObjectField(handler, StoreHandler::kTransitionCellOffset);
+  Label array_handler(this), tuple_handler(this);
+  Branch(TaggedIsSmi(maybe_transition_cell), &array_handler, &tuple_handler);
+
+  Variable var_transition(this, MachineRepresentation::kTagged);
+  Label if_transition(this), if_transition_to_constant(this);
+  Bind(&tuple_handler);
+  {
+    Node* transition = LoadWeakCellValue(maybe_transition_cell, miss);
+    var_transition.Bind(transition);
+    Goto(&if_transition);
+  }
+
+  Bind(&array_handler);
+  {
+    Node* length = SmiUntag(maybe_transition_cell);
+    BuildFastLoop(MachineType::PointerRepresentation(),
+                  IntPtrConstant(StoreHandler::kFirstPrototypeIndex), length,
+                  [this, p, handler, miss](CodeStubAssembler*, Node* current) {
+                    Node* prototype_cell = LoadFixedArrayElement(
+                        handler, current, 0, INTPTR_PARAMETERS);
+                    CheckPrototype(prototype_cell, p->name, miss);
+                  },
+                  1, IndexAdvanceMode::kPost);
+
+    Node* maybe_transition_cell = LoadFixedArrayElement(
+        handler, IntPtrConstant(StoreHandler::kTransitionCellIndex), 0,
+        INTPTR_PARAMETERS);
+    Node* transition = LoadWeakCellValue(maybe_transition_cell, miss);
+    var_transition.Bind(transition);
+    Goto(&if_transition);
+  }
+
+  Bind(&if_transition);
+  {
+    Node* holder = p->receiver;
+    Node* transition = var_transition.value();
+    Node* handler_word = SmiUntag(smi_handler);
+
+    GotoIf(IsSetWord32<Map::Deprecated>(LoadMapBitField3(transition)), miss);
+
+    Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
+    GotoIf(WordEqual(handler_kind,
+                     IntPtrConstant(StoreHandler::kTransitionToConstant)),
+           &if_transition_to_constant);
+
+    // Handle transitioning field stores.
+    HandleStoreICSmiHandlerCase(handler_word, holder, p->value, transition,
+                                miss);
+
+    Bind(&if_transition_to_constant);
+    {
+      // Check that constant matches value.
+      Node* value_index_in_descriptor =
+          DecodeWord<StoreHandler::DescriptorValueIndexBits>(handler_word);
+      Node* descriptors = LoadMapDescriptors(transition);
+      Node* constant = LoadFixedArrayElement(
+          descriptors, value_index_in_descriptor, 0, INTPTR_PARAMETERS);
+      GotoIf(WordNotEqual(p->value, constant), miss);
+
+      StoreObjectField(p->receiver, JSObject::kMapOffset, transition);
+      Return(p->value);
+    }
+  }
+}
+
 void CodeStubAssembler::StoreIC(const StoreICParameters* p) {
   Variable var_handler(this, MachineRepresentation::kTagged);
   // TODO(ishell): defer blocks when it works.
@@ -4999,9 +6439,7 @@
   Bind(&if_handler);
   {
     Comment("StoreIC_if_handler");
-    StoreWithVectorDescriptor descriptor(isolate());
-    TailCallStub(descriptor, var_handler.value(), p->context, p->receiver,
-                 p->name, p->value, p->slot, p->vector);
+    HandleStoreICHandlerCase(p, var_handler.value(), &miss);
   }
 
   Bind(&try_polymorphic);
@@ -5032,15 +6470,95 @@
   }
 }
 
+void CodeStubAssembler::KeyedStoreIC(const StoreICParameters* p,
+                                     LanguageMode language_mode) {
+  Variable var_handler(this, MachineRepresentation::kTagged);
+  // This is to make the |miss| label see |var_handler| bound on all paths.
+  var_handler.Bind(IntPtrConstant(0));
+
+  // TODO(ishell): defer blocks when it works.
+  Label if_handler(this, &var_handler), try_polymorphic(this),
+      try_megamorphic(this /*, Label::kDeferred*/),
+      try_polymorphic_name(this /*, Label::kDeferred*/),
+      miss(this /*, Label::kDeferred*/);
+
+  Node* receiver_map = LoadReceiverMap(p->receiver);
+
+  // Check monomorphic case.
+  Node* feedback =
+      TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+                         &var_handler, &try_polymorphic);
+  Bind(&if_handler);
+  {
+    Comment("KeyedStoreIC_if_handler");
+    HandleStoreICHandlerCase(p, var_handler.value(), &miss);
+  }
+
+  Bind(&try_polymorphic);
+  {
+    // Check polymorphic case.
+    Comment("KeyedStoreIC_try_polymorphic");
+    GotoUnless(
+        WordEqual(LoadMap(feedback), LoadRoot(Heap::kFixedArrayMapRootIndex)),
+        &try_megamorphic);
+    Label if_transition_handler(this);
+    Variable var_transition_map_cell(this, MachineRepresentation::kTagged);
+    HandleKeyedStorePolymorphicCase(receiver_map, feedback, &if_handler,
+                                    &var_handler, &if_transition_handler,
+                                    &var_transition_map_cell, &miss);
+    Bind(&if_transition_handler);
+    Comment("KeyedStoreIC_polymorphic_transition");
+    Node* transition_map =
+        LoadWeakCellValue(var_transition_map_cell.value(), &miss);
+    StoreTransitionDescriptor descriptor(isolate());
+    TailCallStub(descriptor, var_handler.value(), p->context, p->receiver,
+                 p->name, transition_map, p->value, p->slot, p->vector);
+  }
+
+  Bind(&try_megamorphic);
+  {
+    // Check megamorphic case.
+    Comment("KeyedStoreIC_try_megamorphic");
+    GotoUnless(
+        WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+        &try_polymorphic_name);
+    TailCallStub(
+        CodeFactory::KeyedStoreIC_Megamorphic(isolate(), language_mode),
+        p->context, p->receiver, p->name, p->value, p->slot, p->vector);
+  }
+
+  Bind(&try_polymorphic_name);
+  {
+    // We might have a name in feedback, and a fixed array in the next slot.
+    Comment("KeyedStoreIC_try_polymorphic_name");
+    GotoUnless(WordEqual(feedback, p->name), &miss);
+    // If the name comparison succeeded, we know we have a FixedArray with
+    // at least one map/handler pair.
+    Node* offset = ElementOffsetFromIndex(
+        p->slot, FAST_HOLEY_ELEMENTS, SMI_PARAMETERS,
+        FixedArray::kHeaderSize + kPointerSize - kHeapObjectTag);
+    Node* array = Load(MachineType::AnyTagged(), p->vector, offset);
+    HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler, &miss,
+                          1);
+  }
+
+  Bind(&miss);
+  {
+    Comment("KeyedStoreIC_miss");
+    TailCallRuntime(Runtime::kKeyedStoreIC_Miss, p->context, p->value, p->slot,
+                    p->vector, p->receiver, p->name);
+  }
+}
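+
+// The dispatch above follows the usual IC state progression; roughly (a
+// hedged outline, not real code):
+//
+//   if (feedback[slot] is WeakCell(receiver_map))   // monomorphic
+//     run the handler (HandleStoreICHandlerCase);
+//   else if (feedback[slot] is FixedArray)          // polymorphic
+//     search map/handler pairs, tail-calling StoreTransition for
+//     transitioning entries;
+//   else if (feedback[slot] == megamorphic_symbol)  // megamorphic
+//     tail-call KeyedStoreIC_Megamorphic;
+//   else if (feedback[slot] == name)                // keyed, fixed name
+//     search the FixedArray stored in slot + 1;
+//   else
+//     miss to Runtime::kKeyedStoreIC_Miss.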
+
 void CodeStubAssembler::LoadGlobalIC(const LoadICParameters* p) {
   Label try_handler(this), miss(this);
   Node* weak_cell =
       LoadFixedArrayElement(p->vector, p->slot, 0, SMI_PARAMETERS);
-  AssertInstanceType(weak_cell, WEAK_CELL_TYPE);
+  CSA_ASSERT(this, HasInstanceType(weak_cell, WEAK_CELL_TYPE));
 
   // Load value or try handler case if the {weak_cell} is cleared.
   Node* property_cell = LoadWeakCellValue(weak_cell, &try_handler);
-  AssertInstanceType(property_cell, PROPERTY_CELL_TYPE);
+  CSA_ASSERT(this, HasInstanceType(property_cell, PROPERTY_CELL_TYPE));
 
   Node* value = LoadObjectField(property_cell, PropertyCell::kValueOffset);
   GotoIf(WordEqual(value, TheHoleConstant()), &miss);
@@ -5054,7 +6572,7 @@
            &miss);
 
     // In this case {handler} must be a Code object.
-    AssertInstanceType(handler, CODE_TYPE);
+    CSA_ASSERT(this, HasInstanceType(handler, CODE_TYPE));
     LoadWithVectorDescriptor descriptor(isolate());
     Node* native_context = LoadNativeContext(p->context);
     Node* receiver =
@@ -5086,8 +6604,9 @@
          FixedArrayBase::GetMaxLengthForNewSpaceAllocation(kind));
   // The size of a new properties backing store is guaranteed to be small
   // enough that the new backing store will be allocated in new space.
-  Assert(UintPtrLessThan(new_capacity, IntPtrConstant(kMaxNumberOfDescriptors +
-                                                      JSObject::kFieldsAdded)));
+  CSA_ASSERT(this, UintPtrLessThan(new_capacity,
+                                   IntPtrConstant(kMaxNumberOfDescriptors +
+                                                  JSObject::kFieldsAdded)));
 
   Node* new_properties = AllocateFixedArray(kind, new_capacity, mode);
 
@@ -5106,30 +6625,13 @@
                                               Representation representation,
                                               Label* bailout) {
   if (representation.IsDouble()) {
-    Variable var_value(this, MachineRepresentation::kFloat64);
-    Label if_smi(this), if_heap_object(this), done(this);
-    Branch(WordIsSmi(value), &if_smi, &if_heap_object);
-    Bind(&if_smi);
-    {
-      var_value.Bind(SmiToFloat64(value));
-      Goto(&done);
-    }
-    Bind(&if_heap_object);
-    {
-      GotoUnless(
-          Word32Equal(LoadInstanceType(value), Int32Constant(HEAP_NUMBER_TYPE)),
-          bailout);
-      var_value.Bind(LoadHeapNumberValue(value));
-      Goto(&done);
-    }
-    Bind(&done);
-    value = var_value.value();
+    value = TryTaggedToFloat64(value, bailout);
   } else if (representation.IsHeapObject()) {
     // Field type is checked by the handler, here we only check if the value
     // is a heap object.
-    GotoIf(WordIsSmi(value), bailout);
+    GotoIf(TaggedIsSmi(value), bailout);
   } else if (representation.IsSmi()) {
-    GotoUnless(WordIsSmi(value), bailout);
+    GotoUnless(TaggedIsSmi(value), bailout);
   } else {
     DCHECK(representation.IsTagged());
   }
@@ -5211,7 +6713,7 @@
 
   bool is_load = value == nullptr;
 
-  GotoUnless(WordIsSmi(key), bailout);
+  GotoUnless(TaggedIsSmi(key), bailout);
   key = SmiUntag(key);
   GotoIf(IntPtrLessThan(key, IntPtrConstant(0)), bailout);
 
@@ -5234,7 +6736,7 @@
 
   Bind(&if_mapped);
   {
-    Assert(WordIsSmi(mapped_index));
+    CSA_ASSERT(this, TaggedIsSmi(mapped_index));
     mapped_index = SmiUntag(mapped_index);
     Node* the_context = LoadFixedArrayElement(elements, IntPtrConstant(0), 0,
                                               INTPTR_PARAMETERS);
@@ -5246,7 +6748,7 @@
     if (is_load) {
       Node* result = LoadFixedArrayElement(the_context, mapped_index, 0,
                                            INTPTR_PARAMETERS);
-      Assert(WordNotEqual(result, TheHoleConstant()));
+      CSA_ASSERT(this, WordNotEqual(result, TheHoleConstant()));
       var_result.Bind(result);
     } else {
       StoreFixedArrayElement(the_context, mapped_index, value,
@@ -5294,21 +6796,6 @@
               IntPtrConstant(offset));
 }
 
-Node* CodeStubAssembler::ClampedToUint8(Node* int32_value) {
-  Label done(this);
-  Node* int32_zero = Int32Constant(0);
-  Node* int32_255 = Int32Constant(255);
-  Variable var_value(this, MachineRepresentation::kWord32);
-  var_value.Bind(int32_value);
-  GotoIf(Uint32LessThanOrEqual(int32_value, int32_255), &done);
-  var_value.Bind(int32_zero);
-  GotoIf(Int32LessThan(int32_value, int32_zero), &done);
-  var_value.Bind(int32_255);
-  Goto(&done);
-  Bind(&done);
-  return var_value.value();
-}
-
 namespace {
 
 // Converts typed array elements kind to a machine representations.
@@ -5341,7 +6828,8 @@
                                      ParameterMode mode) {
   if (IsFixedTypedArrayElementsKind(kind)) {
     if (kind == UINT8_CLAMPED_ELEMENTS) {
-      value = ClampedToUint8(value);
+      CSA_ASSERT(this,
+                 Word32Equal(value, Word32And(Int32Constant(0xff), value)));
     }
     Node* offset = ElementOffsetFromIndex(index, kind, mode, 0);
     MachineRepresentation rep = ElementsKindToMachineRepresentation(kind);
@@ -5360,6 +6848,106 @@
   }
 }
 
+Node* CodeStubAssembler::Int32ToUint8Clamped(Node* int32_value) {
+  Label done(this);
+  Node* int32_zero = Int32Constant(0);
+  Node* int32_255 = Int32Constant(255);
+  Variable var_value(this, MachineRepresentation::kWord32);
+  var_value.Bind(int32_value);
+  GotoIf(Uint32LessThanOrEqual(int32_value, int32_255), &done);
+  var_value.Bind(int32_zero);
+  GotoIf(Int32LessThan(int32_value, int32_zero), &done);
+  var_value.Bind(int32_255);
+  Goto(&done);
+  Bind(&done);
+  return var_value.value();
+}
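+
+// A plain C++ equivalent of the saturating conversion above (a sketch; the
+// unsigned comparison folds the common 0..255 case into a single branch,
+// exactly like the Uint32LessThanOrEqual fast path):
+//
+//   uint8_t Int32ToUint8Clamped(int32_t v) {
+//     if (static_cast<uint32_t>(v) <= 255) return static_cast<uint8_t>(v);
+//     return v < 0 ? 0 : 255;
+//   }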
+
+Node* CodeStubAssembler::Float64ToUint8Clamped(Node* float64_value) {
+  Label done(this);
+  Variable var_value(this, MachineRepresentation::kWord32);
+  var_value.Bind(Int32Constant(0));
+  GotoIf(Float64LessThanOrEqual(float64_value, Float64Constant(0.0)), &done);
+  var_value.Bind(Int32Constant(255));
+  GotoIf(Float64LessThanOrEqual(Float64Constant(255.0), float64_value), &done);
+  {
+    Node* rounded_value = Float64RoundToEven(float64_value);
+    var_value.Bind(TruncateFloat64ToWord32(rounded_value));
+    Goto(&done);
+  }
+  Bind(&done);
+  return var_value.value();
+}
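+
+// A plain C++ sketch of the sequence above, assuming the default
+// round-to-nearest-even FP environment; the explicit NaN check mirrors
+// TruncateFloat64ToWord32, which maps NaN to 0:
+//
+//   uint8_t Float64ToUint8Clamped(double v) {
+//     if (std::isnan(v) || v <= 0.0) return 0;
+//     if (v >= 255.0) return 255;
+//     return static_cast<uint8_t>(std::nearbyint(v));  // round half to even
+//   }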
+
+Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
+    Node* input, ElementsKind elements_kind, Label* bailout) {
+  DCHECK(IsFixedTypedArrayElementsKind(elements_kind));
+
+  MachineRepresentation rep;
+  switch (elements_kind) {
+    case UINT8_ELEMENTS:
+    case INT8_ELEMENTS:
+    case UINT16_ELEMENTS:
+    case INT16_ELEMENTS:
+    case UINT32_ELEMENTS:
+    case INT32_ELEMENTS:
+    case UINT8_CLAMPED_ELEMENTS:
+      rep = MachineRepresentation::kWord32;
+      break;
+    case FLOAT32_ELEMENTS:
+      rep = MachineRepresentation::kFloat32;
+      break;
+    case FLOAT64_ELEMENTS:
+      rep = MachineRepresentation::kFloat64;
+      break;
+    default:
+      UNREACHABLE();
+      return nullptr;
+  }
+
+  Variable var_result(this, rep);
+  Label done(this, &var_result), if_smi(this);
+  GotoIf(TaggedIsSmi(input), &if_smi);
+  // Try to convert a heap number to a Smi.
+  GotoUnless(IsHeapNumberMap(LoadMap(input)), bailout);
+  {
+    Node* value = LoadHeapNumberValue(input);
+    if (rep == MachineRepresentation::kWord32) {
+      if (elements_kind == UINT8_CLAMPED_ELEMENTS) {
+        value = Float64ToUint8Clamped(value);
+      } else {
+        value = TruncateFloat64ToWord32(value);
+      }
+    } else if (rep == MachineRepresentation::kFloat32) {
+      value = TruncateFloat64ToFloat32(value);
+    } else {
+      DCHECK_EQ(MachineRepresentation::kFloat64, rep);
+    }
+    var_result.Bind(value);
+    Goto(&done);
+  }
+
+  Bind(&if_smi);
+  {
+    Node* value = SmiToWord32(input);
+    if (rep == MachineRepresentation::kFloat32) {
+      value = RoundInt32ToFloat32(value);
+    } else if (rep == MachineRepresentation::kFloat64) {
+      value = ChangeInt32ToFloat64(value);
+    } else {
+      DCHECK_EQ(MachineRepresentation::kWord32, rep);
+      if (elements_kind == UINT8_CLAMPED_ELEMENTS) {
+        value = Int32ToUint8Clamped(value);
+      }
+    }
+    var_result.Bind(value);
+    Goto(&done);
+  }
+
+  Bind(&done);
+  return var_result.value();
+}
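+
+// Summary of the conversions performed above (anything that is neither a Smi
+// nor a HeapNumber bails out before reaching the store):
+//
+//   elements kind        Smi input                 HeapNumber input
+//   int8/16/32 kinds     SmiToWord32               TruncateFloat64ToWord32
+//   UINT8_CLAMPED        SmiToWord32 then          Float64ToUint8Clamped
+//                        Int32ToUint8Clamped
+//   FLOAT32              RoundInt32ToFloat32       TruncateFloat64ToFloat32
+//   FLOAT64              ChangeInt32ToFloat64      (value used as-is)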
+
 void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
                                          bool is_jsarray,
                                          ElementsKind elements_kind,
@@ -5382,16 +6970,7 @@
     // TODO(ishell): call ToNumber() on value and don't bail out, but be
     // careful to call it only once if we decide to bail out because of
     // bounds checks.
 
-    if (IsFixedFloatElementsKind(elements_kind)) {
-      // TODO(ishell): move float32 truncation into PrepareValueForWrite.
-      value = PrepareValueForWrite(value, Representation::Double(), bailout);
-      if (elements_kind == FLOAT32_ELEMENTS) {
-        value = TruncateFloat64ToFloat32(value);
-      }
-    } else {
-      // TODO(ishell): It's fine for word8/16/32 to truncate the result.
-      value = TryToIntptr(value, bailout);
-    }
+    value = PrepareValueForWriteToTypedArray(value, elements_kind, bailout);
 
     // There must be no allocations between the buffer load and
     // the actual store to the backing store, because GC may decide that
@@ -5443,9 +7022,9 @@
   // a smi before manipulating the backing store. Otherwise the backing store
   // may be left in an invalid state.
   if (IsFastSmiElementsKind(elements_kind)) {
-    GotoUnless(WordIsSmi(value), bailout);
+    GotoUnless(TaggedIsSmi(value), bailout);
   } else if (IsFastDoubleElementsKind(elements_kind)) {
-    value = PrepareValueForWrite(value, Representation::Double(), bailout);
+    value = TryTaggedToFloat64(value, bailout);
   }
 
   if (IsGrowStoreMode(store_mode)) {
@@ -5583,42 +7162,44 @@
 
   Node* new_space_top_address = ExternalConstant(
       ExternalReference::new_space_allocation_top_address(isolate()));
-  const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
-  const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+  const int kMementoMapOffset = JSArray::kSize;
+  const int kMementoLastWordOffset =
+      kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
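+  // Assumed layout (sketch): a memento, if present, directly follows the
+  // JSArray, so kMementoMapOffset is its first (map) word and
+  // kMementoLastWordOffset its last word, both relative to the untagged
+  // object start:
+  //
+  //   | JSArray (kSize bytes) | AllocationMemento (kSize bytes) |
+  //   ^ object                ^ kMementoMapOffset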
 
   // Bail out if the object is not in new space.
   Node* object_page = PageFromAddress(object);
   {
-    const int mask =
-        (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
-    Node* page_flags = Load(MachineType::IntPtr(), object_page);
-    GotoIf(
-        WordEqual(WordAnd(page_flags, IntPtrConstant(mask)), IntPtrConstant(0)),
-        &no_memento_found);
+    Node* page_flags = Load(MachineType::IntPtr(), object_page,
+                            IntPtrConstant(Page::kFlagsOffset));
+    GotoIf(WordEqual(WordAnd(page_flags,
+                             IntPtrConstant(MemoryChunk::kIsInNewSpaceMask)),
+                     IntPtrConstant(0)),
+           &no_memento_found);
   }
 
-  Node* memento_end = IntPtrAdd(object, IntPtrConstant(kMementoEndOffset));
-  Node* memento_end_page = PageFromAddress(memento_end);
+  Node* memento_last_word = IntPtrAdd(
+      object, IntPtrConstant(kMementoLastWordOffset - kHeapObjectTag));
+  Node* memento_last_word_page = PageFromAddress(memento_last_word);
 
   Node* new_space_top = Load(MachineType::Pointer(), new_space_top_address);
   Node* new_space_top_page = PageFromAddress(new_space_top);
 
-  // If the object is in new space, we need to check whether it is and
-  // respective potential memento object on the same page as the current top.
-  GotoIf(WordEqual(memento_end_page, new_space_top_page), &top_check);
+  // If the object is in new space, we need to check whether the respective
+  // potential memento object is on the same page as the current top.
+  GotoIf(WordEqual(memento_last_word_page, new_space_top_page), &top_check);
 
   // The object is on a different page than the allocation top. Bail out if
   // the object sits on the page boundary, as no memento can follow and we
   // cannot touch the memory following it.
-  Branch(WordEqual(object_page, memento_end_page), &map_check,
+  Branch(WordEqual(object_page, memento_last_word_page), &map_check,
          &no_memento_found);
 
   // If top is on the same page as the current object, we need to check whether
   // we are below top.
   Bind(&top_check);
   {
-    Branch(UintPtrGreaterThan(memento_end, new_space_top), &no_memento_found,
-           &map_check);
+    Branch(UintPtrGreaterThanOrEqual(memento_last_word, new_space_top),
+           &no_memento_found, &map_check);
   }
 
   // Memento map check.
@@ -5638,8 +7219,9 @@
 }
 
 Node* CodeStubAssembler::EnumLength(Node* map) {
+  CSA_ASSERT(this, IsMap(map));
   Node* bitfield_3 = LoadMapBitField3(map);
-  Node* enum_length = BitFieldDecode<Map::EnumLengthBits>(bitfield_3);
+  Node* enum_length = DecodeWordFromWord32<Map::EnumLengthBits>(bitfield_3);
   return SmiTag(enum_length);
 }
 
@@ -5661,8 +7243,8 @@
     Node* invalid_enum_cache_sentinel =
         SmiConstant(Smi::FromInt(kInvalidEnumCacheSentinel));
     Node* enum_length = EnumLength(current_map.value());
-    BranchIfWordEqual(enum_length, invalid_enum_cache_sentinel, use_runtime,
-                      &loop);
+    Branch(WordEqual(enum_length, invalid_enum_cache_sentinel), use_runtime,
+           &loop);
   }
 
   // Check that there are no elements. |current_js_object| contains
@@ -5673,24 +7255,24 @@
     Node* elements = LoadElements(current_js_object.value());
     Node* empty_fixed_array = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
     // Check that there are no elements.
-    BranchIfWordEqual(elements, empty_fixed_array, &if_no_elements,
-                      &if_elements);
+    Branch(WordEqual(elements, empty_fixed_array), &if_no_elements,
+           &if_elements);
     Bind(&if_elements);
     {
       // Second chance, the object may be using the empty slow element
       // dictionary.
       Node* slow_empty_dictionary =
           LoadRoot(Heap::kEmptySlowElementDictionaryRootIndex);
-      BranchIfWordNotEqual(elements, slow_empty_dictionary, use_runtime,
-                           &if_no_elements);
+      Branch(WordNotEqual(elements, slow_empty_dictionary), use_runtime,
+             &if_no_elements);
     }
 
     Bind(&if_no_elements);
     {
       // Update map prototype.
       current_js_object.Bind(LoadMapPrototype(current_map.value()));
-      BranchIfWordEqual(current_js_object.value(), NullConstant(), use_cache,
-                        &next);
+      Branch(WordEqual(current_js_object.value(), NullConstant()), use_cache,
+             &next);
     }
   }
 
@@ -5699,8 +7281,8 @@
     // For all objects but the receiver, check that the cache is empty.
     current_map.Bind(LoadMap(current_js_object.value()));
     Node* enum_length = EnumLength(current_map.value());
-    Node* zero_constant = SmiConstant(Smi::FromInt(0));
-    BranchIf(WordEqual(enum_length, zero_constant), &loop, use_runtime);
+    Node* zero_constant = SmiConstant(Smi::kZero);
+    Branch(WordEqual(enum_length, zero_constant), &loop, use_runtime);
   }
 }
 
@@ -5768,5 +7350,1999 @@
   return cell;
 }
 
+void CodeStubAssembler::BuildFastLoop(
+    const CodeStubAssembler::VariableList& vars,
+    MachineRepresentation index_rep, Node* start_index, Node* end_index,
+    std::function<void(CodeStubAssembler* assembler, Node* index)> body,
+    int increment, IndexAdvanceMode mode) {
+  Variable var(this, index_rep);
+  VariableList vars_copy(vars, zone());
+  vars_copy.Add(&var, zone());
+  var.Bind(start_index);
+  Label loop(this, vars_copy);
+  Label after_loop(this);
+  // Introduce an explicit second check of the termination condition before
+  // the loop; this helps TurboFan generate better code. If there's only a
+  // single check, the CodeStubAssembler forces it to be at the beginning of
+  // the loop, requiring a backwards branch at the end of the loop (it's not
+  // possible to force the loop header check at the end of the loop and
+  // branch forward to it from the pre-header). The extra branch is slower in
+  // the case that the loop actually iterates.
+  Branch(WordEqual(var.value(), end_index), &after_loop, &loop);
+  Bind(&loop);
+  {
+    if (mode == IndexAdvanceMode::kPre) {
+      var.Bind(IntPtrAdd(var.value(), IntPtrConstant(increment)));
+    }
+    body(this, var.value());
+    if (mode == IndexAdvanceMode::kPost) {
+      var.Bind(IntPtrAdd(var.value(), IntPtrConstant(increment)));
+    }
+    Branch(WordNotEqual(var.value(), end_index), &loop, &after_loop);
+  }
+  Bind(&after_loop);
+}
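+
+// The rotation described above is the classic while-to-do-while
+// transformation; a minimal C++ analogue (names illustrative, not CSA API):
+//
+//   // naive: header check plus a backwards branch on every iteration
+//   for (intptr_t i = start; i != end; i += increment) body(i);
+//
+//   // rotated: one explicit pre-check, then a bottom-tested loop
+//   if (start != end) {
+//     intptr_t i = start;
+//     do {
+//       body(i);
+//       i += increment;
+//     } while (i != end);
+//   }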
+
+void CodeStubAssembler::BuildFastFixedArrayForEach(
+    compiler::Node* fixed_array, ElementsKind kind,
+    compiler::Node* first_element_inclusive,
+    compiler::Node* last_element_exclusive,
+    std::function<void(CodeStubAssembler* assembler,
+                       compiler::Node* fixed_array, compiler::Node* offset)>
+        body,
+    ParameterMode mode, ForEachDirection direction) {
+  STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
+  int32_t first_val;
+  bool constant_first = ToInt32Constant(first_element_inclusive, first_val);
+  int32_t last_val;
+  bool constant_last = ToInt32Constant(last_element_exclusive, last_val);
+  if (constant_first && constant_last) {
+    int delta = last_val - first_val;
+    DCHECK_GE(delta, 0);
+    if (delta <= kElementLoopUnrollThreshold) {
+      if (direction == ForEachDirection::kForward) {
+        for (int i = first_val; i < last_val; ++i) {
+          Node* index = IntPtrConstant(i);
+          Node* offset =
+              ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
+                                     FixedArray::kHeaderSize - kHeapObjectTag);
+          body(this, fixed_array, offset);
+        }
+      } else {
+        for (int i = last_val - 1; i >= first_val; --i) {
+          Node* index = IntPtrConstant(i);
+          Node* offset =
+              ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
+                                     FixedArray::kHeaderSize - kHeapObjectTag);
+          body(this, fixed_array, offset);
+        }
+      }
+      return;
+    }
+  }
+
+  Node* start =
+      ElementOffsetFromIndex(first_element_inclusive, kind, mode,
+                             FixedArray::kHeaderSize - kHeapObjectTag);
+  Node* limit =
+      ElementOffsetFromIndex(last_element_exclusive, kind, mode,
+                             FixedArray::kHeaderSize - kHeapObjectTag);
+  if (direction == ForEachDirection::kReverse) std::swap(start, limit);
+
+  int increment = IsFastDoubleElementsKind(kind) ? kDoubleSize : kPointerSize;
+  BuildFastLoop(
+      MachineType::PointerRepresentation(), start, limit,
+      [fixed_array, body](CodeStubAssembler* assembler, Node* offset) {
+        body(assembler, fixed_array, offset);
+      },
+      direction == ForEachDirection::kReverse ? -increment : increment,
+      direction == ForEachDirection::kReverse ? IndexAdvanceMode::kPre
+                                              : IndexAdvanceMode::kPost);
+}
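+
+// The constant-bounds path above is ordinary full unrolling; in plain C++
+// terms (a sketch, reusing the threshold name for illustration):
+//
+//   if (first and last are compile-time constants &&
+//       last - first <= kElementLoopUnrollThreshold) {
+//     for (int i = first; i < last; ++i) emit(body, offset_of(i));
+//   } else {
+//     // emit a BuildFastLoop over byte offsets, stepping by the element
+//     // size (start and limit are swapped for the reverse direction)
+//   }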
+
+void CodeStubAssembler::BranchIfNumericRelationalComparison(
+    RelationalComparisonMode mode, compiler::Node* lhs, compiler::Node* rhs,
+    Label* if_true, Label* if_false) {
+  typedef compiler::Node Node;
+
+  // Shared entry for floating point comparison.
+  Label do_fcmp(this);
+  Variable var_fcmp_lhs(this, MachineRepresentation::kFloat64),
+      var_fcmp_rhs(this, MachineRepresentation::kFloat64);
+
+  // Check if the {lhs} is a Smi or a HeapObject.
+  Label if_lhsissmi(this), if_lhsisnotsmi(this);
+  Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+
+  Bind(&if_lhsissmi);
+  {
+    // Check if {rhs} is a Smi or a HeapObject.
+    Label if_rhsissmi(this), if_rhsisnotsmi(this);
+    Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+    Bind(&if_rhsissmi);
+    {
+      // Both {lhs} and {rhs} are Smi, so just perform a fast Smi comparison.
+      switch (mode) {
+        case kLessThan:
+          BranchIfSmiLessThan(lhs, rhs, if_true, if_false);
+          break;
+        case kLessThanOrEqual:
+          BranchIfSmiLessThanOrEqual(lhs, rhs, if_true, if_false);
+          break;
+        case kGreaterThan:
+          BranchIfSmiLessThan(rhs, lhs, if_true, if_false);
+          break;
+        case kGreaterThanOrEqual:
+          BranchIfSmiLessThanOrEqual(rhs, lhs, if_true, if_false);
+          break;
+      }
+    }
+
+    Bind(&if_rhsisnotsmi);
+    {
+      CSA_ASSERT(this, WordEqual(LoadMap(rhs), HeapNumberMapConstant()));
+      // Convert the {lhs} and {rhs} to floating point values, and
+      // perform a floating point comparison.
+      var_fcmp_lhs.Bind(SmiToFloat64(lhs));
+      var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
+      Goto(&do_fcmp);
+    }
+  }
+
+  Bind(&if_lhsisnotsmi);
+  {
+    CSA_ASSERT(this, WordEqual(LoadMap(lhs), HeapNumberMapConstant()));
+
+    // Check if {rhs} is a Smi or a HeapObject.
+    Label if_rhsissmi(this), if_rhsisnotsmi(this);
+    Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+    Bind(&if_rhsissmi);
+    {
+      // Convert the {lhs} and {rhs} to floating point values, and
+      // perform a floating point comparison.
+      var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
+      var_fcmp_rhs.Bind(SmiToFloat64(rhs));
+      Goto(&do_fcmp);
+    }
+
+    Bind(&if_rhsisnotsmi);
+    {
+      CSA_ASSERT(this, WordEqual(LoadMap(rhs), HeapNumberMapConstant()));
+
+      // Convert the {lhs} and {rhs} to floating point values, and
+      // perform a floating point comparison.
+      var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
+      var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
+      Goto(&do_fcmp);
+    }
+  }
+
+  Bind(&do_fcmp);
+  {
+    // Load the {lhs} and {rhs} floating point values.
+    Node* lhs = var_fcmp_lhs.value();
+    Node* rhs = var_fcmp_rhs.value();
+
+    // Perform a fast floating point comparison.
+    switch (mode) {
+      case kLessThan:
+        Branch(Float64LessThan(lhs, rhs), if_true, if_false);
+        break;
+      case kLessThanOrEqual:
+        Branch(Float64LessThanOrEqual(lhs, rhs), if_true, if_false);
+        break;
+      case kGreaterThan:
+        Branch(Float64GreaterThan(lhs, rhs), if_true, if_false);
+        break;
+      case kGreaterThanOrEqual:
+        Branch(Float64GreaterThanOrEqual(lhs, rhs), if_true, if_false);
+        break;
+    }
+  }
+}
+
+void CodeStubAssembler::GotoUnlessNumberLessThan(compiler::Node* lhs,
+                                                 compiler::Node* rhs,
+                                                 Label* if_false) {
+  Label if_true(this);
+  BranchIfNumericRelationalComparison(kLessThan, lhs, rhs, &if_true, if_false);
+  Bind(&if_true);
+}
+
+compiler::Node* CodeStubAssembler::RelationalComparison(
+    RelationalComparisonMode mode, compiler::Node* lhs, compiler::Node* rhs,
+    compiler::Node* context) {
+  typedef compiler::Node Node;
+
+  Label return_true(this), return_false(this), end(this);
+  Variable result(this, MachineRepresentation::kTagged);
+
+  // Shared entry for floating point comparison.
+  Label do_fcmp(this);
+  Variable var_fcmp_lhs(this, MachineRepresentation::kFloat64),
+      var_fcmp_rhs(this, MachineRepresentation::kFloat64);
+
+  // We might need to loop several times due to ToPrimitive and/or ToNumber
+  // conversions.
+  Variable var_lhs(this, MachineRepresentation::kTagged),
+      var_rhs(this, MachineRepresentation::kTagged);
+  Variable* loop_vars[2] = {&var_lhs, &var_rhs};
+  Label loop(this, 2, loop_vars);
+  var_lhs.Bind(lhs);
+  var_rhs.Bind(rhs);
+  Goto(&loop);
+  Bind(&loop);
+  {
+    // Load the current {lhs} and {rhs} values.
+    lhs = var_lhs.value();
+    rhs = var_rhs.value();
+
+    // Check if the {lhs} is a Smi or a HeapObject.
+    Label if_lhsissmi(this), if_lhsisnotsmi(this);
+    Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+
+    Bind(&if_lhsissmi);
+    {
+      // Check if {rhs} is a Smi or a HeapObject.
+      Label if_rhsissmi(this), if_rhsisnotsmi(this);
+      Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+      Bind(&if_rhsissmi);
+      {
+        // Both {lhs} and {rhs} are Smi, so just perform a fast Smi comparison.
+        switch (mode) {
+          case kLessThan:
+            BranchIfSmiLessThan(lhs, rhs, &return_true, &return_false);
+            break;
+          case kLessThanOrEqual:
+            BranchIfSmiLessThanOrEqual(lhs, rhs, &return_true, &return_false);
+            break;
+          case kGreaterThan:
+            BranchIfSmiLessThan(rhs, lhs, &return_true, &return_false);
+            break;
+          case kGreaterThanOrEqual:
+            BranchIfSmiLessThanOrEqual(rhs, lhs, &return_true, &return_false);
+            break;
+        }
+      }
+
+      Bind(&if_rhsisnotsmi);
+      {
+        // Load the map of {rhs}.
+        Node* rhs_map = LoadMap(rhs);
+
+        // Check if the {rhs} is a HeapNumber.
+        Label if_rhsisnumber(this), if_rhsisnotnumber(this, Label::kDeferred);
+        Branch(IsHeapNumberMap(rhs_map), &if_rhsisnumber, &if_rhsisnotnumber);
+
+        Bind(&if_rhsisnumber);
+        {
+          // Convert the {lhs} and {rhs} to floating point values, and
+          // perform a floating point comparison.
+          var_fcmp_lhs.Bind(SmiToFloat64(lhs));
+          var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
+          Goto(&do_fcmp);
+        }
+
+        Bind(&if_rhsisnotnumber);
+        {
+          // Convert the {rhs} to a Number; we don't need to perform the
+          // dedicated ToPrimitive(rhs, hint Number) operation, as the
+          // ToNumber(rhs) will by itself already invoke ToPrimitive with
+          // a Number hint.
+          Callable callable = CodeFactory::NonNumberToNumber(isolate());
+          var_rhs.Bind(CallStub(callable, context, rhs));
+          Goto(&loop);
+        }
+      }
+    }
+
+    Bind(&if_lhsisnotsmi);
+    {
+      // Load the HeapNumber map for later comparisons.
+      Node* number_map = HeapNumberMapConstant();
+
+      // Load the map of {lhs}.
+      Node* lhs_map = LoadMap(lhs);
+
+      // Check if {rhs} is a Smi or a HeapObject.
+      Label if_rhsissmi(this), if_rhsisnotsmi(this);
+      Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+      Bind(&if_rhsissmi);
+      {
+        // Check if the {lhs} is a HeapNumber.
+        Label if_lhsisnumber(this), if_lhsisnotnumber(this, Label::kDeferred);
+        Branch(WordEqual(lhs_map, number_map), &if_lhsisnumber,
+               &if_lhsisnotnumber);
+
+        Bind(&if_lhsisnumber);
+        {
+          // Convert the {lhs} and {rhs} to floating point values, and
+          // perform a floating point comparison.
+          var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
+          var_fcmp_rhs.Bind(SmiToFloat64(rhs));
+          Goto(&do_fcmp);
+        }
+
+        Bind(&if_lhsisnotnumber);
+        {
+          // Convert the {lhs} to a Number; we don't need to perform the
+          // dedicated ToPrimitive(lhs, hint Number) operation, as the
+          // ToNumber(lhs) will by itself already invoke ToPrimitive with
+          // a Number hint.
+          Callable callable = CodeFactory::NonNumberToNumber(isolate());
+          var_lhs.Bind(CallStub(callable, context, lhs));
+          Goto(&loop);
+        }
+      }
+
+      Bind(&if_rhsisnotsmi);
+      {
+        // Load the map of {rhs}.
+        Node* rhs_map = LoadMap(rhs);
+
+        // Check if {lhs} is a HeapNumber.
+        Label if_lhsisnumber(this), if_lhsisnotnumber(this);
+        Branch(WordEqual(lhs_map, number_map), &if_lhsisnumber,
+               &if_lhsisnotnumber);
+
+        Bind(&if_lhsisnumber);
+        {
+          // Check if {rhs} is also a HeapNumber.
+          Label if_rhsisnumber(this), if_rhsisnotnumber(this, Label::kDeferred);
+          Branch(WordEqual(lhs_map, rhs_map), &if_rhsisnumber,
+                 &if_rhsisnotnumber);
+
+          Bind(&if_rhsisnumber);
+          {
+            // Convert the {lhs} and {rhs} to floating point values, and
+            // perform a floating point comparison.
+            var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
+            var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
+            Goto(&do_fcmp);
+          }
+
+          Bind(&if_rhsisnotnumber);
+          {
+            // Convert the {rhs} to a Number; we don't need to perform
+            // the dedicated ToPrimitive(rhs, hint Number) operation, as the
+            // ToNumber(rhs) will by itself already invoke ToPrimitive with
+            // a Number hint.
+            Callable callable = CodeFactory::NonNumberToNumber(isolate());
+            var_rhs.Bind(CallStub(callable, context, rhs));
+            Goto(&loop);
+          }
+        }
+
+        Bind(&if_lhsisnotnumber);
+        {
+          // Load the instance type of {lhs}.
+          Node* lhs_instance_type = LoadMapInstanceType(lhs_map);
+
+          // Check if {lhs} is a String.
+          Label if_lhsisstring(this), if_lhsisnotstring(this, Label::kDeferred);
+          Branch(IsStringInstanceType(lhs_instance_type), &if_lhsisstring,
+                 &if_lhsisnotstring);
+
+          Bind(&if_lhsisstring);
+          {
+            // Load the instance type of {rhs}.
+            Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
+
+            // Check if {rhs} is also a String.
+            Label if_rhsisstring(this, Label::kDeferred),
+                if_rhsisnotstring(this, Label::kDeferred);
+            Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
+                   &if_rhsisnotstring);
+
+            Bind(&if_rhsisstring);
+            {
+              // Both {lhs} and {rhs} are strings.
+              switch (mode) {
+                case kLessThan:
+                  result.Bind(CallStub(CodeFactory::StringLessThan(isolate()),
+                                       context, lhs, rhs));
+                  Goto(&end);
+                  break;
+                case kLessThanOrEqual:
+                  result.Bind(
+                      CallStub(CodeFactory::StringLessThanOrEqual(isolate()),
+                               context, lhs, rhs));
+                  Goto(&end);
+                  break;
+                case kGreaterThan:
+                  result.Bind(
+                      CallStub(CodeFactory::StringGreaterThan(isolate()),
+                               context, lhs, rhs));
+                  Goto(&end);
+                  break;
+                case kGreaterThanOrEqual:
+                  result.Bind(
+                      CallStub(CodeFactory::StringGreaterThanOrEqual(isolate()),
+                               context, lhs, rhs));
+                  Goto(&end);
+                  break;
+              }
+            }
+
+            Bind(&if_rhsisnotstring);
+            {
+              // The {lhs} is a String, while {rhs} is neither a Number nor a
+              // String, so we call ToPrimitive(rhs, hint Number) if {rhs} is
+              // a receiver, or ToNumber(lhs) and ToNumber(rhs) otherwise.
+              STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+              Label if_rhsisreceiver(this, Label::kDeferred),
+                  if_rhsisnotreceiver(this, Label::kDeferred);
+              Branch(IsJSReceiverInstanceType(rhs_instance_type),
+                     &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+              Bind(&if_rhsisreceiver);
+              {
+                // Convert {rhs} to a primitive first passing Number hint.
+                Callable callable = CodeFactory::NonPrimitiveToPrimitive(
+                    isolate(), ToPrimitiveHint::kNumber);
+                var_rhs.Bind(CallStub(callable, context, rhs));
+                Goto(&loop);
+              }
+
+              Bind(&if_rhsisnotreceiver);
+              {
+                // Convert both {lhs} and {rhs} to Number.
+                Callable callable = CodeFactory::ToNumber(isolate());
+                var_lhs.Bind(CallStub(callable, context, lhs));
+                var_rhs.Bind(CallStub(callable, context, rhs));
+                Goto(&loop);
+              }
+            }
+          }
+
+          Bind(&if_lhsisnotstring);
+          {
+            // The {lhs} is neither a Number nor a String, so we call
+            // ToPrimitive(lhs, hint Number) if {lhs} is a receiver, or
+            // ToNumber(lhs) and ToNumber(rhs) otherwise.
+            STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+            Label if_lhsisreceiver(this, Label::kDeferred),
+                if_lhsisnotreceiver(this, Label::kDeferred);
+            Branch(IsJSReceiverInstanceType(lhs_instance_type),
+                   &if_lhsisreceiver, &if_lhsisnotreceiver);
+
+            Bind(&if_lhsisreceiver);
+            {
+              // Convert {lhs} to a primitive first passing Number hint.
+              Callable callable = CodeFactory::NonPrimitiveToPrimitive(
+                  isolate(), ToPrimitiveHint::kNumber);
+              var_lhs.Bind(CallStub(callable, context, lhs));
+              Goto(&loop);
+            }
+
+            Bind(&if_lhsisnotreceiver);
+            {
+              // Convert both {lhs} and {rhs} to Number.
+              Callable callable = CodeFactory::ToNumber(isolate());
+              var_lhs.Bind(CallStub(callable, context, lhs));
+              var_rhs.Bind(CallStub(callable, context, rhs));
+              Goto(&loop);
+            }
+          }
+        }
+      }
+    }
+  }
+
+  Bind(&do_fcmp);
+  {
+    // Load the {lhs} and {rhs} floating point values.
+    Node* lhs = var_fcmp_lhs.value();
+    Node* rhs = var_fcmp_rhs.value();
+
+    // Perform a fast floating point comparison.
+    switch (mode) {
+      case kLessThan:
+        Branch(Float64LessThan(lhs, rhs), &return_true, &return_false);
+        break;
+      case kLessThanOrEqual:
+        Branch(Float64LessThanOrEqual(lhs, rhs), &return_true, &return_false);
+        break;
+      case kGreaterThan:
+        Branch(Float64GreaterThan(lhs, rhs), &return_true, &return_false);
+        break;
+      case kGreaterThanOrEqual:
+        Branch(Float64GreaterThanOrEqual(lhs, rhs), &return_true,
+               &return_false);
+        break;
+    }
+  }
+
+  Bind(&return_true);
+  {
+    result.Bind(BooleanConstant(true));
+    Goto(&end);
+  }
+
+  Bind(&return_false);
+  {
+    result.Bind(BooleanConstant(false));
+    Goto(&end);
+  }
+
+  Bind(&end);
+  return result.value();
+}
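+
+// Worked examples of the conversion loop above (ES6 abstract relational
+// comparison, section 7.2.11):
+//
+//   1 < "2"   : {rhs} is a String, so NonNumberToNumber("2") yields 2; the
+//               loop re-enters with two Smis and 1 < 2 is true.
+//   ({}) < 1  : {lhs} is neither Number nor String; NonNumberToNumber invokes
+//               ToPrimitive with a Number hint internally, giving
+//               "[object Object]" and then NaN, so the comparison is false.
+//   "a" < "b" : both sides are Strings and go straight to the
+//               CodeFactory::StringLessThan stub, with no conversion loop.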
+
+namespace {
+
+void GenerateEqual_Same(CodeStubAssembler* assembler, compiler::Node* value,
+                        CodeStubAssembler::Label* if_equal,
+                        CodeStubAssembler::Label* if_notequal) {
+  // In case of abstract or strict equality checks, we need additional checks
+  // for NaN values because they are not considered equal, even if both the
+  // left and the right hand side reference exactly the same value.
+  // TODO(bmeurer): This seems to violate the SIMD.js specification, but it
+  // seems to be what is tested in the current SIMD.js testsuite.
+
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  // Check if {value} is a Smi or a HeapObject.
+  Label if_valueissmi(assembler), if_valueisnotsmi(assembler);
+  assembler->Branch(assembler->TaggedIsSmi(value), &if_valueissmi,
+                    &if_valueisnotsmi);
+
+  assembler->Bind(&if_valueisnotsmi);
+  {
+    // Load the map of {value}.
+    Node* value_map = assembler->LoadMap(value);
+
+    // Check if {value} (and therefore {rhs}) is a HeapNumber.
+    Label if_valueisnumber(assembler), if_valueisnotnumber(assembler);
+    assembler->Branch(assembler->IsHeapNumberMap(value_map), &if_valueisnumber,
+                      &if_valueisnotnumber);
+
+    assembler->Bind(&if_valueisnumber);
+    {
+      // Convert {value} (and therefore {rhs}) to floating point value.
+      Node* value_value = assembler->LoadHeapNumberValue(value);
+
+      // Check if the HeapNumber value is a NaN.
+      assembler->BranchIfFloat64IsNaN(value_value, if_notequal, if_equal);
+    }
+
+    assembler->Bind(&if_valueisnotnumber);
+    assembler->Goto(if_equal);
+  }
+
+  assembler->Bind(&if_valueissmi);
+  assembler->Goto(if_equal);
+}
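+
+// In other words, the only same-reference case that compares unequal is a
+// NaN-valued HeapNumber: for `var x = 0 / 0;` both `x == x` and `x === x`
+// are false, while every other value is trivially equal to itself.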
+
+void GenerateEqual_Simd128Value_HeapObject(
+    CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* lhs_map,
+    compiler::Node* rhs, compiler::Node* rhs_map,
+    CodeStubAssembler::Label* if_equal, CodeStubAssembler::Label* if_notequal) {
+  assembler->BranchIfSimd128Equal(lhs, lhs_map, rhs, rhs_map, if_equal,
+                                  if_notequal);
+}
+
+}  // namespace
+
+// ES6 section 7.2.12 Abstract Equality Comparison
+compiler::Node* CodeStubAssembler::Equal(ResultMode mode, compiler::Node* lhs,
+                                         compiler::Node* rhs,
+                                         compiler::Node* context) {
+  // This is a slightly optimized version of Object::Equals, represented as a
+  // scheduled TurboFan graph utilizing the CodeStubAssembler. Whenever you
+  // change something functionality-wise in here, remember to update the
+  // Object::Equals method as well.
+  typedef compiler::Node Node;
+
+  Label if_equal(this), if_notequal(this),
+      do_rhsstringtonumber(this, Label::kDeferred), end(this);
+  Variable result(this, MachineRepresentation::kTagged);
+
+  // Shared entry for floating point comparison.
+  Label do_fcmp(this);
+  Variable var_fcmp_lhs(this, MachineRepresentation::kFloat64),
+      var_fcmp_rhs(this, MachineRepresentation::kFloat64);
+
+  // We might need to loop several times due to ToPrimitive and/or ToNumber
+  // conversions.
+  Variable var_lhs(this, MachineRepresentation::kTagged),
+      var_rhs(this, MachineRepresentation::kTagged);
+  Variable* loop_vars[2] = {&var_lhs, &var_rhs};
+  Label loop(this, 2, loop_vars);
+  var_lhs.Bind(lhs);
+  var_rhs.Bind(rhs);
+  Goto(&loop);
+  Bind(&loop);
+  {
+    // Load the current {lhs} and {rhs} values.
+    lhs = var_lhs.value();
+    rhs = var_rhs.value();
+
+    // Check if {lhs} and {rhs} refer to the same object.
+    Label if_same(this), if_notsame(this);
+    Branch(WordEqual(lhs, rhs), &if_same, &if_notsame);
+
+    Bind(&if_same);
+    {
+      // The {lhs} and {rhs} reference the exact same value, yet we need special
+      // treatment for HeapNumber, as NaN is not equal to NaN.
+      GenerateEqual_Same(this, lhs, &if_equal, &if_notequal);
+    }
+
+    Bind(&if_notsame);
+    {
+      // Check if {lhs} is a Smi or a HeapObject.
+      Label if_lhsissmi(this), if_lhsisnotsmi(this);
+      Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+
+      Bind(&if_lhsissmi);
+      {
+        // Check if {rhs} is a Smi or a HeapObject.
+        Label if_rhsissmi(this), if_rhsisnotsmi(this);
+        Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+        Bind(&if_rhsissmi);
+        // We have already checked for {lhs} and {rhs} being the same value, so
+        // if both are Smis when we get here they must not be equal.
+        Goto(&if_notequal);
+
+        Bind(&if_rhsisnotsmi);
+        {
+          // Load the map of {rhs}.
+          Node* rhs_map = LoadMap(rhs);
+
+          // Check if {rhs} is a HeapNumber.
+          Node* number_map = HeapNumberMapConstant();
+          Label if_rhsisnumber(this), if_rhsisnotnumber(this);
+          Branch(WordEqual(rhs_map, number_map), &if_rhsisnumber,
+                 &if_rhsisnotnumber);
+
+          Bind(&if_rhsisnumber);
+          {
+            // Convert {lhs} and {rhs} to floating point values, and
+            // perform a floating point comparison.
+            var_fcmp_lhs.Bind(SmiToFloat64(lhs));
+            var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
+            Goto(&do_fcmp);
+          }
+
+          Bind(&if_rhsisnotnumber);
+          {
+            // Load the instance type of the {rhs}.
+            Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
+
+            // Check if the {rhs} is a String.
+            Label if_rhsisstring(this, Label::kDeferred),
+                if_rhsisnotstring(this);
+            Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
+                   &if_rhsisnotstring);
+
+            Bind(&if_rhsisstring);
+            {
+              // The {rhs} is a String and the {lhs} is a Smi; we need
+              // to convert the {rhs} to a Number and compare the output to
+              // the Number on the {lhs}.
+              Goto(&do_rhsstringtonumber);
+            }
+
+            Bind(&if_rhsisnotstring);
+            {
+              // Check if the {rhs} is a Boolean.
+              Label if_rhsisboolean(this), if_rhsisnotboolean(this);
+              Branch(IsBooleanMap(rhs_map), &if_rhsisboolean,
+                     &if_rhsisnotboolean);
+
+              Bind(&if_rhsisboolean);
+              {
+                // The {rhs} is a Boolean, load its number value.
+                var_rhs.Bind(LoadObjectField(rhs, Oddball::kToNumberOffset));
+                Goto(&loop);
+              }
+
+              Bind(&if_rhsisnotboolean);
+              {
+                // Check if the {rhs} is a Receiver.
+                STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+                Label if_rhsisreceiver(this, Label::kDeferred),
+                    if_rhsisnotreceiver(this);
+                Branch(IsJSReceiverInstanceType(rhs_instance_type),
+                       &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+                Bind(&if_rhsisreceiver);
+                {
+                  // Convert {rhs} to a primitive first (passing no hint).
+                  Callable callable =
+                      CodeFactory::NonPrimitiveToPrimitive(isolate());
+                  var_rhs.Bind(CallStub(callable, context, rhs));
+                  Goto(&loop);
+                }
+
+                Bind(&if_rhsisnotreceiver);
+                Goto(&if_notequal);
+              }
+            }
+          }
+        }
+      }
+
+      Bind(&if_lhsisnotsmi);
+      {
+        // Check if {rhs} is a Smi or a HeapObject.
+        Label if_rhsissmi(this), if_rhsisnotsmi(this);
+        Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+        Bind(&if_rhsissmi);
+        {
+          // The {lhs} is a HeapObject and the {rhs} is a Smi; swapping {lhs}
+          // and {rhs} is not observable and doesn't matter for the result, so
+          // we can just swap them and use the Smi handling above (for {lhs}
+          // being a Smi).
+          var_lhs.Bind(rhs);
+          var_rhs.Bind(lhs);
+          Goto(&loop);
+        }
+
+        Bind(&if_rhsisnotsmi);
+        {
+          Label if_lhsisstring(this), if_lhsisnumber(this),
+              if_lhsissymbol(this), if_lhsissimd128value(this),
+              if_lhsisoddball(this), if_lhsisreceiver(this);
+
+          // Both {lhs} and {rhs} are HeapObjects, load their maps
+          // and their instance types.
+          Node* lhs_map = LoadMap(lhs);
+          Node* rhs_map = LoadMap(rhs);
+
+          // Load the instance types of {lhs} and {rhs}.
+          Node* lhs_instance_type = LoadMapInstanceType(lhs_map);
+          Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
+
+          // Dispatch based on the instance type of {lhs}.
+          size_t const kNumCases = FIRST_NONSTRING_TYPE + 4;
+          Label* case_labels[kNumCases];
+          int32_t case_values[kNumCases];
+          for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
+            case_labels[i] = new Label(this);
+            case_values[i] = i;
+          }
+          case_labels[FIRST_NONSTRING_TYPE + 0] = &if_lhsisnumber;
+          case_values[FIRST_NONSTRING_TYPE + 0] = HEAP_NUMBER_TYPE;
+          case_labels[FIRST_NONSTRING_TYPE + 1] = &if_lhsissymbol;
+          case_values[FIRST_NONSTRING_TYPE + 1] = SYMBOL_TYPE;
+          case_labels[FIRST_NONSTRING_TYPE + 2] = &if_lhsissimd128value;
+          case_values[FIRST_NONSTRING_TYPE + 2] = SIMD128_VALUE_TYPE;
+          case_labels[FIRST_NONSTRING_TYPE + 3] = &if_lhsisoddball;
+          case_values[FIRST_NONSTRING_TYPE + 3] = ODDBALL_TYPE;
+          Switch(lhs_instance_type, &if_lhsisreceiver, case_values, case_labels,
+                 arraysize(case_values));
+          for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
+            Bind(case_labels[i]);
+            Goto(&if_lhsisstring);
+            delete case_labels[i];
+          }
+
+          Bind(&if_lhsisstring);
+          {
+            // Check if {rhs} is also a String.
+            Label if_rhsisstring(this, Label::kDeferred),
+                if_rhsisnotstring(this);
+            Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
+                   &if_rhsisnotstring);
+
+            Bind(&if_rhsisstring);
+            {
+              // Both {lhs} and {rhs} are of type String, just do the
+              // string comparison then.
+              Callable callable = (mode == kDontNegateResult)
+                                      ? CodeFactory::StringEqual(isolate())
+                                      : CodeFactory::StringNotEqual(isolate());
+              result.Bind(CallStub(callable, context, lhs, rhs));
+              Goto(&end);
+            }
+
+            Bind(&if_rhsisnotstring);
+            {
+              // The {lhs} is a String and the {rhs} is some other HeapObject.
+              // Swapping {lhs} and {rhs} is not observable and doesn't matter
+              // for the result, so we can just swap them and use the String
+              // handling below (for {rhs} being a String).
+              var_lhs.Bind(rhs);
+              var_rhs.Bind(lhs);
+              Goto(&loop);
+            }
+          }
+
+          Bind(&if_lhsisnumber);
+          {
+            // Check if {rhs} is also a HeapNumber.
+            Label if_rhsisnumber(this), if_rhsisnotnumber(this);
+            Branch(Word32Equal(lhs_instance_type, rhs_instance_type),
+                   &if_rhsisnumber, &if_rhsisnotnumber);
+
+            Bind(&if_rhsisnumber);
+            {
+              // Convert {lhs} and {rhs} to floating point values, and
+              // perform a floating point comparison.
+              var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
+              var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
+              Goto(&do_fcmp);
+            }
+
+            Bind(&if_rhsisnotnumber);
+            {
+              // The {lhs} is a Number, the {rhs} is some other HeapObject.
+              Label if_rhsisstring(this, Label::kDeferred),
+                  if_rhsisnotstring(this);
+              Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
+                     &if_rhsisnotstring);
+
+              Bind(&if_rhsisstring);
+              {
+                // The {rhs} is a String and the {lhs} is a HeapNumber; we need
+                // to convert the {rhs} to a Number and compare the output to
+                // the Number on the {lhs}.
+                Goto(&do_rhsstringtonumber);
+              }
+
+              Bind(&if_rhsisnotstring);
+              {
+                // Check if the {rhs} is a JSReceiver.
+                Label if_rhsisreceiver(this), if_rhsisnotreceiver(this);
+                STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+                Branch(IsJSReceiverInstanceType(rhs_instance_type),
+                       &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+                Bind(&if_rhsisreceiver);
+                {
+                  // The {lhs} is a Primitive and the {rhs} is a JSReceiver.
+                  // Swapping {lhs} and {rhs} is not observable and doesn't
+                  // matter for the result, so we can just swap them and use
+                  // the JSReceiver handling below (for {lhs} being a
+                  // JSReceiver).
+                  var_lhs.Bind(rhs);
+                  var_rhs.Bind(lhs);
+                  Goto(&loop);
+                }
+
+                Bind(&if_rhsisnotreceiver);
+                {
+                  // Check if {rhs} is a Boolean.
+                  Label if_rhsisboolean(this), if_rhsisnotboolean(this);
+                  Branch(IsBooleanMap(rhs_map), &if_rhsisboolean,
+                         &if_rhsisnotboolean);
+
+                  Bind(&if_rhsisboolean);
+                  {
+                    // The {rhs} is a Boolean, convert it to a Smi first.
+                    var_rhs.Bind(
+                        LoadObjectField(rhs, Oddball::kToNumberOffset));
+                    Goto(&loop);
+                  }
+
+                  Bind(&if_rhsisnotboolean);
+                  Goto(&if_notequal);
+                }
+              }
+            }
+          }
+
+          Bind(&if_lhsisoddball);
+          {
+            // The {lhs} is an Oddball and {rhs} is some other HeapObject.
+            Label if_lhsisboolean(this), if_lhsisnotboolean(this);
+            Node* boolean_map = BooleanMapConstant();
+            Branch(WordEqual(lhs_map, boolean_map), &if_lhsisboolean,
+                   &if_lhsisnotboolean);
+
+            Bind(&if_lhsisboolean);
+            {
+              // The {lhs} is a Boolean, check if {rhs} is also a Boolean.
+              Label if_rhsisboolean(this), if_rhsisnotboolean(this);
+              Branch(WordEqual(rhs_map, boolean_map), &if_rhsisboolean,
+                     &if_rhsisnotboolean);
+
+              Bind(&if_rhsisboolean);
+              {
+                // Both {lhs} and {rhs} are distinct Boolean values.
+                Goto(&if_notequal);
+              }
+
+              Bind(&if_rhsisnotboolean);
+              {
+                // Convert the {lhs} to a Number first.
+                var_lhs.Bind(LoadObjectField(lhs, Oddball::kToNumberOffset));
+                Goto(&loop);
+              }
+            }
+
+            Bind(&if_lhsisnotboolean);
+            {
+              // The {lhs} is either Null or Undefined; check if the {rhs} is
+              // undetectable (i.e. either also Null or Undefined or some
+              // undetectable JSReceiver).
+              Node* rhs_bitfield = LoadMapBitField(rhs_map);
+              Branch(Word32Equal(
+                         Word32And(rhs_bitfield,
+                                   Int32Constant(1 << Map::kIsUndetectable)),
+                         Int32Constant(0)),
+                     &if_notequal, &if_equal);
+            }
+          }
+
+          Bind(&if_lhsissymbol);
+          {
+            // Check if the {rhs} is a JSReceiver.
+            Label if_rhsisreceiver(this), if_rhsisnotreceiver(this);
+            STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+            Branch(IsJSReceiverInstanceType(rhs_instance_type),
+                   &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+            Bind(&if_rhsisreceiver);
+            {
+              // The {lhs} is a Primitive and the {rhs} is a JSReceiver.
+              // Swapping {lhs} and {rhs} is not observable and doesn't
+              // matter for the result, so we can just swap them and use
+              // the JSReceiver handling below (for {lhs} being a JSReceiver).
+              var_lhs.Bind(rhs);
+              var_rhs.Bind(lhs);
+              Goto(&loop);
+            }
+
+            Bind(&if_rhsisnotreceiver);
+            {
+              // The {rhs} is not a JSReceiver and also not the same Symbol
+              // as the {lhs}, so this equality check is considered false.
+              Goto(&if_notequal);
+            }
+          }
+
+          Bind(&if_lhsissimd128value);
+          {
+            // Check if the {rhs} is also a Simd128Value.
+            Label if_rhsissimd128value(this), if_rhsisnotsimd128value(this);
+            Branch(Word32Equal(lhs_instance_type, rhs_instance_type),
+                   &if_rhsissimd128value, &if_rhsisnotsimd128value);
+
+            Bind(&if_rhsissimd128value);
+            {
+              // Both {lhs} and {rhs} are Simd128Values.
+              GenerateEqual_Simd128Value_HeapObject(
+                  this, lhs, lhs_map, rhs, rhs_map, &if_equal, &if_notequal);
+            }
+
+            Bind(&if_rhsisnotsimd128value);
+            {
+              // Check if the {rhs} is a JSReceiver.
+              Label if_rhsisreceiver(this), if_rhsisnotreceiver(this);
+              STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+              Branch(IsJSReceiverInstanceType(rhs_instance_type),
+                     &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+              Bind(&if_rhsisreceiver);
+              {
+                // The {lhs} is a Primitive and the {rhs} is a JSReceiver.
+                // Swapping {lhs} and {rhs} is not observable and doesn't
+                // matter for the result, so we can just swap them and use
+                // the JSReceiver handling below (for {lhs} being a JSReceiver).
+                var_lhs.Bind(rhs);
+                var_rhs.Bind(lhs);
+                Goto(&loop);
+              }
+
+              Bind(&if_rhsisnotreceiver);
+              {
+                // The {rhs} is some other Primitive.
+                Goto(&if_notequal);
+              }
+            }
+          }
+
+          Bind(&if_lhsisreceiver);
+          {
+            // Check if the {rhs} is also a JSReceiver.
+            Label if_rhsisreceiver(this), if_rhsisnotreceiver(this);
+            STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+            Branch(IsJSReceiverInstanceType(rhs_instance_type),
+                   &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+            Bind(&if_rhsisreceiver);
+            {
+              // Both {lhs} and {rhs} are different JSReceiver references, so
+              // they cannot be considered equal.
+              Goto(&if_notequal);
+            }
+
+            Bind(&if_rhsisnotreceiver);
+            {
+              // Check if {rhs} is Null or Undefined (an undetectable check
+              // is sufficient here, since we already know that {rhs} is not
+              // a JSReceiver).
+              Label if_rhsisundetectable(this),
+                  if_rhsisnotundetectable(this, Label::kDeferred);
+              Node* rhs_bitfield = LoadMapBitField(rhs_map);
+              Branch(Word32Equal(
+                         Word32And(rhs_bitfield,
+                                   Int32Constant(1 << Map::kIsUndetectable)),
+                         Int32Constant(0)),
+                     &if_rhsisnotundetectable, &if_rhsisundetectable);
+
+              Bind(&if_rhsisundetectable);
+              {
+                // Check if {lhs} is an undetectable JSReceiver.
+                Node* lhs_bitfield = LoadMapBitField(lhs_map);
+                Branch(Word32Equal(
+                           Word32And(lhs_bitfield,
+                                     Int32Constant(1 << Map::kIsUndetectable)),
+                           Int32Constant(0)),
+                       &if_notequal, &if_equal);
+              }
+
+              Bind(&if_rhsisnotundetectable);
+              {
+                // The {rhs} is some Primitive different from Null and
+                // Undefined, so we need to convert {lhs} to a Primitive first.
+                Callable callable =
+                    CodeFactory::NonPrimitiveToPrimitive(isolate());
+                var_lhs.Bind(CallStub(callable, context, lhs));
+                Goto(&loop);
+              }
+            }
+          }
+        }
+      }
+    }
+
+    Bind(&do_rhsstringtonumber);
+    {
+      Callable callable = CodeFactory::StringToNumber(isolate());
+      var_rhs.Bind(CallStub(callable, context, rhs));
+      Goto(&loop);
+    }
+  }
+
+  Bind(&do_fcmp);
+  {
+    // Load the {lhs} and {rhs} floating point values.
+    Node* lhs = var_fcmp_lhs.value();
+    Node* rhs = var_fcmp_rhs.value();
+
+    // Perform a fast floating point comparison.
+    Branch(Float64Equal(lhs, rhs), &if_equal, &if_notequal);
+  }
+
+  Bind(&if_equal);
+  {
+    result.Bind(BooleanConstant(mode == kDontNegateResult));
+    Goto(&end);
+  }
+
+  Bind(&if_notequal);
+  {
+    result.Bind(BooleanConstant(mode == kNegateResult));
+    Goto(&end);
+  }
+
+  Bind(&end);
+  return result.value();
+}
+
+compiler::Node* CodeStubAssembler::StrictEqual(ResultMode mode,
+                                               compiler::Node* lhs,
+                                               compiler::Node* rhs,
+                                               compiler::Node* context) {
+  // Here's pseudo-code for the algorithm below in case of kDontNegateResult
+  // mode; for kNegateResult mode we properly negate the result.
+  //
+  // if (lhs == rhs) {
+  //   if (lhs->IsHeapNumber()) return HeapNumber::cast(lhs)->value() != NaN;
+  //   return true;
+  // }
+  // if (!lhs->IsSmi()) {
+  //   if (lhs->IsHeapNumber()) {
+  //     if (rhs->IsSmi()) {
+  //       return Smi::cast(rhs)->value() == HeapNumber::cast(lhs)->value();
+  //     } else if (rhs->IsHeapNumber()) {
+  //       return HeapNumber::cast(rhs)->value() ==
+  //       HeapNumber::cast(lhs)->value();
+  //     } else {
+  //       return false;
+  //     }
+  //   } else {
+  //     if (rhs->IsSmi()) {
+  //       return false;
+  //     } else {
+  //       if (lhs->IsString()) {
+  //         if (rhs->IsString()) {
+  //           return %StringEqual(lhs, rhs);
+  //         } else {
+  //           return false;
+  //         }
+  //       } else if (lhs->IsSimd128()) {
+  //         if (rhs->IsSimd128()) {
+  //           return %StrictEqual(lhs, rhs);
+  //         } else {
+  //           return false;
+  //         }
+  //       } else {
+  //         return false;
+  //       }
+  //     }
+  //   }
+  // } else {
+  //   if (rhs->IsSmi()) {
+  //     return false;
+  //   } else {
+  //     if (rhs->IsHeapNumber()) {
+  //       return Smi::cast(lhs)->value() == HeapNumber::cast(rhs)->value();
+  //     } else {
+  //       return false;
+  //     }
+  //   }
+  // }
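+  //
+  // A few concrete cases the code below handles (illustration only):
+  //   1 === 1.0     is true  (Smi compared against an equal HeapNumber),
+  //   NaN === NaN   is false (the HeapNumber comparison paths), and
+  //   "ab" === "ab" is true  even for two distinct String objects
+  //                 (delegated to the StringEqual stub).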
+
+  typedef compiler::Node Node;
+
+  Label if_equal(this), if_notequal(this), end(this);
+  Variable result(this, MachineRepresentation::kTagged);
+
+  // Check if {lhs} and {rhs} refer to the same object.
+  Label if_same(this), if_notsame(this);
+  Branch(WordEqual(lhs, rhs), &if_same, &if_notsame);
+
+  Bind(&if_same);
+  {
+    // The {lhs} and {rhs} reference the exact same value, yet we need special
+    // treatment for HeapNumber, as NaN is not equal to NaN.
+    GenerateEqual_Same(this, lhs, &if_equal, &if_notequal);
+  }
+
+  Bind(&if_notsame);
+  {
+    // The {lhs} and {rhs} reference different objects, yet for Smi, HeapNumber,
+    // String and Simd128Value they can still be considered equal.
+    Node* number_map = HeapNumberMapConstant();
+
+    // Check if {lhs} is a Smi or a HeapObject.
+    Label if_lhsissmi(this), if_lhsisnotsmi(this);
+    Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+
+    Bind(&if_lhsisnotsmi);
+    {
+      // Load the map of {lhs}.
+      Node* lhs_map = LoadMap(lhs);
+
+      // Check if {lhs} is a HeapNumber.
+      Label if_lhsisnumber(this), if_lhsisnotnumber(this);
+      Branch(WordEqual(lhs_map, number_map), &if_lhsisnumber,
+             &if_lhsisnotnumber);
+
+      Bind(&if_lhsisnumber);
+      {
+        // Check if {rhs} is a Smi or a HeapObject.
+        Label if_rhsissmi(this), if_rhsisnotsmi(this);
+        Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+        Bind(&if_rhsissmi);
+        {
+          // Convert {lhs} and {rhs} to floating point values.
+          Node* lhs_value = LoadHeapNumberValue(lhs);
+          Node* rhs_value = SmiToFloat64(rhs);
+
+          // Perform a floating point comparison of {lhs} and {rhs}.
+          Branch(Float64Equal(lhs_value, rhs_value), &if_equal, &if_notequal);
+        }
+
+        Bind(&if_rhsisnotsmi);
+        {
+          // Load the map of {rhs}.
+          Node* rhs_map = LoadMap(rhs);
+
+          // Check if {rhs} is also a HeapNumber.
+          Label if_rhsisnumber(this), if_rhsisnotnumber(this);
+          Branch(WordEqual(rhs_map, number_map), &if_rhsisnumber,
+                 &if_rhsisnotnumber);
+
+          Bind(&if_rhsisnumber);
+          {
+            // Convert {lhs} and {rhs} to floating point values.
+            Node* lhs_value = LoadHeapNumberValue(lhs);
+            Node* rhs_value = LoadHeapNumberValue(rhs);
+
+            // Perform a floating point comparison of {lhs} and {rhs}.
+            Branch(Float64Equal(lhs_value, rhs_value), &if_equal, &if_notequal);
+          }
+
+          Bind(&if_rhsisnotnumber);
+          Goto(&if_notequal);
+        }
+      }
+
+      Bind(&if_lhsisnotnumber);
+      {
+        // Check if {rhs} is a Smi or a HeapObject.
+        Label if_rhsissmi(this), if_rhsisnotsmi(this);
+        Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+        Bind(&if_rhsissmi);
+        Goto(&if_notequal);
+
+        Bind(&if_rhsisnotsmi);
+        {
+          // Load the instance type of {lhs}.
+          Node* lhs_instance_type = LoadMapInstanceType(lhs_map);
+
+          // Check if {lhs} is a String.
+          Label if_lhsisstring(this), if_lhsisnotstring(this);
+          Branch(IsStringInstanceType(lhs_instance_type), &if_lhsisstring,
+                 &if_lhsisnotstring);
+
+          Bind(&if_lhsisstring);
+          {
+            // Load the instance type of {rhs}.
+            Node* rhs_instance_type = LoadInstanceType(rhs);
+
+            // Check if {rhs} is also a String.
+            Label if_rhsisstring(this, Label::kDeferred),
+                if_rhsisnotstring(this);
+            Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
+                   &if_rhsisnotstring);
+
+            Bind(&if_rhsisstring);
+            {
+              Callable callable = (mode == kDontNegateResult)
+                                      ? CodeFactory::StringEqual(isolate())
+                                      : CodeFactory::StringNotEqual(isolate());
+              result.Bind(CallStub(callable, context, lhs, rhs));
+              Goto(&end);
+            }
+
+            Bind(&if_rhsisnotstring);
+            Goto(&if_notequal);
+          }
+
+          Bind(&if_lhsisnotstring);
+          {
+            // Check if {lhs} is a Simd128Value.
+            Label if_lhsissimd128value(this), if_lhsisnotsimd128value(this);
+            Branch(Word32Equal(lhs_instance_type,
+                               Int32Constant(SIMD128_VALUE_TYPE)),
+                   &if_lhsissimd128value, &if_lhsisnotsimd128value);
+
+            Bind(&if_lhsissimd128value);
+            {
+              // Load the map of {rhs}.
+              Node* rhs_map = LoadMap(rhs);
+
+              // Check if {rhs} is also a Simd128Value that is equal to {lhs}.
+              GenerateEqual_Simd128Value_HeapObject(
+                  this, lhs, lhs_map, rhs, rhs_map, &if_equal, &if_notequal);
+            }
+
+            Bind(&if_lhsisnotsimd128value);
+            Goto(&if_notequal);
+          }
+        }
+      }
+    }
+
+    Bind(&if_lhsissmi);
+    {
+      // We already know that {lhs} and {rhs} are not reference equal, and {lhs}
+      // is a Smi; so {lhs} and {rhs} can only be strictly equal if {rhs} is a
+      // HeapNumber with an equal floating point value.
+
+      // Check if {rhs} is a Smi or a HeapObject.
+      Label if_rhsissmi(this), if_rhsisnotsmi(this);
+      Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+      Bind(&if_rhsissmi);
+      Goto(&if_notequal);
+
+      Bind(&if_rhsisnotsmi);
+      {
+        // Load the map of the {rhs}.
+        Node* rhs_map = LoadMap(rhs);
+
+        // The {rhs} could be a HeapNumber with the same value as {lhs}.
+        Label if_rhsisnumber(this), if_rhsisnotnumber(this);
+        Branch(WordEqual(rhs_map, number_map), &if_rhsisnumber,
+               &if_rhsisnotnumber);
+
+        Bind(&if_rhsisnumber);
+        {
+          // Convert {lhs} and {rhs} to floating point values.
+          Node* lhs_value = SmiToFloat64(lhs);
+          Node* rhs_value = LoadHeapNumberValue(rhs);
+
+          // Perform a floating point comparison of {lhs} and {rhs}.
+          Branch(Float64Equal(lhs_value, rhs_value), &if_equal, &if_notequal);
+        }
+
+        Bind(&if_rhsisnotnumber);
+        Goto(&if_notequal);
+      }
+    }
+  }
+
+  Bind(&if_equal);
+  {
+    result.Bind(BooleanConstant(mode == kDontNegateResult));
+    Goto(&end);
+  }
+
+  Bind(&if_notequal);
+  {
+    result.Bind(BooleanConstant(mode == kNegateResult));
+    Goto(&end);
+  }
+
+  Bind(&end);
+  return result.value();
+}
+
+// ECMA#sec-samevalue
+// This algorithm differs from the Strict Equality Comparison Algorithm in its
+// treatment of signed zeroes and NaNs.
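+// For example (JS-level behavior, for illustration):
+//   Object.is(NaN, NaN) is true,  while NaN === NaN is false;
+//   Object.is(+0, -0)   is false, while +0 === -0  is true.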
+compiler::Node* CodeStubAssembler::SameValue(compiler::Node* lhs,
+                                             compiler::Node* rhs,
+                                             compiler::Node* context) {
+  Variable var_result(this, MachineType::PointerRepresentation());
+  Label strict_equal(this), out(this);
+
+  Node* const int_false = IntPtrConstant(0);
+  Node* const int_true = IntPtrConstant(1);
+
+  Label if_equal(this), if_notequal(this);
+  Branch(WordEqual(lhs, rhs), &if_equal, &if_notequal);
+
+  Bind(&if_equal);
+  {
+    // This covers the case when {lhs} == {rhs}. We can simply return true
+    // because SameValue considers two NaNs to be equal.
+
+    var_result.Bind(int_true);
+    Goto(&out);
+  }
+
+  Bind(&if_notequal);
+  {
+    // This covers the case when {lhs} != {rhs}. We only handle numbers here
+    // and defer to StrictEqual for the rest.
+
+    Node* const lhs_float = TryTaggedToFloat64(lhs, &strict_equal);
+    Node* const rhs_float = TryTaggedToFloat64(rhs, &strict_equal);
+
+    Label if_lhsisnan(this), if_lhsnotnan(this);
+    BranchIfFloat64IsNaN(lhs_float, &if_lhsisnan, &if_lhsnotnan);
+
+    Bind(&if_lhsisnan);
+    {
+      // Return true iff {rhs} is NaN.
+
+      Node* const result =
+          Select(Float64Equal(rhs_float, rhs_float), int_false, int_true,
+                 MachineType::PointerRepresentation());
+      var_result.Bind(result);
+      Goto(&out);
+    }
+
+    Bind(&if_lhsnotnan);
+    {
+      Label if_floatisequal(this), if_floatnotequal(this);
+      Branch(Float64Equal(lhs_float, rhs_float), &if_floatisequal,
+             &if_floatnotequal);
+
+      Bind(&if_floatisequal);
+      {
+        // We still need to handle the case when {lhs} and {rhs} are -0.0 and
+        // 0.0 (or vice versa). Compare the high word to
+        // distinguish between the two.
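+        // (Two equal, non-NaN doubles have identical bit patterns except for
+        // +0.0 and -0.0, which differ only in the sign bit; the sign bit is
+        // part of the high word, so comparing the high words is sufficient.)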
+
+        Node* const lhs_hi_word = Float64ExtractHighWord32(lhs_float);
+        Node* const rhs_hi_word = Float64ExtractHighWord32(rhs_float);
+
+        // If x is +0 and y is -0, return false.
+        // If x is -0 and y is +0, return false.
+
+        Node* const result = Word32Equal(lhs_hi_word, rhs_hi_word);
+        var_result.Bind(result);
+        Goto(&out);
+      }
+
+      Bind(&if_floatnotequal);
+      {
+        var_result.Bind(int_false);
+        Goto(&out);
+      }
+    }
+  }
+
+  Bind(&strict_equal);
+  {
+    Node* const is_equal = StrictEqual(kDontNegateResult, lhs, rhs, context);
+    Node* const result = WordEqual(is_equal, TrueConstant());
+    var_result.Bind(result);
+    Goto(&out);
+  }
+
+  Bind(&out);
+  return var_result.value();
+}
+
+compiler::Node* CodeStubAssembler::ForInFilter(compiler::Node* key,
+                                               compiler::Node* object,
+                                               compiler::Node* context) {
+  Label return_undefined(this, Label::kDeferred), return_to_name(this),
+      end(this);
+
+  Variable var_result(this, MachineRepresentation::kTagged);
+
+  Node* has_property =
+      HasProperty(object, key, context, Runtime::kForInHasProperty);
+
+  Branch(WordEqual(has_property, BooleanConstant(true)), &return_to_name,
+         &return_undefined);
+
+  Bind(&return_to_name);
+  {
+    var_result.Bind(ToName(context, key));
+    Goto(&end);
+  }
+
+  Bind(&return_undefined);
+  {
+    var_result.Bind(UndefinedConstant());
+    Goto(&end);
+  }
+
+  Bind(&end);
+  return var_result.value();
+}
+
+compiler::Node* CodeStubAssembler::HasProperty(
+    compiler::Node* object, compiler::Node* key, compiler::Node* context,
+    Runtime::FunctionId fallback_runtime_function_id) {
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Label call_runtime(this, Label::kDeferred), return_true(this),
+      return_false(this), end(this);
+
+  CodeStubAssembler::LookupInHolder lookup_property_in_holder =
+      [this, &return_true](Node* receiver, Node* holder, Node* holder_map,
+                           Node* holder_instance_type, Node* unique_name,
+                           Label* next_holder, Label* if_bailout) {
+        TryHasOwnProperty(holder, holder_map, holder_instance_type, unique_name,
+                          &return_true, next_holder, if_bailout);
+      };
+
+  CodeStubAssembler::LookupInHolder lookup_element_in_holder =
+      [this, &return_true](Node* receiver, Node* holder, Node* holder_map,
+                           Node* holder_instance_type, Node* index,
+                           Label* next_holder, Label* if_bailout) {
+        TryLookupElement(holder, holder_map, holder_instance_type, index,
+                         &return_true, next_holder, if_bailout);
+      };
+
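+  // As wired up here, the prototype chain walk uses the property lookup for
+  // name keys and the element lookup for array indices; a hit in any holder
+  // jumps straight to {return_true}, an exhausted chain lands in
+  // {return_false}, and a bailout falls back to the runtime call below.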
+  TryPrototypeChainLookup(object, key, lookup_property_in_holder,
+                          lookup_element_in_holder, &return_false,
+                          &call_runtime);
+
+  Variable result(this, MachineRepresentation::kTagged);
+  Bind(&return_true);
+  {
+    result.Bind(BooleanConstant(true));
+    Goto(&end);
+  }
+
+  Bind(&return_false);
+  {
+    result.Bind(BooleanConstant(false));
+    Goto(&end);
+  }
+
+  Bind(&call_runtime);
+  {
+    result.Bind(
+        CallRuntime(fallback_runtime_function_id, context, object, key));
+    Goto(&end);
+  }
+
+  Bind(&end);
+  return result.value();
+}
+
+compiler::Node* CodeStubAssembler::Typeof(compiler::Node* value,
+                                          compiler::Node* context) {
+  Variable result_var(this, MachineRepresentation::kTagged);
+
+  Label return_number(this, Label::kDeferred), if_oddball(this),
+      return_function(this), return_undefined(this), return_object(this),
+      return_string(this), return_result(this);
+
+  GotoIf(TaggedIsSmi(value), &return_number);
+
+  Node* map = LoadMap(value);
+
+  GotoIf(IsHeapNumberMap(map), &return_number);
+
+  Node* instance_type = LoadMapInstanceType(map);
+
+  GotoIf(Word32Equal(instance_type, Int32Constant(ODDBALL_TYPE)), &if_oddball);
+
+  Node* callable_or_undetectable_mask = Word32And(
+      LoadMapBitField(map),
+      Int32Constant(1 << Map::kIsCallable | 1 << Map::kIsUndetectable));
+
+  GotoIf(Word32Equal(callable_or_undetectable_mask,
+                     Int32Constant(1 << Map::kIsCallable)),
+         &return_function);
+
+  GotoUnless(Word32Equal(callable_or_undetectable_mask, Int32Constant(0)),
+             &return_undefined);
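+  // (For illustration: a callable, non-undetectable map yields "function"
+  // above, while any undetectable map, e.g. that of document.all (which is
+  // both callable and undetectable), yields "undefined" here instead.)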
+
+  GotoIf(IsJSReceiverInstanceType(instance_type), &return_object);
+
+  GotoIf(IsStringInstanceType(instance_type), &return_string);
+
+#define SIMD128_BRANCH(TYPE, Type, type, lane_count, lane_type) \
+  Label return_##type(this);                                    \
+  Node* type##_map = HeapConstant(factory()->type##_map());     \
+  GotoIf(WordEqual(map, type##_map), &return_##type);
+  SIMD128_TYPES(SIMD128_BRANCH)
+#undef SIMD128_BRANCH
+
+  CSA_ASSERT(this, Word32Equal(instance_type, Int32Constant(SYMBOL_TYPE)));
+  result_var.Bind(HeapConstant(isolate()->factory()->symbol_string()));
+  Goto(&return_result);
+
+  Bind(&return_number);
+  {
+    result_var.Bind(HeapConstant(isolate()->factory()->number_string()));
+    Goto(&return_result);
+  }
+
+  Bind(&if_oddball);
+  {
+    Node* type = LoadObjectField(value, Oddball::kTypeOfOffset);
+    result_var.Bind(type);
+    Goto(&return_result);
+  }
+
+  Bind(&return_function);
+  {
+    result_var.Bind(HeapConstant(isolate()->factory()->function_string()));
+    Goto(&return_result);
+  }
+
+  Bind(&return_undefined);
+  {
+    result_var.Bind(HeapConstant(isolate()->factory()->undefined_string()));
+    Goto(&return_result);
+  }
+
+  Bind(&return_object);
+  {
+    result_var.Bind(HeapConstant(isolate()->factory()->object_string()));
+    Goto(&return_result);
+  }
+
+  Bind(&return_string);
+  {
+    result_var.Bind(HeapConstant(isolate()->factory()->string_string()));
+    Goto(&return_result);
+  }
+
+#define SIMD128_BIND_RETURN(TYPE, Type, type, lane_count, lane_type)      \
+  Bind(&return_##type);                                                   \
+  {                                                                       \
+    result_var.Bind(HeapConstant(isolate()->factory()->type##_string())); \
+    Goto(&return_result);                                                 \
+  }
+  SIMD128_TYPES(SIMD128_BIND_RETURN)
+#undef SIMD128_BIND_RETURN
+
+  Bind(&return_result);
+  return result_var.value();
+}
+
+compiler::Node* CodeStubAssembler::InstanceOf(compiler::Node* object,
+                                              compiler::Node* callable,
+                                              compiler::Node* context) {
+  Label return_runtime(this, Label::kDeferred), end(this);
+  Variable result(this, MachineRepresentation::kTagged);
+
+  // Check that no one has installed a @@hasInstance handler anywhere (i.e.
+  // the @@hasInstance protector cell is still valid).
+  GotoUnless(
+      WordEqual(LoadObjectField(LoadRoot(Heap::kHasInstanceProtectorRootIndex),
+                                PropertyCell::kValueOffset),
+                SmiConstant(Smi::FromInt(Isolate::kProtectorValid))),
+      &return_runtime);
+
+  // Check if {callable} is a valid receiver.
+  GotoIf(TaggedIsSmi(callable), &return_runtime);
+  GotoUnless(IsCallableMap(LoadMap(callable)), &return_runtime);
+
+  // Use the inline OrdinaryHasInstance directly.
+  result.Bind(OrdinaryHasInstance(context, callable, object));
+  Goto(&end);
+
+  // TODO(bmeurer): Use GetPropertyStub here once available.
+  Bind(&return_runtime);
+  {
+    result.Bind(CallRuntime(Runtime::kInstanceOf, context, object, callable));
+    Goto(&end);
+  }
+
+  Bind(&end);
+  return result.value();
+}
+
+compiler::Node* CodeStubAssembler::NumberInc(compiler::Node* value) {
+  Variable var_result(this, MachineRepresentation::kTagged),
+      var_finc_value(this, MachineRepresentation::kFloat64);
+  Label if_issmi(this), if_isnotsmi(this), do_finc(this), end(this);
+  Branch(TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
+
+  Bind(&if_issmi);
+  {
+    // Try fast Smi addition first.
+    Node* one = SmiConstant(Smi::FromInt(1));
+    Node* pair = IntPtrAddWithOverflow(BitcastTaggedToWord(value),
+                                       BitcastTaggedToWord(one));
+    Node* overflow = Projection(1, pair);
+
+    // Check if the Smi addition overflowed.
+    Label if_overflow(this), if_notoverflow(this);
+    Branch(overflow, &if_overflow, &if_notoverflow);
+
+    Bind(&if_notoverflow);
+    var_result.Bind(Projection(0, pair));
+    Goto(&end);
+
+    Bind(&if_overflow);
+    {
+      var_finc_value.Bind(SmiToFloat64(value));
+      Goto(&do_finc);
+    }
+  }
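+  // (For example, incrementing Smi::kMaxValue overflows the Smi range and
+  // takes the floating-point path below, allocating a fresh HeapNumber.)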
+
+  Bind(&if_isnotsmi);
+  {
+    // Check if the value is a HeapNumber.
+    CSA_ASSERT(this, IsHeapNumberMap(LoadMap(value)));
+
+    // Load the HeapNumber value.
+    var_finc_value.Bind(LoadHeapNumberValue(value));
+    Goto(&do_finc);
+  }
+
+  Bind(&do_finc);
+  {
+    Node* finc_value = var_finc_value.value();
+    Node* one = Float64Constant(1.0);
+    Node* finc_result = Float64Add(finc_value, one);
+    var_result.Bind(AllocateHeapNumberWithValue(finc_result));
+    Goto(&end);
+  }
+
+  Bind(&end);
+  return var_result.value();
+}
+
+compiler::Node* CodeStubAssembler::CreateArrayIterator(
+    compiler::Node* array, compiler::Node* array_map,
+    compiler::Node* array_type, compiler::Node* context, IterationKind mode) {
+  int kBaseMapIndex = 0;
+  switch (mode) {
+    case IterationKind::kKeys:
+      kBaseMapIndex = Context::TYPED_ARRAY_KEY_ITERATOR_MAP_INDEX;
+      break;
+    case IterationKind::kValues:
+      kBaseMapIndex = Context::UINT8_ARRAY_VALUE_ITERATOR_MAP_INDEX;
+      break;
+    case IterationKind::kEntries:
+      kBaseMapIndex = Context::UINT8_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX;
+      break;
+  }
+
+  // Fast Array iterator map index:
+  // (kBaseMapIndex + kFastIteratorOffset) + ElementsKind (for JSArrays)
+  // kBaseMapIndex + (ElementsKind - UINT8_ELEMENTS) (for JSTypedArrays)
+  const int kFastIteratorOffset =
+      Context::FAST_SMI_ARRAY_VALUE_ITERATOR_MAP_INDEX -
+      Context::UINT8_ARRAY_VALUE_ITERATOR_MAP_INDEX;
+  STATIC_ASSERT(kFastIteratorOffset ==
+                (Context::FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX -
+                 Context::UINT8_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX));
+
+  // Slow Array iterator map index: (kBaseMapIndex + kSlowIteratorOffset)
+  const int kSlowIteratorOffset =
+      Context::GENERIC_ARRAY_VALUE_ITERATOR_MAP_INDEX -
+      Context::UINT8_ARRAY_VALUE_ITERATOR_MAP_INDEX;
+  STATIC_ASSERT(kSlowIteratorOffset ==
+                (Context::GENERIC_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX -
+                 Context::UINT8_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX));
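+  // (Worked example, assuming the iterator maps are laid out consecutively
+  // by ElementsKind as the offsets above imply: for kValues and a fast
+  // JSArray with FAST_ELEMENTS, the index is kBaseMapIndex +
+  // kFastIteratorOffset + FAST_ELEMENTS, i.e. the value iterator map slot
+  // for exactly that elements kind.)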
+
+  // Assert: Type(array) is Object
+  CSA_ASSERT(this, IsJSReceiverInstanceType(array_type));
+
+  Variable var_result(this, MachineRepresentation::kTagged);
+  Variable var_map_index(this, MachineType::PointerRepresentation());
+  Variable var_array_map(this, MachineRepresentation::kTagged);
+
+  Label return_result(this);
+  Label allocate_iterator(this);
+
+  if (mode == IterationKind::kKeys) {
+    // There are only two key iterator maps, so branch depending on whether
+    // the receiver is a TypedArray.
+
+    Label if_istypedarray(this), if_isgeneric(this);
+
+    Branch(Word32Equal(array_type, Int32Constant(JS_TYPED_ARRAY_TYPE)),
+           &if_istypedarray, &if_isgeneric);
+
+    Bind(&if_isgeneric);
+    {
+      Label if_isfast(this), if_isslow(this);
+      BranchIfFastJSArray(array, context, &if_isfast, &if_isslow);
+
+      Bind(&if_isfast);
+      {
+        var_map_index.Bind(
+            IntPtrConstant(Context::FAST_ARRAY_KEY_ITERATOR_MAP_INDEX));
+        var_array_map.Bind(array_map);
+        Goto(&allocate_iterator);
+      }
+
+      Bind(&if_isslow);
+      {
+        var_map_index.Bind(
+            IntPtrConstant(Context::GENERIC_ARRAY_KEY_ITERATOR_MAP_INDEX));
+        var_array_map.Bind(UndefinedConstant());
+        Goto(&allocate_iterator);
+      }
+    }
+
+    Bind(&if_istypedarray);
+    {
+      var_map_index.Bind(
+          IntPtrConstant(Context::TYPED_ARRAY_KEY_ITERATOR_MAP_INDEX));
+      var_array_map.Bind(UndefinedConstant());
+      Goto(&allocate_iterator);
+    }
+  } else {
+    Label if_istypedarray(this), if_isgeneric(this);
+    Branch(Word32Equal(array_type, Int32Constant(JS_TYPED_ARRAY_TYPE)),
+           &if_istypedarray, &if_isgeneric);
+
+    Bind(&if_isgeneric);
+    {
+      Label if_isfast(this), if_isslow(this);
+      BranchIfFastJSArray(array, context, &if_isfast, &if_isslow);
+
+      Bind(&if_isfast);
+      {
+        Label if_ispacked(this), if_isholey(this);
+        Node* elements_kind = LoadMapElementsKind(array_map);
+        Branch(IsHoleyFastElementsKind(elements_kind), &if_isholey,
+               &if_ispacked);
+
+        Bind(&if_isholey);
+        {
+          // Fast holey JSArrays can treat the hole as undefined only if the
+          // protector cell is valid and the prototype chain is unchanged from
+          // its initial state (because the protector cell is only tracked for
+          // the initial Array and Object prototypes). Check these conditions
+          // here, and take the slow path if any of them fail.
+          Node* protector_cell = LoadRoot(Heap::kArrayProtectorRootIndex);
+          DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
+          GotoUnless(
+              WordEqual(
+                  LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+                  SmiConstant(Smi::FromInt(Isolate::kProtectorValid))),
+              &if_isslow);
+
+          Node* native_context = LoadNativeContext(context);
+
+          Node* prototype = LoadMapPrototype(array_map);
+          Node* array_prototype = LoadContextElement(
+              native_context, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
+          GotoUnless(WordEqual(prototype, array_prototype), &if_isslow);
+
+          Node* map = LoadMap(prototype);
+          prototype = LoadMapPrototype(map);
+          Node* object_prototype = LoadContextElement(
+              native_context, Context::INITIAL_OBJECT_PROTOTYPE_INDEX);
+          GotoUnless(WordEqual(prototype, object_prototype), &if_isslow);
+
+          map = LoadMap(prototype);
+          prototype = LoadMapPrototype(map);
+          Branch(IsNull(prototype), &if_ispacked, &if_isslow);
+        }
+        Bind(&if_ispacked);
+        {
+          Node* map_index =
+              IntPtrAdd(IntPtrConstant(kBaseMapIndex + kFastIteratorOffset),
+                        LoadMapElementsKind(array_map));
+          CSA_ASSERT(this, IntPtrGreaterThanOrEqual(
+                               map_index, IntPtrConstant(kBaseMapIndex +
+                                                         kFastIteratorOffset)));
+          CSA_ASSERT(this, IntPtrLessThan(map_index,
+                                          IntPtrConstant(kBaseMapIndex +
+                                                         kSlowIteratorOffset)));
+
+          var_map_index.Bind(map_index);
+          var_array_map.Bind(array_map);
+          Goto(&allocate_iterator);
+        }
+      }
+
+      Bind(&if_isslow);
+      {
+        Node* map_index = IntPtrAdd(IntPtrConstant(kBaseMapIndex),
+                                    IntPtrConstant(kSlowIteratorOffset));
+        var_map_index.Bind(map_index);
+        var_array_map.Bind(UndefinedConstant());
+        Goto(&allocate_iterator);
+      }
+    }
+
+    Bind(&if_istypedarray);
+    {
+      Node* map_index =
+          IntPtrAdd(IntPtrConstant(kBaseMapIndex - UINT8_ELEMENTS),
+                    LoadMapElementsKind(array_map));
+      CSA_ASSERT(
+          this, IntPtrLessThan(map_index, IntPtrConstant(kBaseMapIndex +
+                                                         kFastIteratorOffset)));
+      CSA_ASSERT(this, IntPtrGreaterThanOrEqual(map_index,
+                                                IntPtrConstant(kBaseMapIndex)));
+      var_map_index.Bind(map_index);
+      var_array_map.Bind(UndefinedConstant());
+      Goto(&allocate_iterator);
+    }
+  }
+
+  Bind(&allocate_iterator);
+  {
+    Node* map =
+        LoadFixedArrayElement(LoadNativeContext(context), var_map_index.value(),
+                              0, CodeStubAssembler::INTPTR_PARAMETERS);
+    var_result.Bind(AllocateJSArrayIterator(array, var_array_map.value(), map));
+    Goto(&return_result);
+  }
+
+  Bind(&return_result);
+  return var_result.value();
+}
+
+compiler::Node* CodeStubAssembler::AllocateJSArrayIterator(
+    compiler::Node* array, compiler::Node* array_map, compiler::Node* map) {
+  Node* iterator = Allocate(JSArrayIterator::kSize);
+  StoreMapNoWriteBarrier(iterator, map);
+  StoreObjectFieldRoot(iterator, JSArrayIterator::kPropertiesOffset,
+                       Heap::kEmptyFixedArrayRootIndex);
+  StoreObjectFieldRoot(iterator, JSArrayIterator::kElementsOffset,
+                       Heap::kEmptyFixedArrayRootIndex);
+  StoreObjectFieldNoWriteBarrier(iterator,
+                                 JSArrayIterator::kIteratedObjectOffset, array);
+  StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset,
+                                 SmiConstant(Smi::FromInt(0)));
+  StoreObjectFieldNoWriteBarrier(
+      iterator, JSArrayIterator::kIteratedObjectMapOffset, array_map);
+  return iterator;
+}
+
+compiler::Node* CodeStubAssembler::IsDetachedBuffer(compiler::Node* buffer) {
+  CSA_ASSERT(this, HasInstanceType(buffer, JS_ARRAY_BUFFER_TYPE));
+
+  Node* buffer_bit_field = LoadObjectField(
+      buffer, JSArrayBuffer::kBitFieldOffset, MachineType::Uint32());
+  Node* was_neutered_mask = Int32Constant(JSArrayBuffer::WasNeutered::kMask);
+
+  return Word32NotEqual(Word32And(buffer_bit_field, was_neutered_mask),
+                        Int32Constant(0));
+}
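+// (A JSArrayBuffer becomes detached, "neutered" in this codebase, e.g. when
+// it is transferred via postMessage; callers typically use this predicate to
+// bail out before touching the backing store.)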
+
+CodeStubArguments::CodeStubArguments(CodeStubAssembler* assembler,
+                                     compiler::Node* argc,
+                                     CodeStubAssembler::ParameterMode mode)
+    : assembler_(assembler),
+      argc_(argc),
+      arguments_(nullptr),
+      fp_(assembler->LoadFramePointer()) {
+  compiler::Node* offset = assembler->ElementOffsetFromIndex(
+      argc_, FAST_ELEMENTS, mode,
+      (StandardFrameConstants::kFixedSlotCountAboveFp - 1) * kPointerSize);
+  arguments_ = assembler_->IntPtrAddFoldConstants(fp_, offset);
+  if (mode == CodeStubAssembler::INTEGER_PARAMETERS) {
+    argc_ = assembler->ChangeInt32ToIntPtr(argc_);
+  } else if (mode == CodeStubAssembler::SMI_PARAMETERS) {
+    argc_ = assembler->SmiUntag(argc_);
+  }
+}
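+// (Layout sketch implied by the accessors below: arguments_ points at the
+// slot of argument 0, GetReceiver() reads one slot above it, and AtIndex(i)
+// reads i slots below it, i.e. higher argument indices sit at lower
+// addresses.)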
+
+compiler::Node* CodeStubArguments::GetReceiver() {
+  return assembler_->Load(MachineType::AnyTagged(), arguments_,
+                          assembler_->IntPtrConstant(kPointerSize));
+}
+
+compiler::Node* CodeStubArguments::AtIndex(
+    compiler::Node* index, CodeStubAssembler::ParameterMode mode) {
+  typedef compiler::Node Node;
+  Node* negated_index = assembler_->IntPtrSubFoldConstants(
+      assembler_->IntPtrOrSmiConstant(0, mode), index);
+  Node* offset =
+      assembler_->ElementOffsetFromIndex(negated_index, FAST_ELEMENTS, mode, 0);
+  return assembler_->Load(MachineType::AnyTagged(), arguments_, offset);
+}
+
+compiler::Node* CodeStubArguments::AtIndex(int index) {
+  return AtIndex(assembler_->IntPtrConstant(index));
+}
+
+void CodeStubArguments::ForEach(const CodeStubAssembler::VariableList& vars,
+                                CodeStubArguments::ForEachBodyFunction body,
+                                compiler::Node* first, compiler::Node* last,
+                                CodeStubAssembler::ParameterMode mode) {
+  assembler_->Comment("CodeStubArguments::ForEach");
+  DCHECK_IMPLIES(first == nullptr || last == nullptr,
+                 mode == CodeStubAssembler::INTPTR_PARAMETERS);
+  if (first == nullptr) {
+    first = assembler_->IntPtrOrSmiConstant(0, mode);
+  }
+  if (last == nullptr) {
+    last = argc_;
+  }
+  compiler::Node* start = assembler_->IntPtrSubFoldConstants(
+      arguments_,
+      assembler_->ElementOffsetFromIndex(first, FAST_ELEMENTS, mode));
+  compiler::Node* end = assembler_->IntPtrSubFoldConstants(
+      arguments_,
+      assembler_->ElementOffsetFromIndex(last, FAST_ELEMENTS, mode));
+  assembler_->BuildFastLoop(
+      vars, MachineType::PointerRepresentation(), start, end,
+      [body](CodeStubAssembler* assembler, compiler::Node* current) {
+        Node* arg = assembler->Load(MachineType::AnyTagged(), current);
+        body(assembler, arg);
+      },
+      -kPointerSize, CodeStubAssembler::IndexAdvanceMode::kPost);
+}
+
+void CodeStubArguments::PopAndReturn(compiler::Node* value) {
+  assembler_->PopAndReturn(
+      assembler_->IntPtrAddFoldConstants(argc_, assembler_->IntPtrConstant(1)),
+      value);
+}
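+// (The argc_ + 1 above pops the formal arguments plus the receiver slot.)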
+
+compiler::Node* CodeStubAssembler::IsFastElementsKind(
+    compiler::Node* elements_kind) {
+  return Uint32LessThanOrEqual(elements_kind,
+                               Int32Constant(LAST_FAST_ELEMENTS_KIND));
+}
+
+compiler::Node* CodeStubAssembler::IsHoleyFastElementsKind(
+    compiler::Node* elements_kind) {
+  CSA_ASSERT(this, IsFastElementsKind(elements_kind));
+
+  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == (FAST_SMI_ELEMENTS | 1));
+  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == (FAST_ELEMENTS | 1));
+  STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == (FAST_DOUBLE_ELEMENTS | 1));
+
+  // Holey fast elements kinds are exactly the fast kinds with bit 0 set, as
+  // the STATIC_ASSERTs above establish.
+  Node* holey_elements = Word32And(elements_kind, Int32Constant(1));
+  return Word32Equal(holey_elements, Int32Constant(1));
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/code-stub-assembler.h b/src/code-stub-assembler.h
index c55f48c..f8f2686 100644
--- a/src/code-stub-assembler.h
+++ b/src/code-stub-assembler.h
@@ -8,6 +8,7 @@
 #include <functional>
 
 #include "src/compiler/code-assembler.h"
+#include "src/globals.h"
 #include "src/objects.h"
 
 namespace v8 {
@@ -21,8 +22,10 @@
 
 #define HEAP_CONSTANT_LIST(V)                 \
   V(BooleanMap, BooleanMap)                   \
+  V(CodeMap, CodeMap)                         \
   V(empty_string, EmptyString)                \
   V(EmptyFixedArray, EmptyFixedArray)         \
+  V(FalseValue, False)                        \
   V(FixedArrayMap, FixedArrayMap)             \
   V(FixedCOWArrayMap, FixedCOWArrayMap)       \
   V(FixedDoubleArrayMap, FixedDoubleArrayMap) \
@@ -31,6 +34,7 @@
   V(NanValue, Nan)                            \
   V(NullValue, Null)                          \
   V(TheHoleValue, TheHole)                    \
+  V(TrueValue, True)                          \
   V(UndefinedValue, Undefined)
 
 // Provides JavaScript-specific "macro-assembler" functionality on top of the
@@ -38,7 +42,7 @@
 // it's possible to add JavaScript-specific useful CodeAssembler "macros"
 // without modifying files in the compiler directory (and requiring a review
 // from a compiler directory OWNER).
-class CodeStubAssembler : public compiler::CodeAssembler {
+class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
  public:
   // Create with CallStub linkage.
   // |result_size| specifies the number of results returned by the stub.
@@ -99,10 +103,19 @@
 
   compiler::Node* IntPtrOrSmiConstant(int value, ParameterMode mode);
 
+  compiler::Node* IntPtrAddFoldConstants(compiler::Node* left,
+                                         compiler::Node* right);
+  compiler::Node* IntPtrSubFoldConstants(compiler::Node* left,
+                                         compiler::Node* right);
+  // Round the 32-bit payload of the provided word up to the next power of two.
+  compiler::Node* IntPtrRoundUpToPowerOfTwo32(compiler::Node* value);
+  compiler::Node* IntPtrMax(compiler::Node* left, compiler::Node* right);
+
   // Float64 operations.
   compiler::Node* Float64Ceil(compiler::Node* x);
   compiler::Node* Float64Floor(compiler::Node* x);
   compiler::Node* Float64Round(compiler::Node* x);
+  compiler::Node* Float64RoundToEven(compiler::Node* x);
   compiler::Node* Float64Trunc(compiler::Node* x);
 
   // Tag a Word as a Smi value.
@@ -119,9 +132,7 @@
 
   // Smi operations.
   compiler::Node* SmiAdd(compiler::Node* a, compiler::Node* b);
-  compiler::Node* SmiAddWithOverflow(compiler::Node* a, compiler::Node* b);
   compiler::Node* SmiSub(compiler::Node* a, compiler::Node* b);
-  compiler::Node* SmiSubWithOverflow(compiler::Node* a, compiler::Node* b);
   compiler::Node* SmiEqual(compiler::Node* a, compiler::Node* b);
   compiler::Node* SmiAbove(compiler::Node* a, compiler::Node* b);
   compiler::Node* SmiAboveOrEqual(compiler::Node* a, compiler::Node* b);
@@ -135,41 +146,51 @@
   // Computes a * b for Smi inputs a and b; result is not necessarily a Smi.
   compiler::Node* SmiMul(compiler::Node* a, compiler::Node* b);
   compiler::Node* SmiOr(compiler::Node* a, compiler::Node* b) {
-    return WordOr(a, b);
+    return BitcastWordToTaggedSigned(
+        WordOr(BitcastTaggedToWord(a), BitcastTaggedToWord(b)));
   }
 
+  // Smi | HeapNumber operations.
+  compiler::Node* NumberInc(compiler::Node* value);
+
   // Allocate an object of the given size.
   compiler::Node* Allocate(compiler::Node* size, AllocationFlags flags = kNone);
   compiler::Node* Allocate(int size, AllocationFlags flags = kNone);
   compiler::Node* InnerAllocate(compiler::Node* previous, int offset);
   compiler::Node* InnerAllocate(compiler::Node* previous,
                                 compiler::Node* offset);
+  compiler::Node* IsRegularHeapObjectSize(compiler::Node* size);
 
-  void Assert(compiler::Node* condition);
+  typedef std::function<compiler::Node*()> ConditionBody;
+  void Assert(ConditionBody condition_body, const char* string = nullptr,
+              const char* file = nullptr, int line = 0);
 
   // Check a value for smi-ness
-  compiler::Node* WordIsSmi(compiler::Node* a);
+  compiler::Node* TaggedIsSmi(compiler::Node* a);
   // Check that the value is a non-negative smi.
   compiler::Node* WordIsPositiveSmi(compiler::Node* a);
+  // Check that a word has a word-aligned address.
+  compiler::Node* WordIsWordAligned(compiler::Node* word);
+  compiler::Node* WordIsPowerOfTwo(compiler::Node* value);
 
   void BranchIfSmiEqual(compiler::Node* a, compiler::Node* b, Label* if_true,
                         Label* if_false) {
-    BranchIf(SmiEqual(a, b), if_true, if_false);
+    Branch(SmiEqual(a, b), if_true, if_false);
   }
 
   void BranchIfSmiLessThan(compiler::Node* a, compiler::Node* b, Label* if_true,
                            Label* if_false) {
-    BranchIf(SmiLessThan(a, b), if_true, if_false);
+    Branch(SmiLessThan(a, b), if_true, if_false);
   }
 
   void BranchIfSmiLessThanOrEqual(compiler::Node* a, compiler::Node* b,
                                   Label* if_true, Label* if_false) {
-    BranchIf(SmiLessThanOrEqual(a, b), if_true, if_false);
+    Branch(SmiLessThanOrEqual(a, b), if_true, if_false);
   }
 
   void BranchIfFloat64IsNaN(compiler::Node* value, Label* if_true,
                             Label* if_false) {
-    BranchIfFloat64Equal(value, value, if_false, if_true);
+    Branch(Float64Equal(value, value), if_false, if_true);
   }
 
   // Branches to {if_true} if ToBoolean applied to {value} yields true,
@@ -186,6 +207,10 @@
                          if_notequal);
   }
 
+  void BranchIfJSReceiver(compiler::Node* object, Label* if_true,
+                          Label* if_false);
+  void BranchIfJSObject(compiler::Node* object, Label* if_true,
+                        Label* if_false);
   void BranchIfFastJSArray(compiler::Node* object, compiler::Node* context,
                            Label* if_true, Label* if_false);
 
@@ -221,8 +246,8 @@
   compiler::Node* LoadMap(compiler::Node* object);
   // Load the instance type of a HeapObject.
   compiler::Node* LoadInstanceType(compiler::Node* object);
-  // Checks that given heap object has given instance type.
-  void AssertInstanceType(compiler::Node* object, InstanceType instance_type);
+  // Compare the instance type of the object against the provided one.
+  compiler::Node* HasInstanceType(compiler::Node* object, InstanceType type);
   // Load the properties backing store of a JSObject.
   compiler::Node* LoadProperties(compiler::Node* object);
   // Load the elements backing store of a JSObject.
@@ -247,6 +272,10 @@
   compiler::Node* LoadMapDescriptors(compiler::Node* map);
   // Load the prototype of a map.
   compiler::Node* LoadMapPrototype(compiler::Node* map);
+  // Load the prototype info of a map. The caller has to check whether the
+  // result is a PrototypeInfo object or not.
+  compiler::Node* LoadMapPrototypeInfo(compiler::Node* map,
+                                       Label* if_has_no_proto_info);
   // Load the instance size of a Map.
   compiler::Node* LoadMapInstanceSize(compiler::Node* map);
   // Load the inobject properties count of a Map (valid only for JSObjects).
@@ -255,6 +284,8 @@
   compiler::Node* LoadMapConstructorFunctionIndex(compiler::Node* map);
   // Load the constructor of a Map (equivalent to Map::GetConstructor()).
   compiler::Node* LoadMapConstructor(compiler::Node* map);
+  // Check whether the map is for an object with slow (dictionary) properties.
+  compiler::Node* IsDictionaryMap(compiler::Node* map);
 
   // Load the hash field of a name as a uint32 value.
   compiler::Node* LoadNameHashField(compiler::Node* name);
@@ -269,6 +300,7 @@
   // Load value field of a JSValue object.
   compiler::Node* LoadJSValueValue(compiler::Node* object);
   // Load value field of a WeakCell object.
+  compiler::Node* LoadWeakCellValueUnchecked(compiler::Node* weak_cell);
   compiler::Node* LoadWeakCellValue(compiler::Node* weak_cell,
                                     Label* if_cleared = nullptr);
 
@@ -293,9 +325,20 @@
   compiler::Node* LoadDoubleWithHoleCheck(
       compiler::Node* base, compiler::Node* offset, Label* if_hole,
       MachineType machine_type = MachineType::Float64());
+  compiler::Node* LoadFixedTypedArrayElement(
+      compiler::Node* data_pointer, compiler::Node* index_node,
+      ElementsKind elements_kind,
+      ParameterMode parameter_mode = INTEGER_PARAMETERS);
 
   // Context manipulation
   compiler::Node* LoadContextElement(compiler::Node* context, int slot_index);
+  compiler::Node* LoadContextElement(compiler::Node* context,
+                                     compiler::Node* slot_index);
+  compiler::Node* StoreContextElement(compiler::Node* context, int slot_index,
+                                      compiler::Node* value);
+  compiler::Node* StoreContextElement(compiler::Node* context,
+                                      compiler::Node* slot_index,
+                                      compiler::Node* value);
   compiler::Node* LoadNativeContext(compiler::Node* context);
 
   compiler::Node* LoadJSArrayElementsMap(ElementsKind kind,
@@ -323,6 +366,14 @@
                                        Heap::RootListIndex root);
   // Store an array element to a FixedArray.
   compiler::Node* StoreFixedArrayElement(
+      compiler::Node* object, int index, compiler::Node* value,
+      WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
+      ParameterMode parameter_mode = INTEGER_PARAMETERS) {
+    return StoreFixedArrayElement(object, Int32Constant(index), value,
+                                  barrier_mode, parameter_mode);
+  }
+
+  compiler::Node* StoreFixedArrayElement(
       compiler::Node* object, compiler::Node* index, compiler::Node* value,
       WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
       ParameterMode parameter_mode = INTEGER_PARAMETERS);
@@ -331,19 +382,27 @@
       compiler::Node* object, compiler::Node* index, compiler::Node* value,
       ParameterMode parameter_mode = INTEGER_PARAMETERS);
 
+  void StoreFieldsNoWriteBarrier(compiler::Node* start_address,
+                                 compiler::Node* end_address,
+                                 compiler::Node* value);
+
   // Allocate a HeapNumber without initializing its value.
   compiler::Node* AllocateHeapNumber(MutableMode mode = IMMUTABLE);
   // Allocate a HeapNumber with a specific value.
   compiler::Node* AllocateHeapNumberWithValue(compiler::Node* value,
                                               MutableMode mode = IMMUTABLE);
   // Allocate a SeqOneByteString with the given length.
-  compiler::Node* AllocateSeqOneByteString(int length);
-  compiler::Node* AllocateSeqOneByteString(compiler::Node* context,
-                                           compiler::Node* length);
+  compiler::Node* AllocateSeqOneByteString(int length,
+                                           AllocationFlags flags = kNone);
+  compiler::Node* AllocateSeqOneByteString(
+      compiler::Node* context, compiler::Node* length,
+      ParameterMode mode = INTPTR_PARAMETERS, AllocationFlags flags = kNone);
   // Allocate a SeqTwoByteString with the given length.
-  compiler::Node* AllocateSeqTwoByteString(int length);
-  compiler::Node* AllocateSeqTwoByteString(compiler::Node* context,
-                                           compiler::Node* length);
+  compiler::Node* AllocateSeqTwoByteString(int length,
+                                           AllocationFlags flags = kNone);
+  compiler::Node* AllocateSeqTwoByteString(
+      compiler::Node* context, compiler::Node* length,
+      ParameterMode mode = INTPTR_PARAMETERS, AllocationFlags flags = kNone);
 
   // Allocate a SlicedOneByteString with the given length, parent and offset.
   // |length| and |offset| are expected to be tagged.
@@ -356,6 +415,27 @@
                                               compiler::Node* parent,
                                               compiler::Node* offset);
 
+  // Allocate a one-byte ConsString with the given length, first and second
+  // parts. |length| is expected to be tagged, and |first| and |second| are
+  // expected to be one-byte strings.
+  compiler::Node* AllocateOneByteConsString(compiler::Node* length,
+                                            compiler::Node* first,
+                                            compiler::Node* second,
+                                            AllocationFlags flags = kNone);
+  // Allocate a two-byte ConsString with the given length, first and second
+  // parts. |length| is expected to be tagged, and |first| and |second| are
+  // expected to be two-byte strings.
+  compiler::Node* AllocateTwoByteConsString(compiler::Node* length,
+                                            compiler::Node* first,
+                                            compiler::Node* second,
+                                            AllocationFlags flags = kNone);
+
+  // Allocate an appropriate one- or two-byte ConsString with the first and
+  // second parts specified by |first| and |second|.
+  compiler::Node* NewConsString(compiler::Node* context, compiler::Node* length,
+                                compiler::Node* left, compiler::Node* right,
+                                AllocationFlags flags = kNone);
+
   // Allocate a RegExpResult with the given length (the number of captures,
   // including the match itself), index (the index where the match starts),
   // and input string. |length| and |index| are expected to be tagged, and
@@ -365,6 +445,22 @@
                                        compiler::Node* index,
                                        compiler::Node* input);
 
+  compiler::Node* AllocateNameDictionary(int capacity);
+  compiler::Node* AllocateNameDictionary(compiler::Node* capacity);
+
+  compiler::Node* AllocateJSObjectFromMap(compiler::Node* map,
+                                          compiler::Node* properties = nullptr,
+                                          compiler::Node* elements = nullptr);
+
+  void InitializeJSObjectFromMap(compiler::Node* object, compiler::Node* map,
+                                 compiler::Node* size,
+                                 compiler::Node* properties = nullptr,
+                                 compiler::Node* elements = nullptr);
+
+  void InitializeJSObjectBody(compiler::Node* object, compiler::Node* map,
+                              compiler::Node* size,
+                              int start_offset = JSObject::kHeaderSize);
+
   // Allocate a JSArray without elements and initialize the header fields.
   compiler::Node* AllocateUninitializedJSArrayWithoutElements(
       ElementsKind kind, compiler::Node* array_map, compiler::Node* length,
@@ -389,6 +485,17 @@
                                      ParameterMode mode = INTEGER_PARAMETERS,
                                      AllocationFlags flags = kNone);
 
+  // Perform CreateArrayIterator (ES6 #sec-createarrayiterator).
+  compiler::Node* CreateArrayIterator(compiler::Node* array,
+                                      compiler::Node* array_map,
+                                      compiler::Node* array_type,
+                                      compiler::Node* context,
+                                      IterationKind mode);
+
+  compiler::Node* AllocateJSArrayIterator(compiler::Node* array,
+                                          compiler::Node* array_map,
+                                          compiler::Node* map);
+
   void FillFixedArrayWithValue(ElementsKind kind, compiler::Node* array,
                                compiler::Node* from_index,
                                compiler::Node* to_index,
@@ -416,14 +523,20 @@
       ParameterMode mode = INTEGER_PARAMETERS);
 
   // Copies |character_count| elements from |from_string| to |to_string|
-  // starting at the |from_index|'th character. |from_index| and
-  // |character_count| must be Smis s.t.
-  // 0 <= |from_index| <= |from_index| + |character_count| < from_string.length.
+  // starting at the |from_index|'th character. |from_string| and |to_string|
+  // can either be one-byte strings or two-byte strings, although if
+  // |from_string| is two-byte, then |to_string| must be two-byte.
+  // |from_index|, |to_index| and |character_count| must be either Smis or
+  // intptr_ts depending on |mode| s.t. 0 <= |from_index| <= |from_index| +
+  // |character_count| <= from_string.length and 0 <= |to_index| <= |to_index| +
+  // |character_count| <= to_string.length.
   void CopyStringCharacters(compiler::Node* from_string,
                             compiler::Node* to_string,
                             compiler::Node* from_index,
+                            compiler::Node* to_index,
                             compiler::Node* character_count,
-                            String::Encoding encoding);
+                            String::Encoding from_encoding,
+                            String::Encoding to_encoding, ParameterMode mode);
 
   // Loads an element from |array| of |from_kind| elements by given |offset|
   // (NOTE: not index!), does a hole check if |if_hole| is provided and
@@ -466,6 +579,8 @@
                                    int base_allocation_size,
                                    compiler::Node* allocation_site);
 
+  compiler::Node* TryTaggedToFloat64(compiler::Node* value,
+                                     Label* if_valueisnotnumber);
   compiler::Node* TruncateTaggedToFloat64(compiler::Node* context,
                                           compiler::Node* value);
   compiler::Node* TruncateTaggedToWord32(compiler::Node* context,
@@ -498,8 +613,31 @@
                                          char const* method_name);
 
   // Type checks.
+  // Check whether the map is for an object with special properties, such as a
+  // JSProxy or an object with interceptors.
+  compiler::Node* IsSpecialReceiverMap(compiler::Node* map);
+  compiler::Node* IsSpecialReceiverInstanceType(compiler::Node* instance_type);
   compiler::Node* IsStringInstanceType(compiler::Node* instance_type);
+  compiler::Node* IsString(compiler::Node* object);
+  compiler::Node* IsJSObject(compiler::Node* object);
+  compiler::Node* IsJSGlobalProxy(compiler::Node* object);
   compiler::Node* IsJSReceiverInstanceType(compiler::Node* instance_type);
+  compiler::Node* IsJSReceiver(compiler::Node* object);
+  compiler::Node* IsMap(compiler::Node* object);
+  compiler::Node* IsCallableMap(compiler::Node* map);
+  compiler::Node* IsName(compiler::Node* object);
+  compiler::Node* IsJSValue(compiler::Node* object);
+  compiler::Node* IsJSArray(compiler::Node* object);
+  compiler::Node* IsNativeContext(compiler::Node* object);
+  compiler::Node* IsWeakCell(compiler::Node* object);
+  compiler::Node* IsFixedDoubleArray(compiler::Node* object);
+  compiler::Node* IsHashTable(compiler::Node* object);
+  compiler::Node* IsDictionary(compiler::Node* object);
+  compiler::Node* IsUnseededNumberDictionary(compiler::Node* object);
+
+  // ElementsKind helpers:
+  compiler::Node* IsFastElementsKind(compiler::Node* elements_kind);
+  compiler::Node* IsHoleyFastElementsKind(compiler::Node* elements_kind);
 
   // String helpers.
   // Load a character from a String (might flatten a ConsString).
@@ -512,6 +650,20 @@
   compiler::Node* SubString(compiler::Node* context, compiler::Node* string,
                             compiler::Node* from, compiler::Node* to);
 
+  // Return a new string object produced by concatenating |first| with |second|.
+  compiler::Node* StringAdd(compiler::Node* context, compiler::Node* first,
+                            compiler::Node* second,
+                            AllocationFlags flags = kNone);
+
+  // Return the first index >= {from} at which {needle_char} was found in
+  // {string}, or -1 if such an index does not exist. The returned value is
+  // a Smi, {string} is expected to be a String, {needle_char} is an intptr,
+  // and {from} is expected to be tagged.
+  compiler::Node* StringIndexOfChar(compiler::Node* context,
+                                    compiler::Node* string,
+                                    compiler::Node* needle_char,
+                                    compiler::Node* from);
+
   compiler::Node* StringFromCodePoint(compiler::Node* codepoint,
                                       UnicodeEncoding encoding);
 
@@ -519,6 +671,8 @@
   // Convert a String to a Number.
   compiler::Node* StringToNumber(compiler::Node* context,
                                  compiler::Node* input);
+  compiler::Node* NumberToString(compiler::Node* context,
+                                 compiler::Node* input);
   // Convert an object to a name.
   compiler::Node* ToName(compiler::Node* context, compiler::Node* input);
   // Convert a Non-Number object to a Number.
@@ -527,6 +681,16 @@
   // Convert any object to a Number.
   compiler::Node* ToNumber(compiler::Node* context, compiler::Node* input);
 
+  // Convert any object to a String.
+  compiler::Node* ToString(compiler::Node* context, compiler::Node* input);
+
+  // Convert any object to a Primitive.
+  compiler::Node* JSReceiverToPrimitive(compiler::Node* context,
+                                        compiler::Node* input);
+
+  // Convert a String to a flat String.
+  compiler::Node* FlattenString(compiler::Node* string);
+
   enum ToIntegerTruncationMode {
     kNoTruncation,
     kTruncateMinusZero,
@@ -539,20 +703,50 @@
   // Returns a node that contains a decoded (unsigned!) value of a bit
   // field |T| in |word32|. Returns result as an uint32 node.
   template <typename T>
-  compiler::Node* BitFieldDecode(compiler::Node* word32) {
-    return BitFieldDecode(word32, T::kShift, T::kMask);
+  compiler::Node* DecodeWord32(compiler::Node* word32) {
+    return DecodeWord32(word32, T::kShift, T::kMask);
+  }
+
+  // Returns a node that contains a decoded (unsigned!) value of a bit
+  // field |T| in |word|. Returns result as a word-size node.
+  template <typename T>
+  compiler::Node* DecodeWord(compiler::Node* word) {
+    return DecodeWord(word, T::kShift, T::kMask);
   }
 
   // Returns a node that contains a decoded (unsigned!) value of a bit
   // field |T| in |word32|. Returns result as a word-size node.
   template <typename T>
-  compiler::Node* BitFieldDecodeWord(compiler::Node* word32) {
-    return ChangeUint32ToWord(BitFieldDecode<T>(word32));
+  compiler::Node* DecodeWordFromWord32(compiler::Node* word32) {
+    return DecodeWord<T>(ChangeUint32ToWord(word32));
   }
 
   // Decodes an unsigned (!) value from |word32| to a uint32 node.
-  compiler::Node* BitFieldDecode(compiler::Node* word32, uint32_t shift,
-                                 uint32_t mask);
+  compiler::Node* DecodeWord32(compiler::Node* word32, uint32_t shift,
+                               uint32_t mask);
+
+  // Decodes an unsigned (!) value from |word| to a word-size node.
+  compiler::Node* DecodeWord(compiler::Node* word, uint32_t shift,
+                             uint32_t mask);
+
+  // Returns true if any of |T|'s bits in the given |word32| are set.
+  template <typename T>
+  compiler::Node* IsSetWord32(compiler::Node* word32) {
+    return IsSetWord32(word32, T::kMask);
+  }
+
+  // Returns true if any of the mask's bits in the given |word32| are set.
+  compiler::Node* IsSetWord32(compiler::Node* word32, uint32_t mask) {
+    return Word32NotEqual(Word32And(word32, Int32Constant(mask)),
+                          Int32Constant(0));
+  }
+
+  // Returns true if any of |T|'s bits in the given |word| are set.
+  template <typename T>
+  compiler::Node* IsSetWord(compiler::Node* word) {
+    return WordNotEqual(WordAnd(word, IntPtrConstant(T::kMask)),
+                        IntPtrConstant(0));
+  }
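For reference, a plain-C++ model of what these decode/test helpers compute, assuming the usual BitField<> layout in which kMask is already shifted into the field's position:

    #include <cstdint>
    // DecodeWord32(word32, shift, mask): extract the field under |mask|.
    uint32_t DecodeWord32Model(uint32_t word32, uint32_t shift, uint32_t mask) {
      return (word32 & mask) >> shift;
    }
    // IsSetWord32(word32, mask): is any bit under |mask| set?
    bool IsSetWord32Model(uint32_t word32, uint32_t mask) {
      return (word32 & mask) != 0;
    }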
 
   void SetCounter(StatsCounter* counter, int value);
   void IncrementCounter(StatsCounter* counter, int delta);
@@ -575,6 +769,8 @@
   compiler::Node* EntryToIndex(compiler::Node* entry) {
     return EntryToIndex<Dictionary>(entry, Dictionary::kEntryKeyIndex);
   }
+  // Calculate a valid size for a hash table.
+  compiler::Node* HashTableComputeCapacity(compiler::Node* at_least_space_for);
 
   // Looks up an entry in a NameDictionaryBase successor. If the entry is found
   // control goes to {if_found} and {var_name_index} contains an index of the
@@ -722,6 +918,12 @@
                              compiler::Node* feedback, Label* if_handler,
                              Variable* var_handler, Label* if_miss,
                              int unroll_count);
+  void HandleKeyedStorePolymorphicCase(compiler::Node* receiver_map,
+                                       compiler::Node* feedback,
+                                       Label* if_handler, Variable* var_handler,
+                                       Label* if_transition_handler,
+                                       Variable* var_transition_map_cell,
+                                       Label* if_miss);
 
   compiler::Node* StubCachePrimaryOffset(compiler::Node* name,
                                          compiler::Node* map);
@@ -775,7 +977,12 @@
   // Loads script context from the script context table.
   compiler::Node* LoadScriptContext(compiler::Node* context, int context_index);
 
-  compiler::Node* ClampedToUint8(compiler::Node* int32_value);
+  compiler::Node* Int32ToUint8Clamped(compiler::Node* int32_value);
+  compiler::Node* Float64ToUint8Clamped(compiler::Node* float64_value);
+
+  compiler::Node* PrepareValueForWriteToTypedArray(compiler::Node* key,
+                                                   ElementsKind elements_kind,
+                                                   Label* bailout);
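A scalar model of the clamping these two helpers implement for UINT8_CLAMPED_ELEMENTS stores, per ECMAScript Uint8ClampedArray semantics (clamp to [0, 255]; doubles round to nearest, ties to even). This is an illustration, not the generated code:

    #include <cmath>
    #include <cstdint>
    uint8_t Int32ToUint8ClampedModel(int32_t v) {
      if (v < 0) return 0;
      if (v > 255) return 255;
      return static_cast<uint8_t>(v);
    }
    uint8_t Float64ToUint8ClampedModel(double v) {
      if (std::isnan(v) || v < 0.0) return 0;  // NaN clamps to 0
      if (v > 255.0) return 255;
      return static_cast<uint8_t>(std::nearbyint(v));  // ties go to even
    }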
 
   // Store value to an elements array with given elements kind.
   void StoreElement(compiler::Node* elements, ElementsKind kind,
@@ -800,10 +1007,12 @@
                                       ParameterMode mode, Label* bailout);
 
   void LoadIC(const LoadICParameters* p);
+  void LoadICProtoArray(const LoadICParameters* p, compiler::Node* handler);
   void LoadGlobalIC(const LoadICParameters* p);
   void KeyedLoadIC(const LoadICParameters* p);
   void KeyedLoadICGeneric(const LoadICParameters* p);
   void StoreIC(const StoreICParameters* p);
+  void KeyedStoreIC(const StoreICParameters* p, LanguageMode language_mode);
 
   void TransitionElementsKind(compiler::Node* object, compiler::Node* map,
                               ElementsKind from_kind, ElementsKind to_kind,
@@ -832,14 +1041,110 @@
   compiler::Node* CreateAllocationSiteInFeedbackVector(
       compiler::Node* feedback_vector, compiler::Node* slot);
 
+  enum class IndexAdvanceMode { kPre, kPost };
+
+  void BuildFastLoop(
+      const VariableList& var_list, MachineRepresentation index_rep,
+      compiler::Node* start_index, compiler::Node* end_index,
+      std::function<void(CodeStubAssembler* assembler, compiler::Node* index)>
+          body,
+      int increment, IndexAdvanceMode mode = IndexAdvanceMode::kPre);
+
+  void BuildFastLoop(
+      MachineRepresentation index_rep, compiler::Node* start_index,
+      compiler::Node* end_index,
+      std::function<void(CodeStubAssembler* assembler, compiler::Node* index)>
+          body,
+      int increment, IndexAdvanceMode mode = IndexAdvanceMode::kPre) {
+    BuildFastLoop(VariableList(0, zone()), index_rep, start_index, end_index,
+                  body, increment, mode);
+  }
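A usage sketch for BuildFastLoop; the body and names are hypothetical, and it is assumed here that kPre advances the index before the body runs on each iteration while kPost advances it after:

    // Visit every pointer-sized offset in [start, end).
    void VisitOffsets(CodeStubAssembler* a, compiler::Node* start,
                      compiler::Node* end) {
      a->BuildFastLoop(
          MachineType::PointerRepresentation(), start, end,
          [](CodeStubAssembler* a, compiler::Node* offset) {
            // ... per-offset work, e.g. a load or store at |offset| ...
          },
          kPointerSize, CodeStubAssembler::IndexAdvanceMode::kPost);
    }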
+
+  enum class ForEachDirection { kForward, kReverse };
+
+  void BuildFastFixedArrayForEach(
+      compiler::Node* fixed_array, ElementsKind kind,
+      compiler::Node* first_element_inclusive,
+      compiler::Node* last_element_exclusive,
+      std::function<void(CodeStubAssembler* assembler,
+                         compiler::Node* fixed_array, compiler::Node* offset)>
+          body,
+      ParameterMode mode = INTPTR_PARAMETERS,
+      ForEachDirection direction = ForEachDirection::kReverse);
+
+  compiler::Node* GetArrayAllocationSize(compiler::Node* element_count,
+                                         ElementsKind kind, ParameterMode mode,
+                                         int header_size) {
+    return ElementOffsetFromIndex(element_count, kind, mode, header_size);
+  }
+
   compiler::Node* GetFixedArrayAllocationSize(compiler::Node* element_count,
                                               ElementsKind kind,
                                               ParameterMode mode) {
-    return ElementOffsetFromIndex(element_count, kind, mode,
+    return GetArrayAllocationSize(element_count, kind, mode,
                                   FixedArray::kHeaderSize);
   }
 
+  enum RelationalComparisonMode {
+    kLessThan,
+    kLessThanOrEqual,
+    kGreaterThan,
+    kGreaterThanOrEqual
+  };
+
+  compiler::Node* RelationalComparison(RelationalComparisonMode mode,
+                                       compiler::Node* lhs, compiler::Node* rhs,
+                                       compiler::Node* context);
+
+  void BranchIfNumericRelationalComparison(RelationalComparisonMode mode,
+                                           compiler::Node* lhs,
+                                           compiler::Node* rhs, Label* if_true,
+                                           Label* if_false);
+
+  void GotoUnlessNumberLessThan(compiler::Node* lhs, compiler::Node* rhs,
+                                Label* if_false);
+
+  enum ResultMode { kDontNegateResult, kNegateResult };
+
+  compiler::Node* Equal(ResultMode mode, compiler::Node* lhs,
+                        compiler::Node* rhs, compiler::Node* context);
+
+  compiler::Node* StrictEqual(ResultMode mode, compiler::Node* lhs,
+                              compiler::Node* rhs, compiler::Node* context);
+
+  // ECMA#sec-samevalue
+  // Similar to StrictEqual except that NaNs are treated as equal and minus zero
+  // differs from positive zero.
+  // Unlike Equal and StrictEqual, returns a value suitable for use in Branch
+  // instructions, e.g. Branch(SameValue(...), &label).
+  compiler::Node* SameValue(compiler::Node* lhs, compiler::Node* rhs,
+                            compiler::Node* context);
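The comment above pins down the two places where SameValue differs from ordinary equality; as a scalar model on doubles:

    #include <cmath>
    bool SameValueDoubleModel(double x, double y) {
      if (std::isnan(x) && std::isnan(y)) return true;  // NaN equals NaN
      if (x == 0.0 && y == 0.0)                         // distinguish +0/-0
        return std::signbit(x) == std::signbit(y);
      return x == y;
    }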
+
+  compiler::Node* HasProperty(
+      compiler::Node* object, compiler::Node* key, compiler::Node* context,
+      Runtime::FunctionId fallback_runtime_function_id = Runtime::kHasProperty);
+  compiler::Node* ForInFilter(compiler::Node* key, compiler::Node* object,
+                              compiler::Node* context);
+
+  compiler::Node* Typeof(compiler::Node* value, compiler::Node* context);
+
+  compiler::Node* InstanceOf(compiler::Node* object, compiler::Node* callable,
+                             compiler::Node* context);
+
+  // TypedArray/ArrayBuffer helpers
+  compiler::Node* IsDetachedBuffer(compiler::Node* buffer);
+
+  compiler::Node* ElementOffsetFromIndex(compiler::Node* index,
+                                         ElementsKind kind, ParameterMode mode,
+                                         int base_size = 0);
+
+ protected:
+  void HandleStoreICHandlerCase(const StoreICParameters* p,
+                                compiler::Node* handler, Label* miss);
+
  private:
+  friend class CodeStubArguments;
+
   enum ElementSupport { kOnlyProperties, kSupportElements };
 
   void DescriptorLookupLinear(compiler::Node* unique_name,
@@ -855,6 +1160,47 @@
   void HandleLoadICHandlerCase(
       const LoadICParameters* p, compiler::Node* handler, Label* miss,
       ElementSupport support_elements = kOnlyProperties);
+
+  void HandleLoadICSmiHandlerCase(const LoadICParameters* p,
+                                  compiler::Node* holder,
+                                  compiler::Node* smi_handler, Label* miss,
+                                  ElementSupport support_elements);
+
+  void HandleLoadICProtoHandler(const LoadICParameters* p,
+                                compiler::Node* handler, Variable* var_holder,
+                                Variable* var_smi_handler,
+                                Label* if_smi_handler, Label* miss);
+
+  compiler::Node* EmitLoadICProtoArrayCheck(const LoadICParameters* p,
+                                            compiler::Node* handler,
+                                            compiler::Node* handler_length,
+                                            compiler::Node* handler_flags,
+                                            Label* miss);
+
+  void CheckPrototype(compiler::Node* prototype_cell, compiler::Node* name,
+                      Label* miss);
+
+  void NameDictionaryNegativeLookup(compiler::Node* object,
+                                    compiler::Node* name, Label* miss);
+
+  // If |transition| is nullptr then a normal field store is generated,
+  // otherwise a transitioning store is generated.
+  void HandleStoreFieldAndReturn(compiler::Node* handler_word,
+                                 compiler::Node* holder,
+                                 Representation representation,
+                                 compiler::Node* value,
+                                 compiler::Node* transition, Label* miss);
+
+  // If |transition| is nullptr then a normal field store is generated,
+  // otherwise a transitioning store is generated.
+  void HandleStoreICSmiHandlerCase(compiler::Node* handler_word,
+                                   compiler::Node* holder,
+                                   compiler::Node* value,
+                                   compiler::Node* transition, Label* miss);
+
+  void HandleStoreICProtoHandler(const StoreICParameters* p,
+                                 compiler::Node* handler, Label* miss);
+
   compiler::Node* TryToIntptr(compiler::Node* key, Label* miss);
   void EmitFastElementsBoundsCheck(compiler::Node* object,
                                    compiler::Node* elements,
@@ -871,10 +1217,6 @@
                                         Label* definitely_no_elements,
                                         Label* possibly_elements);
 
-  compiler::Node* ElementOffsetFromIndex(compiler::Node* index,
-                                         ElementsKind kind, ParameterMode mode,
-                                         int base_size = 0);
-
   compiler::Node* AllocateRawAligned(compiler::Node* size_in_bytes,
                                      AllocationFlags flags,
                                      compiler::Node* top_address,
@@ -900,9 +1242,83 @@
                                            compiler::Node* value,
                                            Label* bailout);
 
+  compiler::Node* AllocateSlicedString(Heap::RootListIndex map_root_index,
+                                       compiler::Node* length,
+                                       compiler::Node* parent,
+                                       compiler::Node* offset);
+
+  compiler::Node* AllocateConsString(Heap::RootListIndex map_root_index,
+                                     compiler::Node* length,
+                                     compiler::Node* first,
+                                     compiler::Node* second,
+                                     AllocationFlags flags);
+
   static const int kElementLoopUnrollThreshold = 8;
 };
 
+class CodeStubArguments {
+ public:
+  // |argc| specifies the number of arguments passed to the builtin excluding
+  // the receiver.
+  CodeStubArguments(CodeStubAssembler* assembler, compiler::Node* argc,
+                    CodeStubAssembler::ParameterMode mode =
+                        CodeStubAssembler::INTPTR_PARAMETERS);
+
+  compiler::Node* GetReceiver();
+
+  // |index| is zero-based and does not include the receiver.
+  compiler::Node* AtIndex(compiler::Node* index,
+                          CodeStubAssembler::ParameterMode mode =
+                              CodeStubAssembler::INTPTR_PARAMETERS);
+
+  compiler::Node* AtIndex(int index);
+
+  typedef std::function<void(CodeStubAssembler* assembler, compiler::Node* arg)>
+      ForEachBodyFunction;
+
+  // Iteration doesn't include the receiver. |first| and |last| are zero-based.
+  void ForEach(ForEachBodyFunction body, compiler::Node* first = nullptr,
+               compiler::Node* last = nullptr,
+               CodeStubAssembler::ParameterMode mode =
+                   CodeStubAssembler::INTPTR_PARAMETERS) {
+    CodeStubAssembler::VariableList list(0, assembler_->zone());
+    ForEach(list, body, first, last, mode);
+  }
+
+  // Iteration doesn't include the receiver. |first| and |last| are zero-based.
+  void ForEach(const CodeStubAssembler::VariableList& vars,
+               ForEachBodyFunction body, compiler::Node* first = nullptr,
+               compiler::Node* last = nullptr,
+               CodeStubAssembler::ParameterMode mode =
+                   CodeStubAssembler::INTPTR_PARAMETERS);
+
+  void PopAndReturn(compiler::Node* value);
+
+ private:
+  compiler::Node* GetArguments();
+
+  CodeStubAssembler* assembler_;
+  compiler::Node* argc_;
+  compiler::Node* arguments_;
+  compiler::Node* fp_;
+};
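A usage sketch for CodeStubArguments (the builtin body and names are hypothetical): |argc| excludes the receiver, and AtIndex/ForEach are zero-based past it, as documented above.

    void GenerateVarargsBuiltin(CodeStubAssembler* a, compiler::Node* argc) {
      CodeStubArguments args(a, argc);
      compiler::Node* receiver = args.GetReceiver();
      compiler::Node* first = args.AtIndex(0);  // first real argument
      USE(first);
      args.ForEach([](CodeStubAssembler* a, compiler::Node* arg) {
        // ... visit each argument in turn, receiver excluded ...
      });
      args.PopAndReturn(receiver);  // pop the arguments, return the receiver
    }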
+
+#ifdef DEBUG
+#define CSA_ASSERT(csa, x) \
+  (csa)->Assert([&] { return (x); }, #x, __FILE__, __LINE__)
+#else
+#define CSA_ASSERT(csa, x) ((void)0)
+#endif
+
+#ifdef ENABLE_SLOW_DCHECKS
+#define CSA_SLOW_ASSERT(csa, x)                                 \
+  if (FLAG_enable_slow_asserts) {                               \
+    (csa)->Assert([&] { return (x); }, #x, __FILE__, __LINE__); \
+  }
+#else
+#define CSA_SLOW_ASSERT(csa, x) ((void)0)
+#endif
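Illustrative use of the new assert macros (method and predicates chosen arbitrarily): the condition is built lazily inside the lambda, so nodes are only constructed when Assert actually runs, and in release builds both macros compile away to ((void)0).

    void AssertNameLikeKey(CodeStubAssembler* csa, compiler::Node* key) {
      CSA_ASSERT(csa, csa->IsName(key));        // DEBUG builds only
      CSA_SLOW_ASSERT(csa, csa->IsString(key)); // also gated on
                                                // FLAG_enable_slow_asserts
    }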
+
 DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags);
 
 }  // namespace internal
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index a294d56..790f687 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -249,7 +249,7 @@
 Handle<Code> HydrogenCodeStub::GenerateRuntimeTailCall(
     CodeStubDescriptor* descriptor) {
   const char* name = CodeStub::MajorName(MajorKey());
-  Zone zone(isolate()->allocator());
+  Zone zone(isolate()->allocator(), ZONE_NAME);
   CallInterfaceDescriptor interface_descriptor(GetCallInterfaceDescriptor());
   CodeStubAssembler assembler(isolate(), &zone, interface_descriptor,
                               GetCodeFlags(), name);
@@ -307,7 +307,7 @@
   if (FLAG_profile_hydrogen_code_stub_compilation) {
     timer.Start();
   }
-  Zone zone(isolate->allocator());
+  Zone zone(isolate->allocator(), ZONE_NAME);
   CompilationInfo info(CStrVector(CodeStub::MajorName(stub->MajorKey())),
                        isolate, &zone, stub->GetCodeFlags());
   // Parameter count is number of stack parameters.
@@ -328,18 +328,6 @@
 }
 
 
-template <>
-HValue* CodeStubGraphBuilder<NumberToStringStub>::BuildCodeStub() {
-  info()->MarkAsSavesCallerDoubles();
-  HValue* number = GetParameter(Descriptor::kArgument);
-  return BuildNumberToString(number, AstType::Number());
-}
-
-
-Handle<Code> NumberToStringStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
 HValue* CodeStubGraphBuilderBase::BuildPushElement(HValue* object, HValue* argc,
                                                    HValue* argument_elements,
                                                    ElementsKind kind) {
@@ -1043,7 +1031,7 @@
       }
       if_inputisprimitive.End();
       // Convert the primitive to a string value.
-      HValue* values[] = {context(), Pop()};
+      HValue* values[] = {Pop()};
       Callable toString = CodeFactory::ToString(isolate());
       Push(AddUncasted<HCallWithDescriptor>(Add<HConstant>(toString.code()), 0,
                                             toString.descriptor(),
@@ -1132,39 +1120,11 @@
   return Pop();
 }
 
-
-template <>
-HValue* CodeStubGraphBuilder<StringAddStub>::BuildCodeInitializedStub() {
-  StringAddStub* stub = casted_stub();
-  StringAddFlags flags = stub->flags();
-  PretenureFlag pretenure_flag = stub->pretenure_flag();
-
-  HValue* left = GetParameter(Descriptor::kLeft);
-  HValue* right = GetParameter(Descriptor::kRight);
-
-  // Make sure that both arguments are strings if not known in advance.
-  if ((flags & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
-    left =
-        BuildToString(left, (flags & STRING_ADD_CONVERT) == STRING_ADD_CONVERT);
-  }
-  if ((flags & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
-    right = BuildToString(right,
-                          (flags & STRING_ADD_CONVERT) == STRING_ADD_CONVERT);
-  }
-
-  return BuildStringAdd(left, right, HAllocationMode(pretenure_flag));
-}
-
-
-Handle<Code> StringAddStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
 template <>
 HValue* CodeStubGraphBuilder<ToBooleanICStub>::BuildCodeInitializedStub() {
   ToBooleanICStub* stub = casted_stub();
   IfBuilder if_true(this);
-  if_true.If<HBranch>(GetParameter(Descriptor::kArgument), stub->types());
+  if_true.If<HBranch>(GetParameter(Descriptor::kArgument), stub->hints());
   if_true.Then();
   if_true.Return(graph()->GetConstantTrue());
   if_true.Else();
@@ -1193,276 +1153,5 @@
   return DoGenerateCode(this);
 }
 
-
-template<>
-HValue* CodeStubGraphBuilder<RegExpConstructResultStub>::BuildCodeStub() {
-  // Determine the parameters.
-  HValue* length = GetParameter(Descriptor::kLength);
-  HValue* index = GetParameter(Descriptor::kIndex);
-  HValue* input = GetParameter(Descriptor::kInput);
-
-  // TODO(turbofan): This codestub has regressed to need a frame on ia32 at some
-  // point and wasn't caught since it wasn't built in the snapshot. We should
-  // probably just replace with a TurboFan stub rather than fixing it.
-#if !(V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87)
-  info()->MarkMustNotHaveEagerFrame();
-#endif
-
-  return BuildRegExpConstructResult(length, index, input);
-}
-
-
-Handle<Code> RegExpConstructResultStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
-
-template <>
-class CodeStubGraphBuilder<KeyedLoadGenericStub>
-    : public CodeStubGraphBuilderBase {
- public:
-  explicit CodeStubGraphBuilder(CompilationInfo* info, CodeStub* stub)
-      : CodeStubGraphBuilderBase(info, stub) {}
-
-  typedef KeyedLoadGenericStub::Descriptor Descriptor;
-
- protected:
-  virtual HValue* BuildCodeStub();
-
-  void BuildElementsKindLimitCheck(HGraphBuilder::IfBuilder* if_builder,
-                                   HValue* bit_field2,
-                                   ElementsKind kind);
-
-  void BuildFastElementLoad(HGraphBuilder::IfBuilder* if_builder,
-                            HValue* receiver,
-                            HValue* key,
-                            HValue* instance_type,
-                            HValue* bit_field2,
-                            ElementsKind kind);
-
-  KeyedLoadGenericStub* casted_stub() {
-    return static_cast<KeyedLoadGenericStub*>(stub());
-  }
-};
-
-
-void CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildElementsKindLimitCheck(
-    HGraphBuilder::IfBuilder* if_builder, HValue* bit_field2,
-    ElementsKind kind) {
-  ElementsKind next_kind = static_cast<ElementsKind>(kind + 1);
-  HValue* kind_limit = Add<HConstant>(
-      static_cast<int>(Map::ElementsKindBits::encode(next_kind)));
-
-  if_builder->If<HCompareNumericAndBranch>(bit_field2, kind_limit, Token::LT);
-  if_builder->Then();
-}
-
-
-void CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildFastElementLoad(
-    HGraphBuilder::IfBuilder* if_builder, HValue* receiver, HValue* key,
-    HValue* instance_type, HValue* bit_field2, ElementsKind kind) {
-  BuildElementsKindLimitCheck(if_builder, bit_field2, kind);
-
-  IfBuilder js_array_check(this);
-  js_array_check.If<HCompareNumericAndBranch>(
-      instance_type, Add<HConstant>(JS_ARRAY_TYPE), Token::EQ);
-  js_array_check.Then();
-  Push(BuildUncheckedMonomorphicElementAccess(receiver, key, NULL,
-                                              true, kind,
-                                              LOAD, NEVER_RETURN_HOLE,
-                                              STANDARD_STORE));
-  js_array_check.Else();
-  Push(BuildUncheckedMonomorphicElementAccess(receiver, key, NULL,
-                                              false, kind,
-                                              LOAD, NEVER_RETURN_HOLE,
-                                              STANDARD_STORE));
-  js_array_check.End();
-}
-
-
-HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
-  HValue* receiver = GetParameter(Descriptor::kReceiver);
-  HValue* key = GetParameter(Descriptor::kName);
-  // Split into a smi/integer case and unique string case.
-  HIfContinuation index_name_split_continuation(graph()->CreateBasicBlock(),
-                                                graph()->CreateBasicBlock());
-
-  BuildKeyedIndexCheck(key, &index_name_split_continuation);
-
-  IfBuilder index_name_split(this, &index_name_split_continuation);
-  index_name_split.Then();
-  {
-    // Key is an index (number)
-    key = Pop();
-
-    int bit_field_mask = (1 << Map::kIsAccessCheckNeeded) |
-      (1 << Map::kHasIndexedInterceptor);
-    BuildJSObjectCheck(receiver, bit_field_mask);
-
-    HValue* map =
-        Add<HLoadNamedField>(receiver, nullptr, HObjectAccess::ForMap());
-
-    HValue* instance_type =
-        Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapInstanceType());
-
-    HValue* bit_field2 =
-        Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField2());
-
-    IfBuilder kind_if(this);
-    BuildFastElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
-                         FAST_HOLEY_ELEMENTS);
-
-    kind_if.Else();
-    {
-      BuildFastElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
-                           FAST_HOLEY_DOUBLE_ELEMENTS);
-    }
-    kind_if.Else();
-
-    // The DICTIONARY_ELEMENTS check generates a "kind_if.Then"
-    BuildElementsKindLimitCheck(&kind_if, bit_field2, DICTIONARY_ELEMENTS);
-    {
-      HValue* elements = AddLoadElements(receiver);
-
-      HValue* hash = BuildElementIndexHash(key);
-
-      Push(BuildUncheckedDictionaryElementLoad(receiver, elements, key, hash));
-    }
-    kind_if.Else();
-
-    // The SLOW_SLOPPY_ARGUMENTS_ELEMENTS check generates a "kind_if.Then"
-    STATIC_ASSERT(FAST_SLOPPY_ARGUMENTS_ELEMENTS <
-                  SLOW_SLOPPY_ARGUMENTS_ELEMENTS);
-    BuildElementsKindLimitCheck(&kind_if, bit_field2,
-                                SLOW_SLOPPY_ARGUMENTS_ELEMENTS);
-    // Non-strict elements are not handled.
-    Add<HDeoptimize>(DeoptimizeReason::kNonStrictElementsInKeyedLoadGenericStub,
-                     Deoptimizer::EAGER);
-    Push(graph()->GetConstant0());
-
-    kind_if.ElseDeopt(
-        DeoptimizeReason::kElementsKindUnhandledInKeyedLoadGenericStub);
-
-    kind_if.End();
-  }
-  index_name_split.Else();
-  {
-    // Key is a unique string.
-    key = Pop();
-
-    int bit_field_mask = (1 << Map::kIsAccessCheckNeeded) |
-        (1 << Map::kHasNamedInterceptor);
-    BuildJSObjectCheck(receiver, bit_field_mask);
-
-    HIfContinuation continuation;
-    BuildTestForDictionaryProperties(receiver, &continuation);
-    IfBuilder if_dict_properties(this, &continuation);
-    if_dict_properties.Then();
-    {
-      //  Key is string, properties are dictionary mode
-      BuildNonGlobalObjectCheck(receiver);
-
-      HValue* properties = Add<HLoadNamedField>(
-          receiver, nullptr, HObjectAccess::ForPropertiesPointer());
-
-      HValue* hash =
-          Add<HLoadNamedField>(key, nullptr, HObjectAccess::ForNameHashField());
-
-      hash = AddUncasted<HShr>(hash, Add<HConstant>(Name::kHashShift));
-
-      HValue* value =
-          BuildUncheckedDictionaryElementLoad(receiver, properties, key, hash);
-      Push(value);
-    }
-    if_dict_properties.Else();
-    {
-      // TODO(dcarney): don't use keyed lookup cache, but convert to use
-      // megamorphic stub cache.
-      UNREACHABLE();
-      //  Key is string, properties are fast mode
-      HValue* hash = BuildKeyedLookupCacheHash(receiver, key);
-
-      ExternalReference cache_keys_ref =
-          ExternalReference::keyed_lookup_cache_keys(isolate());
-      HValue* cache_keys = Add<HConstant>(cache_keys_ref);
-
-      HValue* map =
-          Add<HLoadNamedField>(receiver, nullptr, HObjectAccess::ForMap());
-      HValue* base_index = AddUncasted<HMul>(hash, Add<HConstant>(2));
-      base_index->ClearFlag(HValue::kCanOverflow);
-
-      HIfContinuation inline_or_runtime_continuation(
-          graph()->CreateBasicBlock(), graph()->CreateBasicBlock());
-      {
-        IfBuilder lookup_ifs[KeyedLookupCache::kEntriesPerBucket];
-        for (int probe = 0; probe < KeyedLookupCache::kEntriesPerBucket;
-             ++probe) {
-          IfBuilder* lookup_if = &lookup_ifs[probe];
-          lookup_if->Initialize(this);
-          int probe_base = probe * KeyedLookupCache::kEntryLength;
-          HValue* map_index = AddUncasted<HAdd>(
-              base_index,
-              Add<HConstant>(probe_base + KeyedLookupCache::kMapIndex));
-          map_index->ClearFlag(HValue::kCanOverflow);
-          HValue* key_index = AddUncasted<HAdd>(
-              base_index,
-              Add<HConstant>(probe_base + KeyedLookupCache::kKeyIndex));
-          key_index->ClearFlag(HValue::kCanOverflow);
-          HValue* map_to_check =
-              Add<HLoadKeyed>(cache_keys, map_index, nullptr, nullptr,
-                              FAST_ELEMENTS, NEVER_RETURN_HOLE, 0);
-          lookup_if->If<HCompareObjectEqAndBranch>(map_to_check, map);
-          lookup_if->And();
-          HValue* key_to_check =
-              Add<HLoadKeyed>(cache_keys, key_index, nullptr, nullptr,
-                              FAST_ELEMENTS, NEVER_RETURN_HOLE, 0);
-          lookup_if->If<HCompareObjectEqAndBranch>(key_to_check, key);
-          lookup_if->Then();
-          {
-            ExternalReference cache_field_offsets_ref =
-                ExternalReference::keyed_lookup_cache_field_offsets(isolate());
-            HValue* cache_field_offsets =
-                Add<HConstant>(cache_field_offsets_ref);
-            HValue* index = AddUncasted<HAdd>(hash, Add<HConstant>(probe));
-            index->ClearFlag(HValue::kCanOverflow);
-            HValue* property_index =
-                Add<HLoadKeyed>(cache_field_offsets, index, nullptr, cache_keys,
-                                INT32_ELEMENTS, NEVER_RETURN_HOLE, 0);
-            Push(property_index);
-          }
-          lookup_if->Else();
-        }
-        for (int i = 0; i < KeyedLookupCache::kEntriesPerBucket; ++i) {
-          lookup_ifs[i].JoinContinuation(&inline_or_runtime_continuation);
-        }
-      }
-
-      IfBuilder inline_or_runtime(this, &inline_or_runtime_continuation);
-      inline_or_runtime.Then();
-      {
-        // Found a cached index, load property inline.
-        Push(Add<HLoadFieldByIndex>(receiver, Pop()));
-      }
-      inline_or_runtime.Else();
-      {
-        // KeyedLookupCache miss; call runtime.
-        Add<HPushArguments>(receiver, key);
-        Push(Add<HCallRuntime>(
-            Runtime::FunctionForId(Runtime::kKeyedGetProperty), 2));
-      }
-      inline_or_runtime.End();
-    }
-    if_dict_properties.End();
-  }
-  index_name_split.End();
-
-  return Pop();
-}
-
-
-Handle<Code> KeyedLoadGenericStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
 }  // namespace internal
 }  // namespace v8
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index b899943..2ee5ece 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -22,7 +22,7 @@
 
 RUNTIME_FUNCTION(UnexpectedStubMiss) {
   FATAL("Unexpected deopt of a stub");
-  return Smi::FromInt(0);
+  return Smi::kZero;
 }
 
 CodeStubDescriptor::CodeStubDescriptor(CodeStub* stub)
@@ -318,33 +318,42 @@
   }
 }
 
-
-std::ostream& operator<<(std::ostream& os, const StringAddFlags& flags) {
-  switch (flags) {
-    case STRING_ADD_CHECK_NONE:
-      return os << "CheckNone";
-    case STRING_ADD_CHECK_LEFT:
-      return os << "CheckLeft";
-    case STRING_ADD_CHECK_RIGHT:
-      return os << "CheckRight";
-    case STRING_ADD_CHECK_BOTH:
-      return os << "CheckBoth";
-    case STRING_ADD_CONVERT_LEFT:
-      return os << "ConvertLeft";
-    case STRING_ADD_CONVERT_RIGHT:
-      return os << "ConvertRight";
-    case STRING_ADD_CONVERT:
-      break;
-  }
-  UNREACHABLE();
-  return os;
-}
-
-
 void StringAddStub::PrintBaseName(std::ostream& os) const {  // NOLINT
   os << "StringAddStub_" << flags() << "_" << pretenure_flag();
 }
 
+void StringAddStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+  typedef compiler::Node Node;
+  Node* left = assembler->Parameter(Descriptor::kLeft);
+  Node* right = assembler->Parameter(Descriptor::kRight);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  if ((flags() & STRING_ADD_CHECK_LEFT) != 0) {
+    DCHECK((flags() & STRING_ADD_CONVERT) != 0);
+    // TODO(danno): The ToString and JSReceiverToPrimitive below could be
+    // combined to avoid duplicate smi and instance type checks.
+    left = assembler->ToString(context,
+                               assembler->JSReceiverToPrimitive(context, left));
+  }
+  if ((flags() & STRING_ADD_CHECK_RIGHT) != 0) {
+    DCHECK((flags() & STRING_ADD_CONVERT) != 0);
+    // TODO(danno): The ToString and JSReceiverToPrimitive below could be
+    // combined to avoid duplicate smi and instance type checks.
+    right = assembler->ToString(
+        context, assembler->JSReceiverToPrimitive(context, right));
+  }
+
+  if ((flags() & STRING_ADD_CHECK_BOTH) == 0) {
+    CodeStubAssembler::AllocationFlag flags =
+        (pretenure_flag() == TENURED) ? CodeStubAssembler::kPretenured
+                                      : CodeStubAssembler::kNone;
+    assembler->Return(assembler->StringAdd(context, left, right, flags));
+  } else {
+    Callable callable = CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE,
+                                               pretenure_flag());
+    assembler->TailCallStub(callable, context, left, right);
+  }
+}
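The flag dispatch above reduces to a simple predicate, assuming the usual encoding in which STRING_ADD_CHECK_BOTH is STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT: once any requested conversions have run, either no checks were asked for and the add can be inlined directly, or the already-checked operands are handed off to the CHECK_NONE stub.

    // Sketch of the decision, not part of the patch.
    bool CanInlineStringAdd(StringAddFlags flags) {
      return (flags & STRING_ADD_CHECK_BOTH) == 0;  // neither side needs a check
    }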
 
 InlineCacheState CompareICStub::GetICState() const {
   CompareICState::State state = Max(left(), right());
@@ -411,7 +420,7 @@
 
 Handle<Code> TurboFanCodeStub::GenerateCode() {
   const char* name = CodeStub::MajorName(MajorKey());
-  Zone zone(isolate()->allocator());
+  Zone zone(isolate()->allocator(), ZONE_NAME);
   CallInterfaceDescriptor descriptor(GetCallInterfaceDescriptor());
   CodeStubAssembler assembler(isolate(), &zone, descriptor, GetCodeFlags(),
                               name);
@@ -419,7 +428,7 @@
   return assembler.GenerateCode();
 }
 
-void LoadICTrampolineTFStub::GenerateAssembly(
+void LoadICTrampolineStub::GenerateAssembly(
     CodeStubAssembler* assembler) const {
   typedef compiler::Node Node;
 
@@ -433,7 +442,7 @@
   assembler->LoadIC(&p);
 }
 
-void LoadICTFStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+void LoadICStub::GenerateAssembly(CodeStubAssembler* assembler) const {
   typedef compiler::Node Node;
 
   Node* receiver = assembler->Parameter(Descriptor::kReceiver);
@@ -446,6 +455,21 @@
   assembler->LoadIC(&p);
 }
 
+void LoadICProtoArrayStub::GenerateAssembly(
+    CodeStubAssembler* assembler) const {
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+  Node* name = assembler->Parameter(Descriptor::kName);
+  Node* slot = assembler->Parameter(Descriptor::kSlot);
+  Node* vector = assembler->Parameter(Descriptor::kVector);
+  Node* handler = assembler->Parameter(Descriptor::kHandler);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
+  assembler->LoadICProtoArray(&p, handler);
+}
+
 void LoadGlobalICTrampolineStub::GenerateAssembly(
     CodeStubAssembler* assembler) const {
   typedef compiler::Node Node;
@@ -498,7 +522,7 @@
   assembler->KeyedLoadIC(&p);
 }
 
-void StoreICTrampolineTFStub::GenerateAssembly(
+void StoreICTrampolineStub::GenerateAssembly(
     CodeStubAssembler* assembler) const {
   typedef compiler::Node Node;
 
@@ -514,7 +538,7 @@
   assembler->StoreIC(&p);
 }
 
-void StoreICTFStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+void StoreICStub::GenerateAssembly(CodeStubAssembler* assembler) const {
   typedef compiler::Node Node;
 
   Node* receiver = assembler->Parameter(Descriptor::kReceiver);
@@ -529,6 +553,37 @@
   assembler->StoreIC(&p);
 }
 
+void KeyedStoreICTrampolineTFStub::GenerateAssembly(
+    CodeStubAssembler* assembler) const {
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+  Node* name = assembler->Parameter(Descriptor::kName);
+  Node* value = assembler->Parameter(Descriptor::kValue);
+  Node* slot = assembler->Parameter(Descriptor::kSlot);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* vector = assembler->LoadTypeFeedbackVectorForStub();
+
+  CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
+                                         vector);
+  assembler->KeyedStoreIC(&p, StoreICState::GetLanguageMode(GetExtraICState()));
+}
+
+void KeyedStoreICTFStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+  Node* name = assembler->Parameter(Descriptor::kName);
+  Node* value = assembler->Parameter(Descriptor::kValue);
+  Node* slot = assembler->Parameter(Descriptor::kSlot);
+  Node* vector = assembler->Parameter(Descriptor::kVector);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
+                                         vector);
+  assembler->KeyedStoreIC(&p, StoreICState::GetLanguageMode(GetExtraICState()));
+}
+
 void StoreMapStub::GenerateAssembly(CodeStubAssembler* assembler) const {
   typedef compiler::Node Node;
 
@@ -662,368 +717,6 @@
 }
 
 // static
-compiler::Node* AddStub::Generate(CodeStubAssembler* assembler,
-                                  compiler::Node* left, compiler::Node* right,
-                                  compiler::Node* context) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
-
-  // Shared entry for floating point addition.
-  Label do_fadd(assembler);
-  Variable var_fadd_lhs(assembler, MachineRepresentation::kFloat64),
-      var_fadd_rhs(assembler, MachineRepresentation::kFloat64);
-
-  // We might need to loop several times due to ToPrimitive, ToString and/or
-  // ToNumber conversions.
-  Variable var_lhs(assembler, MachineRepresentation::kTagged),
-      var_rhs(assembler, MachineRepresentation::kTagged),
-      var_result(assembler, MachineRepresentation::kTagged);
-  Variable* loop_vars[2] = {&var_lhs, &var_rhs};
-  Label loop(assembler, 2, loop_vars), end(assembler),
-      string_add_convert_left(assembler, Label::kDeferred),
-      string_add_convert_right(assembler, Label::kDeferred);
-  var_lhs.Bind(left);
-  var_rhs.Bind(right);
-  assembler->Goto(&loop);
-  assembler->Bind(&loop);
-  {
-    // Load the current {lhs} and {rhs} values.
-    Node* lhs = var_lhs.value();
-    Node* rhs = var_rhs.value();
-
-    // Check if the {lhs} is a Smi or a HeapObject.
-    Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
-    assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
-
-    assembler->Bind(&if_lhsissmi);
-    {
-      // Check if the {rhs} is also a Smi.
-      Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
-      assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
-                        &if_rhsisnotsmi);
-
-      assembler->Bind(&if_rhsissmi);
-      {
-        // Try fast Smi addition first.
-        Node* pair = assembler->SmiAddWithOverflow(lhs, rhs);
-        Node* overflow = assembler->Projection(1, pair);
-
-        // Check if the Smi addition overflowed.
-        Label if_overflow(assembler), if_notoverflow(assembler);
-        assembler->Branch(overflow, &if_overflow, &if_notoverflow);
-
-        assembler->Bind(&if_overflow);
-        {
-          var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
-          var_fadd_rhs.Bind(assembler->SmiToFloat64(rhs));
-          assembler->Goto(&do_fadd);
-        }
-
-        assembler->Bind(&if_notoverflow);
-        var_result.Bind(assembler->Projection(0, pair));
-        assembler->Goto(&end);
-      }
-
-      assembler->Bind(&if_rhsisnotsmi);
-      {
-        // Load the map of {rhs}.
-        Node* rhs_map = assembler->LoadMap(rhs);
-
-        // Check if the {rhs} is a HeapNumber.
-        Label if_rhsisnumber(assembler),
-            if_rhsisnotnumber(assembler, Label::kDeferred);
-        assembler->Branch(assembler->IsHeapNumberMap(rhs_map), &if_rhsisnumber,
-                          &if_rhsisnotnumber);
-
-        assembler->Bind(&if_rhsisnumber);
-        {
-          var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
-          var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
-          assembler->Goto(&do_fadd);
-        }
-
-        assembler->Bind(&if_rhsisnotnumber);
-        {
-          // Load the instance type of {rhs}.
-          Node* rhs_instance_type = assembler->LoadMapInstanceType(rhs_map);
-
-          // Check if the {rhs} is a String.
-          Label if_rhsisstring(assembler, Label::kDeferred),
-              if_rhsisnotstring(assembler, Label::kDeferred);
-          assembler->Branch(assembler->IsStringInstanceType(rhs_instance_type),
-                            &if_rhsisstring, &if_rhsisnotstring);
-
-          assembler->Bind(&if_rhsisstring);
-          {
-            var_lhs.Bind(lhs);
-            var_rhs.Bind(rhs);
-            assembler->Goto(&string_add_convert_left);
-          }
-
-          assembler->Bind(&if_rhsisnotstring);
-          {
-            // Check if {rhs} is a JSReceiver.
-            Label if_rhsisreceiver(assembler, Label::kDeferred),
-                if_rhsisnotreceiver(assembler, Label::kDeferred);
-            assembler->Branch(
-                assembler->IsJSReceiverInstanceType(rhs_instance_type),
-                &if_rhsisreceiver, &if_rhsisnotreceiver);
-
-            assembler->Bind(&if_rhsisreceiver);
-            {
-              // Convert {rhs} to a primitive first passing no hint.
-              Callable callable =
-                  CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
-              var_rhs.Bind(assembler->CallStub(callable, context, rhs));
-              assembler->Goto(&loop);
-            }
-
-            assembler->Bind(&if_rhsisnotreceiver);
-            {
-              // Convert {rhs} to a Number first.
-              Callable callable =
-                  CodeFactory::NonNumberToNumber(assembler->isolate());
-              var_rhs.Bind(assembler->CallStub(callable, context, rhs));
-              assembler->Goto(&loop);
-            }
-          }
-        }
-      }
-    }
-
-    assembler->Bind(&if_lhsisnotsmi);
-    {
-      // Load the map and instance type of {lhs}.
-      Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
-
-      // Check if {lhs} is a String.
-      Label if_lhsisstring(assembler), if_lhsisnotstring(assembler);
-      assembler->Branch(assembler->IsStringInstanceType(lhs_instance_type),
-                        &if_lhsisstring, &if_lhsisnotstring);
-
-      assembler->Bind(&if_lhsisstring);
-      {
-        var_lhs.Bind(lhs);
-        var_rhs.Bind(rhs);
-        assembler->Goto(&string_add_convert_right);
-      }
-
-      assembler->Bind(&if_lhsisnotstring);
-      {
-        // Check if {rhs} is a Smi.
-        Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
-        assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
-                          &if_rhsisnotsmi);
-
-        assembler->Bind(&if_rhsissmi);
-        {
-          // Check if {lhs} is a Number.
-          Label if_lhsisnumber(assembler),
-              if_lhsisnotnumber(assembler, Label::kDeferred);
-          assembler->Branch(assembler->Word32Equal(
-                                lhs_instance_type,
-                                assembler->Int32Constant(HEAP_NUMBER_TYPE)),
-                            &if_lhsisnumber, &if_lhsisnotnumber);
-
-          assembler->Bind(&if_lhsisnumber);
-          {
-            // The {lhs} is a HeapNumber, the {rhs} is a Smi, just add them.
-            var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
-            var_fadd_rhs.Bind(assembler->SmiToFloat64(rhs));
-            assembler->Goto(&do_fadd);
-          }
-
-          assembler->Bind(&if_lhsisnotnumber);
-          {
-            // The {lhs} is neither a Number nor a String, and the {rhs} is a
-            // Smi.
-            Label if_lhsisreceiver(assembler, Label::kDeferred),
-                if_lhsisnotreceiver(assembler, Label::kDeferred);
-            assembler->Branch(
-                assembler->IsJSReceiverInstanceType(lhs_instance_type),
-                &if_lhsisreceiver, &if_lhsisnotreceiver);
-
-            assembler->Bind(&if_lhsisreceiver);
-            {
-              // Convert {lhs} to a primitive first passing no hint.
-              Callable callable =
-                  CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
-              var_lhs.Bind(assembler->CallStub(callable, context, lhs));
-              assembler->Goto(&loop);
-            }
-
-            assembler->Bind(&if_lhsisnotreceiver);
-            {
-              // Convert {lhs} to a Number first.
-              Callable callable =
-                  CodeFactory::NonNumberToNumber(assembler->isolate());
-              var_lhs.Bind(assembler->CallStub(callable, context, lhs));
-              assembler->Goto(&loop);
-            }
-          }
-        }
-
-        assembler->Bind(&if_rhsisnotsmi);
-        {
-          // Load the instance type of {rhs}.
-          Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
-
-          // Check if {rhs} is a String.
-          Label if_rhsisstring(assembler), if_rhsisnotstring(assembler);
-          assembler->Branch(assembler->IsStringInstanceType(rhs_instance_type),
-                            &if_rhsisstring, &if_rhsisnotstring);
-
-          assembler->Bind(&if_rhsisstring);
-          {
-            var_lhs.Bind(lhs);
-            var_rhs.Bind(rhs);
-            assembler->Goto(&string_add_convert_left);
-          }
-
-          assembler->Bind(&if_rhsisnotstring);
-          {
-            // Check if {lhs} is a HeapNumber.
-            Label if_lhsisnumber(assembler), if_lhsisnotnumber(assembler);
-            assembler->Branch(assembler->Word32Equal(
-                                  lhs_instance_type,
-                                  assembler->Int32Constant(HEAP_NUMBER_TYPE)),
-                              &if_lhsisnumber, &if_lhsisnotnumber);
-
-            assembler->Bind(&if_lhsisnumber);
-            {
-              // Check if {rhs} is also a HeapNumber.
-              Label if_rhsisnumber(assembler),
-                  if_rhsisnotnumber(assembler, Label::kDeferred);
-              assembler->Branch(assembler->Word32Equal(
-                                    rhs_instance_type,
-                                    assembler->Int32Constant(HEAP_NUMBER_TYPE)),
-                                &if_rhsisnumber, &if_rhsisnotnumber);
-
-              assembler->Bind(&if_rhsisnumber);
-              {
-                // Perform a floating point addition.
-                var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
-                var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
-                assembler->Goto(&do_fadd);
-              }
-
-              assembler->Bind(&if_rhsisnotnumber);
-              {
-                // Check if {rhs} is a JSReceiver.
-                Label if_rhsisreceiver(assembler, Label::kDeferred),
-                    if_rhsisnotreceiver(assembler, Label::kDeferred);
-                assembler->Branch(
-                    assembler->IsJSReceiverInstanceType(rhs_instance_type),
-                    &if_rhsisreceiver, &if_rhsisnotreceiver);
-
-                assembler->Bind(&if_rhsisreceiver);
-                {
-                  // Convert {rhs} to a primitive first passing no hint.
-                  Callable callable = CodeFactory::NonPrimitiveToPrimitive(
-                      assembler->isolate());
-                  var_rhs.Bind(assembler->CallStub(callable, context, rhs));
-                  assembler->Goto(&loop);
-                }
-
-                assembler->Bind(&if_rhsisnotreceiver);
-                {
-                  // Convert {rhs} to a Number first.
-                  Callable callable =
-                      CodeFactory::NonNumberToNumber(assembler->isolate());
-                  var_rhs.Bind(assembler->CallStub(callable, context, rhs));
-                  assembler->Goto(&loop);
-                }
-              }
-            }
-
-            assembler->Bind(&if_lhsisnotnumber);
-            {
-              // Check if {lhs} is a JSReceiver.
-              Label if_lhsisreceiver(assembler, Label::kDeferred),
-                  if_lhsisnotreceiver(assembler);
-              assembler->Branch(
-                  assembler->IsJSReceiverInstanceType(lhs_instance_type),
-                  &if_lhsisreceiver, &if_lhsisnotreceiver);
-
-              assembler->Bind(&if_lhsisreceiver);
-              {
-                // Convert {lhs} to a primitive first passing no hint.
-                Callable callable =
-                    CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
-                var_lhs.Bind(assembler->CallStub(callable, context, lhs));
-                assembler->Goto(&loop);
-              }
-
-              assembler->Bind(&if_lhsisnotreceiver);
-              {
-                // Check if {rhs} is a JSReceiver.
-                Label if_rhsisreceiver(assembler, Label::kDeferred),
-                    if_rhsisnotreceiver(assembler, Label::kDeferred);
-                assembler->Branch(
-                    assembler->IsJSReceiverInstanceType(rhs_instance_type),
-                    &if_rhsisreceiver, &if_rhsisnotreceiver);
-
-                assembler->Bind(&if_rhsisreceiver);
-                {
-                  // Convert {rhs} to a primitive first passing no hint.
-                  Callable callable = CodeFactory::NonPrimitiveToPrimitive(
-                      assembler->isolate());
-                  var_rhs.Bind(assembler->CallStub(callable, context, rhs));
-                  assembler->Goto(&loop);
-                }
-
-                assembler->Bind(&if_rhsisnotreceiver);
-                {
-                  // Convert {lhs} to a Number first.
-                  Callable callable =
-                      CodeFactory::NonNumberToNumber(assembler->isolate());
-                  var_lhs.Bind(assembler->CallStub(callable, context, lhs));
-                  assembler->Goto(&loop);
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-  assembler->Bind(&string_add_convert_left);
-  {
-    // Convert {lhs}, which is a Smi, to a String and concatenate the
-    // resulting string with the String {rhs}.
-    Callable callable = CodeFactory::StringAdd(
-        assembler->isolate(), STRING_ADD_CONVERT_LEFT, NOT_TENURED);
-    var_result.Bind(assembler->CallStub(callable, context, var_lhs.value(),
-                                        var_rhs.value()));
-    assembler->Goto(&end);
-  }
-
-  assembler->Bind(&string_add_convert_right);
-  {
-    // Convert {lhs}, which is a Smi, to a String and concatenate the
-    // resulting string with the String {rhs}.
-    Callable callable = CodeFactory::StringAdd(
-        assembler->isolate(), STRING_ADD_CONVERT_RIGHT, NOT_TENURED);
-    var_result.Bind(assembler->CallStub(callable, context, var_lhs.value(),
-                                        var_rhs.value()));
-    assembler->Goto(&end);
-  }
-
-  assembler->Bind(&do_fadd);
-  {
-    Node* lhs_value = var_fadd_lhs.value();
-    Node* rhs_value = var_fadd_rhs.value();
-    Node* value = assembler->Float64Add(lhs_value, rhs_value);
-    Node* result = assembler->ChangeFloat64ToTagged(value);
-    var_result.Bind(result);
-    assembler->Goto(&end);
-  }
-  assembler->Bind(&end);
-  return var_result.value();
-}
-
-// static
 compiler::Node* AddWithFeedbackStub::Generate(
     CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* rhs,
     compiler::Node* slot_id, compiler::Node* type_feedback_vector,
@@ -1033,8 +726,10 @@
   typedef CodeStubAssembler::Variable Variable;
 
   // Shared entry for floating point addition.
-  Label do_fadd(assembler), end(assembler),
-      do_add_any(assembler, Label::kDeferred), call_add_stub(assembler);
+  Label do_fadd(assembler), if_lhsisnotnumber(assembler, Label::kDeferred),
+      check_rhsisoddball(assembler, Label::kDeferred),
+      call_with_oddball_feedback(assembler), call_with_any_feedback(assembler),
+      call_add_stub(assembler), end(assembler);
   Variable var_fadd_lhs(assembler, MachineRepresentation::kFloat64),
       var_fadd_rhs(assembler, MachineRepresentation::kFloat64),
       var_type_feedback(assembler, MachineRepresentation::kWord32),
@@ -1042,18 +737,21 @@
 
   // Check if the {lhs} is a Smi or a HeapObject.
   Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
-  assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+  assembler->Branch(assembler->TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
 
   assembler->Bind(&if_lhsissmi);
   {
     // Check if the {rhs} is also a Smi.
     Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
-    assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+    assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
+                      &if_rhsisnotsmi);
 
     assembler->Bind(&if_rhsissmi);
     {
       // Try fast Smi addition first.
-      Node* pair = assembler->SmiAddWithOverflow(lhs, rhs);
+      Node* pair =
+          assembler->IntPtrAddWithOverflow(assembler->BitcastTaggedToWord(lhs),
+                                           assembler->BitcastTaggedToWord(rhs));
       Node* overflow = assembler->Projection(1, pair);
 
       // Check if the Smi addition overflowed.
@@ -1071,7 +769,8 @@
       {
         var_type_feedback.Bind(
             assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall));
-        var_result.Bind(assembler->Projection(0, pair));
+        var_result.Bind(assembler->BitcastWordToTaggedSigned(
+            assembler->Projection(0, pair)));
         assembler->Goto(&end);
       }
     }
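Why the bitcast-and-add fast path above works: a Smi stores its payload shifted left with a zero tag bit (one bit on 32-bit targets, 32 bits on 64-bit), so adding the raw words adds the payloads, preserves the tag, and overflows exactly when the sum leaves Smi range. A scalar model for the 32-bit case, using a GCC/Clang builtin:

    // Returns false on overflow (the slow path above).
    bool FastSmiAddModel(int32_t lhs_tagged, int32_t rhs_tagged,
                         int32_t* result_tagged) {
      return !__builtin_add_overflow(lhs_tagged, rhs_tagged, result_tagged);
    }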
@@ -1082,7 +781,8 @@
       Node* rhs_map = assembler->LoadMap(rhs);
 
       // Check if the {rhs} is a HeapNumber.
-      assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map), &do_add_any);
+      assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
+                            &check_rhsisoddball);
 
       var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
       var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
@@ -1092,18 +792,17 @@
 
   assembler->Bind(&if_lhsisnotsmi);
   {
-    Label check_string(assembler);
-
     // Load the map of {lhs}.
     Node* lhs_map = assembler->LoadMap(lhs);
 
     // Check if {lhs} is a HeapNumber.
-    Label if_lhsisnumber(assembler), if_lhsisnotnumber(assembler);
-    assembler->GotoUnless(assembler->IsHeapNumberMap(lhs_map), &check_string);
+    assembler->GotoUnless(assembler->IsHeapNumberMap(lhs_map),
+                          &if_lhsisnotnumber);
 
     // Check if the {rhs} is Smi.
     Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
-    assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+    assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
+                      &if_rhsisnotsmi);
 
     assembler->Bind(&if_rhsissmi);
     {
@@ -1118,34 +817,13 @@
       Node* rhs_map = assembler->LoadMap(rhs);
 
       // Check if the {rhs} is a HeapNumber.
-      assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map), &do_add_any);
+      assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
+                            &check_rhsisoddball);
 
       var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
       var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
       assembler->Goto(&do_fadd);
     }
-
-    assembler->Bind(&check_string);
-    {
-      // Check if the {rhs} is a smi, and exit the string check early if it is.
-      assembler->GotoIf(assembler->WordIsSmi(rhs), &do_add_any);
-
-      Node* lhs_instance_type = assembler->LoadMapInstanceType(lhs_map);
-
-      // Exit unless {lhs} is a string
-      assembler->GotoUnless(assembler->IsStringInstanceType(lhs_instance_type),
-                            &do_add_any);
-
-      Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
-
-      // Exit unless {rhs} is a string
-      assembler->GotoUnless(assembler->IsStringInstanceType(rhs_instance_type),
-                            &do_add_any);
-
-      var_type_feedback.Bind(
-          assembler->Int32Constant(BinaryOperationFeedback::kString));
-      assembler->Goto(&call_add_stub);
-    }
   }
 
   assembler->Bind(&do_fadd);
@@ -1154,12 +832,78 @@
         assembler->Int32Constant(BinaryOperationFeedback::kNumber));
     Node* value =
         assembler->Float64Add(var_fadd_lhs.value(), var_fadd_rhs.value());
-    Node* result = assembler->ChangeFloat64ToTagged(value);
+    Node* result = assembler->AllocateHeapNumberWithValue(value);
     var_result.Bind(result);
     assembler->Goto(&end);
   }
 
-  assembler->Bind(&do_add_any);
+  assembler->Bind(&if_lhsisnotnumber);
+  {
+    // No checks on rhs are done yet. We just know lhs is not a number or Smi.
+    Label if_lhsisoddball(assembler), if_lhsisnotoddball(assembler);
+    Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
+    Node* lhs_is_oddball = assembler->Word32Equal(
+        lhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+    assembler->Branch(lhs_is_oddball, &if_lhsisoddball, &if_lhsisnotoddball);
+
+    assembler->Bind(&if_lhsisoddball);
+    {
+      assembler->GotoIf(assembler->TaggedIsSmi(rhs),
+                        &call_with_oddball_feedback);
+
+      // Load the map of the {rhs}.
+      Node* rhs_map = assembler->LoadMap(rhs);
+
+      // Check if {rhs} is a HeapNumber.
+      assembler->Branch(assembler->IsHeapNumberMap(rhs_map),
+                        &call_with_oddball_feedback, &check_rhsisoddball);
+    }
+
+    assembler->Bind(&if_lhsisnotoddball);
+    {
+      // Exit unless {lhs} is a string.
+      assembler->GotoUnless(assembler->IsStringInstanceType(lhs_instance_type),
+                            &call_with_any_feedback);
+
+      // Check if the {rhs} is a Smi, and exit the string check early if it is.
+      assembler->GotoIf(assembler->TaggedIsSmi(rhs), &call_with_any_feedback);
+
+      Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+
+      // Exit unless {rhs} is a string. Since {lhs} is a string, we no longer
+      // need an Oddball check.
+      assembler->GotoUnless(assembler->IsStringInstanceType(rhs_instance_type),
+                            &call_with_any_feedback);
+
+      var_type_feedback.Bind(
+          assembler->Int32Constant(BinaryOperationFeedback::kString));
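+      // Both operands are strings, so call the StringAdd builtin directly
+      // instead of falling back to the generic Add stub.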
+      Callable callable = CodeFactory::StringAdd(
+          assembler->isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
+      var_result.Bind(assembler->CallStub(callable, context, lhs, rhs));
+
+      assembler->Goto(&end);
+    }
+  }
+
+  assembler->Bind(&check_rhsisoddball);
+  {
+    // Check if rhs is an oddball. At this point we know lhs is either a Smi,
+    // a HeapNumber, or an oddball, and that rhs is neither a HeapNumber nor
+    // a Smi.
+    Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+    Node* rhs_is_oddball = assembler->Word32Equal(
+        rhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+    assembler->Branch(rhs_is_oddball, &call_with_oddball_feedback,
+                      &call_with_any_feedback);
+  }
+
+  assembler->Bind(&call_with_oddball_feedback);
+  {
+    var_type_feedback.Bind(
+        assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
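+    // kNumberOrOddball keeps the feedback more precise than the generic
+    // kAny, so the optimizing compiler can still treat the operation as
+    // numeric.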
+    assembler->Goto(&call_add_stub);
+  }
+
+  assembler->Bind(&call_with_any_feedback);
   {
     var_type_feedback.Bind(
         assembler->Int32Constant(BinaryOperationFeedback::kAny));
@@ -1180,180 +924,6 @@
 }
 
 // static
-compiler::Node* SubtractStub::Generate(CodeStubAssembler* assembler,
-                                       compiler::Node* left,
-                                       compiler::Node* right,
-                                       compiler::Node* context) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
-
-  // Shared entry for floating point subtraction.
-  Label do_fsub(assembler), end(assembler);
-  Variable var_fsub_lhs(assembler, MachineRepresentation::kFloat64),
-      var_fsub_rhs(assembler, MachineRepresentation::kFloat64);
-
-  // We might need to loop several times due to ToPrimitive and/or ToNumber
-  // conversions.
-  Variable var_lhs(assembler, MachineRepresentation::kTagged),
-      var_rhs(assembler, MachineRepresentation::kTagged),
-      var_result(assembler, MachineRepresentation::kTagged);
-  Variable* loop_vars[2] = {&var_lhs, &var_rhs};
-  Label loop(assembler, 2, loop_vars);
-  var_lhs.Bind(left);
-  var_rhs.Bind(right);
-  assembler->Goto(&loop);
-  assembler->Bind(&loop);
-  {
-    // Load the current {lhs} and {rhs} values.
-    Node* lhs = var_lhs.value();
-    Node* rhs = var_rhs.value();
-
-    // Check if the {lhs} is a Smi or a HeapObject.
-    Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
-    assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
-
-    assembler->Bind(&if_lhsissmi);
-    {
-      // Check if the {rhs} is also a Smi.
-      Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
-      assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
-                        &if_rhsisnotsmi);
-
-      assembler->Bind(&if_rhsissmi);
-      {
-        // Try a fast Smi subtraction first.
-        Node* pair = assembler->SmiSubWithOverflow(lhs, rhs);
-        Node* overflow = assembler->Projection(1, pair);
-
-        // Check if the Smi subtraction overflowed.
-        Label if_overflow(assembler), if_notoverflow(assembler);
-        assembler->Branch(overflow, &if_overflow, &if_notoverflow);
-
-        assembler->Bind(&if_overflow);
-        {
-          // The result doesn't fit into Smi range.
-          var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
-          var_fsub_rhs.Bind(assembler->SmiToFloat64(rhs));
-          assembler->Goto(&do_fsub);
-        }
-
-        assembler->Bind(&if_notoverflow);
-        var_result.Bind(assembler->Projection(0, pair));
-        assembler->Goto(&end);
-      }
-
-      assembler->Bind(&if_rhsisnotsmi);
-      {
-        // Load the map of the {rhs}.
-        Node* rhs_map = assembler->LoadMap(rhs);
-
-        // Check if {rhs} is a HeapNumber.
-        Label if_rhsisnumber(assembler),
-            if_rhsisnotnumber(assembler, Label::kDeferred);
-        assembler->Branch(assembler->IsHeapNumberMap(rhs_map), &if_rhsisnumber,
-                          &if_rhsisnotnumber);
-
-        assembler->Bind(&if_rhsisnumber);
-        {
-          // Perform a floating point subtraction.
-          var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
-          var_fsub_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
-          assembler->Goto(&do_fsub);
-        }
-
-        assembler->Bind(&if_rhsisnotnumber);
-        {
-          // Convert the {rhs} to a Number first.
-          Callable callable =
-              CodeFactory::NonNumberToNumber(assembler->isolate());
-          var_rhs.Bind(assembler->CallStub(callable, context, rhs));
-          assembler->Goto(&loop);
-        }
-      }
-    }
-
-    assembler->Bind(&if_lhsisnotsmi);
-    {
-      // Load the map of the {lhs}.
-      Node* lhs_map = assembler->LoadMap(lhs);
-
-      // Check if the {lhs} is a HeapNumber.
-      Label if_lhsisnumber(assembler),
-          if_lhsisnotnumber(assembler, Label::kDeferred);
-      Node* number_map = assembler->HeapNumberMapConstant();
-      assembler->Branch(assembler->WordEqual(lhs_map, number_map),
-                        &if_lhsisnumber, &if_lhsisnotnumber);
-
-      assembler->Bind(&if_lhsisnumber);
-      {
-        // Check if the {rhs} is a Smi.
-        Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
-        assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
-                          &if_rhsisnotsmi);
-
-        assembler->Bind(&if_rhsissmi);
-        {
-          // Perform a floating point subtraction.
-          var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
-          var_fsub_rhs.Bind(assembler->SmiToFloat64(rhs));
-          assembler->Goto(&do_fsub);
-        }
-
-        assembler->Bind(&if_rhsisnotsmi);
-        {
-          // Load the map of the {rhs}.
-          Node* rhs_map = assembler->LoadMap(rhs);
-
-          // Check if the {rhs} is a HeapNumber.
-          Label if_rhsisnumber(assembler),
-              if_rhsisnotnumber(assembler, Label::kDeferred);
-          assembler->Branch(assembler->WordEqual(rhs_map, number_map),
-                            &if_rhsisnumber, &if_rhsisnotnumber);
-
-          assembler->Bind(&if_rhsisnumber);
-          {
-            // Perform a floating point subtraction.
-            var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
-            var_fsub_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
-            assembler->Goto(&do_fsub);
-          }
-
-          assembler->Bind(&if_rhsisnotnumber);
-          {
-            // Convert the {rhs} to a Number first.
-            Callable callable =
-                CodeFactory::NonNumberToNumber(assembler->isolate());
-            var_rhs.Bind(assembler->CallStub(callable, context, rhs));
-            assembler->Goto(&loop);
-          }
-        }
-      }
-
-      assembler->Bind(&if_lhsisnotnumber);
-      {
-        // Convert the {lhs} to a Number first.
-        Callable callable =
-            CodeFactory::NonNumberToNumber(assembler->isolate());
-        var_lhs.Bind(assembler->CallStub(callable, context, lhs));
-        assembler->Goto(&loop);
-      }
-    }
-  }
-
-  assembler->Bind(&do_fsub);
-  {
-    Node* lhs_value = var_fsub_lhs.value();
-    Node* rhs_value = var_fsub_rhs.value();
-    Node* value = assembler->Float64Sub(lhs_value, rhs_value);
-    var_result.Bind(assembler->ChangeFloat64ToTagged(value));
-    assembler->Goto(&end);
-  }
-  assembler->Bind(&end);
-  return var_result.value();
-}
-
-// static
 compiler::Node* SubtractWithFeedbackStub::Generate(
     CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* rhs,
     compiler::Node* slot_id, compiler::Node* type_feedback_vector,
@@ -1363,8 +933,9 @@
   typedef CodeStubAssembler::Variable Variable;
 
   // Shared entry for floating point subtraction.
-  Label do_fsub(assembler), end(assembler),
-      call_subtract_stub(assembler, Label::kDeferred);
+  Label do_fsub(assembler), end(assembler), call_subtract_stub(assembler),
+      if_lhsisnotnumber(assembler), check_rhsisoddball(assembler),
+      call_with_any_feedback(assembler);
   Variable var_fsub_lhs(assembler, MachineRepresentation::kFloat64),
       var_fsub_rhs(assembler, MachineRepresentation::kFloat64),
       var_type_feedback(assembler, MachineRepresentation::kWord32),
@@ -1372,18 +943,21 @@
 
   // Check if the {lhs} is a Smi or a HeapObject.
   Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
-  assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+  assembler->Branch(assembler->TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
 
   assembler->Bind(&if_lhsissmi);
   {
     // Check if the {rhs} is also a Smi.
     Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
-    assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+    assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
+                      &if_rhsisnotsmi);
 
     assembler->Bind(&if_rhsissmi);
     {
       // Try a fast Smi subtraction first.
-      Node* pair = assembler->SmiSubWithOverflow(lhs, rhs);
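+      // A Smi is its payload shifted into the upper bits of a word with the
+      // low tag bits clear, so subtracting the bitcast words yields the
+      // tagged difference, and the overflow bit of IntPtrSubWithOverflow
+      // doubles as the Smi range check.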
+      Node* pair =
+          assembler->IntPtrSubWithOverflow(assembler->BitcastTaggedToWord(lhs),
+                                           assembler->BitcastTaggedToWord(rhs));
       Node* overflow = assembler->Projection(1, pair);
 
       // Check if the Smi subtraction overflowed.
@@ -1403,7 +977,8 @@
       // lhs, rhs, result smi. combined - smi.
       var_type_feedback.Bind(
           assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall));
-      var_result.Bind(assembler->Projection(0, pair));
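+      // Re-tag the raw word result as a Smi.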
+      var_result.Bind(
+          assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
       assembler->Goto(&end);
     }
 
@@ -1414,7 +989,7 @@
 
       // Check if {rhs} is a HeapNumber.
       assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
-                            &call_subtract_stub);
+                            &check_rhsisoddball);
 
       // Perform a floating point subtraction.
       var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
@@ -1430,11 +1005,12 @@
 
     // Check if the {lhs} is a HeapNumber.
     assembler->GotoUnless(assembler->IsHeapNumberMap(lhs_map),
-                          &call_subtract_stub);
+                          &if_lhsisnotnumber);
 
     // Check if the {rhs} is a Smi.
     Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
-    assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+    assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
+                      &if_rhsisnotsmi);
 
     assembler->Bind(&if_rhsissmi);
     {
@@ -1451,7 +1027,7 @@
 
       // Check if the {rhs} is a HeapNumber.
       assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
-                            &call_subtract_stub);
+                            &check_rhsisoddball);
 
       // Perform a floating point subtraction.
       var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
@@ -1467,14 +1043,68 @@
     Node* lhs_value = var_fsub_lhs.value();
     Node* rhs_value = var_fsub_rhs.value();
     Node* value = assembler->Float64Sub(lhs_value, rhs_value);
-    var_result.Bind(assembler->ChangeFloat64ToTagged(value));
+    var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
     assembler->Goto(&end);
   }
 
+  assembler->Bind(&if_lhsisnotnumber);
+  {
+    // No checks on rhs are done yet. We just know lhs is not a number or Smi.
+    // Check if lhs is an oddball.
+    Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
+    Node* lhs_is_oddball = assembler->Word32Equal(
+        lhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+    assembler->GotoUnless(lhs_is_oddball, &call_with_any_feedback);
+
+    Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+    assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
+                      &if_rhsisnotsmi);
+
+    assembler->Bind(&if_rhsissmi);
+    {
+      var_type_feedback.Bind(
+          assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+      assembler->Goto(&call_subtract_stub);
+    }
+
+    assembler->Bind(&if_rhsisnotsmi);
+    {
+      // Load the map of the {rhs}.
+      Node* rhs_map = assembler->LoadMap(rhs);
+
+      // Check if {rhs} is a HeapNumber.
+      assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
+                            &check_rhsisoddball);
+
+      var_type_feedback.Bind(
+          assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+      assembler->Goto(&call_subtract_stub);
+    }
+  }
+
+  assembler->Bind(&check_rhsisoddball);
+  {
+    // Check if rhs is an oddball. At this point we know lhs is either a Smi,
+    // a HeapNumber, or an oddball, and that rhs is neither a HeapNumber nor
+    // a Smi.
+    Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+    Node* rhs_is_oddball = assembler->Word32Equal(
+        rhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+    assembler->GotoUnless(rhs_is_oddball, &call_with_any_feedback);
+
+    var_type_feedback.Bind(
+        assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+    assembler->Goto(&call_subtract_stub);
+  }
+
+  assembler->Bind(&call_with_any_feedback);
+  {
+    var_type_feedback.Bind(
+        assembler->Int32Constant(BinaryOperationFeedback::kAny));
+    assembler->Goto(&call_subtract_stub);
+  }
+
   assembler->Bind(&call_subtract_stub);
   {
-    var_type_feedback.Bind(
-        assembler->Int32Constant(BinaryOperationFeedback::kAny));
     Callable callable = CodeFactory::Subtract(assembler->isolate());
     var_result.Bind(assembler->CallStub(callable, context, lhs, rhs));
     assembler->Goto(&end);
@@ -1486,158 +1116,6 @@
   return var_result.value();
 }
 
-// static
-compiler::Node* MultiplyStub::Generate(CodeStubAssembler* assembler,
-                                       compiler::Node* left,
-                                       compiler::Node* right,
-                                       compiler::Node* context) {
-  using compiler::Node;
-  typedef CodeStubAssembler::Label Label;
-  typedef CodeStubAssembler::Variable Variable;
-
-  // Shared entry point for floating point multiplication.
-  Label do_fmul(assembler), return_result(assembler);
-  Variable var_lhs_float64(assembler, MachineRepresentation::kFloat64),
-      var_rhs_float64(assembler, MachineRepresentation::kFloat64);
-
-  Node* number_map = assembler->HeapNumberMapConstant();
-
-  // We might need to loop one or two times due to ToNumber conversions.
-  Variable var_lhs(assembler, MachineRepresentation::kTagged),
-      var_rhs(assembler, MachineRepresentation::kTagged),
-      var_result(assembler, MachineRepresentation::kTagged);
-  Variable* loop_variables[] = {&var_lhs, &var_rhs};
-  Label loop(assembler, 2, loop_variables);
-  var_lhs.Bind(left);
-  var_rhs.Bind(right);
-  assembler->Goto(&loop);
-  assembler->Bind(&loop);
-  {
-    Node* lhs = var_lhs.value();
-    Node* rhs = var_rhs.value();
-
-    Label lhs_is_smi(assembler), lhs_is_not_smi(assembler);
-    assembler->Branch(assembler->WordIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
-
-    assembler->Bind(&lhs_is_smi);
-    {
-      Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
-      assembler->Branch(assembler->WordIsSmi(rhs), &rhs_is_smi,
-                        &rhs_is_not_smi);
-
-      assembler->Bind(&rhs_is_smi);
-      {
-        // Both {lhs} and {rhs} are Smis. The result is not necessarily a smi,
-        // in case of overflow.
-        var_result.Bind(assembler->SmiMul(lhs, rhs));
-        assembler->Goto(&return_result);
-      }
-
-      assembler->Bind(&rhs_is_not_smi);
-      {
-        Node* rhs_map = assembler->LoadMap(rhs);
-
-        // Check if {rhs} is a HeapNumber.
-        Label rhs_is_number(assembler),
-            rhs_is_not_number(assembler, Label::kDeferred);
-        assembler->Branch(assembler->WordEqual(rhs_map, number_map),
-                          &rhs_is_number, &rhs_is_not_number);
-
-        assembler->Bind(&rhs_is_number);
-        {
-          // Convert {lhs} to a double and multiply it with the value of {rhs}.
-          var_lhs_float64.Bind(assembler->SmiToFloat64(lhs));
-          var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
-          assembler->Goto(&do_fmul);
-        }
-
-        assembler->Bind(&rhs_is_not_number);
-        {
-          // Multiplication is commutative, swap {lhs} with {rhs} and loop.
-          var_lhs.Bind(rhs);
-          var_rhs.Bind(lhs);
-          assembler->Goto(&loop);
-        }
-      }
-    }
-
-    assembler->Bind(&lhs_is_not_smi);
-    {
-      Node* lhs_map = assembler->LoadMap(lhs);
-
-      // Check if {lhs} is a HeapNumber.
-      Label lhs_is_number(assembler),
-          lhs_is_not_number(assembler, Label::kDeferred);
-      assembler->Branch(assembler->WordEqual(lhs_map, number_map),
-                        &lhs_is_number, &lhs_is_not_number);
-
-      assembler->Bind(&lhs_is_number);
-      {
-        // Check if {rhs} is a Smi.
-        Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
-        assembler->Branch(assembler->WordIsSmi(rhs), &rhs_is_smi,
-                          &rhs_is_not_smi);
-
-        assembler->Bind(&rhs_is_smi);
-        {
-          // Convert {rhs} to a double and multiply it with the value of {lhs}.
-          var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
-          var_rhs_float64.Bind(assembler->SmiToFloat64(rhs));
-          assembler->Goto(&do_fmul);
-        }
-
-        assembler->Bind(&rhs_is_not_smi);
-        {
-          Node* rhs_map = assembler->LoadMap(rhs);
-
-          // Check if {rhs} is a HeapNumber.
-          Label rhs_is_number(assembler),
-              rhs_is_not_number(assembler, Label::kDeferred);
-          assembler->Branch(assembler->WordEqual(rhs_map, number_map),
-                            &rhs_is_number, &rhs_is_not_number);
-
-          assembler->Bind(&rhs_is_number);
-          {
-            // Both {lhs} and {rhs} are HeapNumbers. Load their values and
-            // multiply them.
-            var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
-            var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
-            assembler->Goto(&do_fmul);
-          }
-
-          assembler->Bind(&rhs_is_not_number);
-          {
-            // Multiplication is commutative, swap {lhs} with {rhs} and loop.
-            var_lhs.Bind(rhs);
-            var_rhs.Bind(lhs);
-            assembler->Goto(&loop);
-          }
-        }
-      }
-
-      assembler->Bind(&lhs_is_not_number);
-      {
-        // Convert {lhs} to a Number and loop.
-        Callable callable =
-            CodeFactory::NonNumberToNumber(assembler->isolate());
-        var_lhs.Bind(assembler->CallStub(callable, context, lhs));
-        assembler->Goto(&loop);
-      }
-    }
-  }
-
-  assembler->Bind(&do_fmul);
-  {
-    Node* value =
-        assembler->Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
-    Node* result = assembler->ChangeFloat64ToTagged(value);
-    var_result.Bind(result);
-    assembler->Goto(&return_result);
-  }
-
-  assembler->Bind(&return_result);
-  return var_result.value();
-}
 
 // static
 compiler::Node* MultiplyWithFeedbackStub::Generate(
@@ -1649,8 +1127,10 @@
   typedef CodeStubAssembler::Variable Variable;
 
   // Shared entry point for floating point multiplication.
-  Label do_fmul(assembler), end(assembler),
-      call_multiply_stub(assembler, Label::kDeferred);
+  Label do_fmul(assembler), if_lhsisnotnumber(assembler, Label::kDeferred),
+      check_rhsisoddball(assembler, Label::kDeferred),
+      call_with_oddball_feedback(assembler), call_with_any_feedback(assembler),
+      call_multiply_stub(assembler), end(assembler);
   Variable var_lhs_float64(assembler, MachineRepresentation::kFloat64),
       var_rhs_float64(assembler, MachineRepresentation::kFloat64),
       var_result(assembler, MachineRepresentation::kTagged),
@@ -1659,12 +1139,13 @@
   Node* number_map = assembler->HeapNumberMapConstant();
 
   Label lhs_is_smi(assembler), lhs_is_not_smi(assembler);
-  assembler->Branch(assembler->WordIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
+  assembler->Branch(assembler->TaggedIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
 
   assembler->Bind(&lhs_is_smi);
   {
     Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
-    assembler->Branch(assembler->WordIsSmi(rhs), &rhs_is_smi, &rhs_is_not_smi);
+    assembler->Branch(assembler->TaggedIsSmi(rhs), &rhs_is_smi,
+                      &rhs_is_not_smi);
 
     assembler->Bind(&rhs_is_smi);
     {
@@ -1672,7 +1153,7 @@
       // in case of overflow.
       var_result.Bind(assembler->SmiMul(lhs, rhs));
       var_type_feedback.Bind(assembler->Select(
-          assembler->WordIsSmi(var_result.value()),
+          assembler->TaggedIsSmi(var_result.value()),
           assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall),
           assembler->Int32Constant(BinaryOperationFeedback::kNumber),
           MachineRepresentation::kWord32));
@@ -1685,7 +1166,7 @@
 
       // Check if {rhs} is a HeapNumber.
       assembler->GotoUnless(assembler->WordEqual(rhs_map, number_map),
-                            &call_multiply_stub);
+                            &check_rhsisoddball);
 
       // Convert {lhs} to a double and multiply it with the value of {rhs}.
       var_lhs_float64.Bind(assembler->SmiToFloat64(lhs));
@@ -1700,11 +1181,12 @@
 
     // Check if {lhs} is a HeapNumber.
     assembler->GotoUnless(assembler->WordEqual(lhs_map, number_map),
-                          &call_multiply_stub);
+                          &if_lhsisnotnumber);
 
     // Check if {rhs} is a Smi.
     Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
-    assembler->Branch(assembler->WordIsSmi(rhs), &rhs_is_smi, &rhs_is_not_smi);
+    assembler->Branch(assembler->TaggedIsSmi(rhs), &rhs_is_smi,
+                      &rhs_is_not_smi);
 
     assembler->Bind(&rhs_is_smi);
     {
@@ -1720,7 +1202,7 @@
 
       // Check if {rhs} is a HeapNumber.
       assembler->GotoUnless(assembler->WordEqual(rhs_map, number_map),
-                            &call_multiply_stub);
+                            &check_rhsisoddball);
 
       // Both {lhs} and {rhs} are HeapNumbers. Load their values and
       // multiply them.
@@ -1736,15 +1218,57 @@
         assembler->Int32Constant(BinaryOperationFeedback::kNumber));
     Node* value =
         assembler->Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
-    Node* result = assembler->ChangeFloat64ToTagged(value);
+    Node* result = assembler->AllocateHeapNumberWithValue(value);
     var_result.Bind(result);
     assembler->Goto(&end);
   }
 
-  assembler->Bind(&call_multiply_stub);
+  assembler->Bind(&if_lhsisnotnumber);
+  {
+    // No checks on rhs are done yet. We just know lhs is not a number or Smi.
+    // Check if lhs is an oddball.
+    Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
+    Node* lhs_is_oddball = assembler->Word32Equal(
+        lhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+    assembler->GotoUnless(lhs_is_oddball, &call_with_any_feedback);
+
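+    // {lhs} is an oddball and {rhs} is a Smi: both coerce to numbers, so
+    // kNumberOrOddball feedback suffices.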
+    assembler->GotoIf(assembler->TaggedIsSmi(rhs), &call_with_oddball_feedback);
+
+    // Load the map of the {rhs}.
+    Node* rhs_map = assembler->LoadMap(rhs);
+
+    // Check if {rhs} is a HeapNumber.
+    assembler->Branch(assembler->IsHeapNumberMap(rhs_map),
+                      &call_with_oddball_feedback, &check_rhsisoddball);
+  }
+
+  assembler->Bind(&check_rhsisoddball);
+  {
+    // Check if rhs is an oddball. At this point we know lhs is either a Smi,
+    // a HeapNumber, or an oddball, and that rhs is neither a HeapNumber nor
+    // a Smi.
+    Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+    Node* rhs_is_oddball = assembler->Word32Equal(
+        rhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+    assembler->Branch(rhs_is_oddball, &call_with_oddball_feedback,
+                      &call_with_any_feedback);
+  }
+
+  assembler->Bind(&call_with_oddball_feedback);
+  {
+    var_type_feedback.Bind(
+        assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+    assembler->Goto(&call_multiply_stub);
+  }
+
+  assembler->Bind(&call_with_any_feedback);
   {
     var_type_feedback.Bind(
         assembler->Int32Constant(BinaryOperationFeedback::kAny));
+    assembler->Goto(&call_multiply_stub);
+  }
+
+  assembler->Bind(&call_multiply_stub);
+  {
     Callable callable = CodeFactory::Multiply(assembler->isolate());
     var_result.Bind(assembler->CallStub(callable, context, lhs, rhs));
     assembler->Goto(&end);
@@ -1756,223 +1280,6 @@
   return var_result.value();
 }
 
-// static
-compiler::Node* DivideStub::Generate(CodeStubAssembler* assembler,
-                                     compiler::Node* left,
-                                     compiler::Node* right,
-                                     compiler::Node* context) {
-  using compiler::Node;
-  typedef CodeStubAssembler::Label Label;
-  typedef CodeStubAssembler::Variable Variable;
-
-  // Shared entry point for floating point division.
-  Label do_fdiv(assembler), end(assembler);
-  Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
-      var_divisor_float64(assembler, MachineRepresentation::kFloat64);
-
-  Node* number_map = assembler->HeapNumberMapConstant();
-
-  // We might need to loop one or two times due to ToNumber conversions.
-  Variable var_dividend(assembler, MachineRepresentation::kTagged),
-      var_divisor(assembler, MachineRepresentation::kTagged),
-      var_result(assembler, MachineRepresentation::kTagged);
-  Variable* loop_variables[] = {&var_dividend, &var_divisor};
-  Label loop(assembler, 2, loop_variables);
-  var_dividend.Bind(left);
-  var_divisor.Bind(right);
-  assembler->Goto(&loop);
-  assembler->Bind(&loop);
-  {
-    Node* dividend = var_dividend.value();
-    Node* divisor = var_divisor.value();
-
-    Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
-    assembler->Branch(assembler->WordIsSmi(dividend), &dividend_is_smi,
-                      &dividend_is_not_smi);
-
-    assembler->Bind(&dividend_is_smi);
-    {
-      Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
-      assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
-                        &divisor_is_not_smi);
-
-      assembler->Bind(&divisor_is_smi);
-      {
-        Label bailout(assembler);
-
-        // Do floating point division if {divisor} is zero.
-        assembler->GotoIf(
-            assembler->WordEqual(divisor, assembler->IntPtrConstant(0)),
-            &bailout);
-
-        // Do floating point division {dividend} is zero and {divisor} is
-        // negative.
-        Label dividend_is_zero(assembler), dividend_is_not_zero(assembler);
-        assembler->Branch(
-            assembler->WordEqual(dividend, assembler->IntPtrConstant(0)),
-            &dividend_is_zero, &dividend_is_not_zero);
-
-        assembler->Bind(&dividend_is_zero);
-        {
-          assembler->GotoIf(
-              assembler->IntPtrLessThan(divisor, assembler->IntPtrConstant(0)),
-              &bailout);
-          assembler->Goto(&dividend_is_not_zero);
-        }
-        assembler->Bind(&dividend_is_not_zero);
-
-        Node* untagged_divisor = assembler->SmiUntag(divisor);
-        Node* untagged_dividend = assembler->SmiUntag(dividend);
-
-        // Do floating point division if {dividend} is kMinInt (or kMinInt - 1
-        // if the Smi size is 31) and {divisor} is -1.
-        Label divisor_is_minus_one(assembler),
-            divisor_is_not_minus_one(assembler);
-        assembler->Branch(assembler->Word32Equal(untagged_divisor,
-                                                 assembler->Int32Constant(-1)),
-                          &divisor_is_minus_one, &divisor_is_not_minus_one);
-
-        assembler->Bind(&divisor_is_minus_one);
-        {
-          assembler->GotoIf(
-              assembler->Word32Equal(
-                  untagged_dividend,
-                  assembler->Int32Constant(
-                      kSmiValueSize == 32 ? kMinInt : (kMinInt >> 1))),
-              &bailout);
-          assembler->Goto(&divisor_is_not_minus_one);
-        }
-        assembler->Bind(&divisor_is_not_minus_one);
-
-        // TODO(epertoso): consider adding a machine instruction that returns
-        // both the result and the remainder.
-        Node* untagged_result =
-            assembler->Int32Div(untagged_dividend, untagged_divisor);
-        Node* truncated =
-            assembler->Int32Mul(untagged_result, untagged_divisor);
-        // Do floating point division if the remainder is not 0.
-        assembler->GotoIf(
-            assembler->Word32NotEqual(untagged_dividend, truncated), &bailout);
-        var_result.Bind(assembler->SmiTag(untagged_result));
-        assembler->Goto(&end);
-
-        // Bailout: convert {dividend} and {divisor} to double and do double
-        // division.
-        assembler->Bind(&bailout);
-        {
-          var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
-          var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
-          assembler->Goto(&do_fdiv);
-        }
-      }
-
-      assembler->Bind(&divisor_is_not_smi);
-      {
-        Node* divisor_map = assembler->LoadMap(divisor);
-
-        // Check if {divisor} is a HeapNumber.
-        Label divisor_is_number(assembler),
-            divisor_is_not_number(assembler, Label::kDeferred);
-        assembler->Branch(assembler->WordEqual(divisor_map, number_map),
-                          &divisor_is_number, &divisor_is_not_number);
-
-        assembler->Bind(&divisor_is_number);
-        {
-          // Convert {dividend} to a double and divide it with the value of
-          // {divisor}.
-          var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
-          var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
-          assembler->Goto(&do_fdiv);
-        }
-
-        assembler->Bind(&divisor_is_not_number);
-        {
-          // Convert {divisor} to a number and loop.
-          Callable callable =
-              CodeFactory::NonNumberToNumber(assembler->isolate());
-          var_divisor.Bind(assembler->CallStub(callable, context, divisor));
-          assembler->Goto(&loop);
-        }
-      }
-    }
-
-    assembler->Bind(&dividend_is_not_smi);
-    {
-      Node* dividend_map = assembler->LoadMap(dividend);
-
-      // Check if {dividend} is a HeapNumber.
-      Label dividend_is_number(assembler),
-          dividend_is_not_number(assembler, Label::kDeferred);
-      assembler->Branch(assembler->WordEqual(dividend_map, number_map),
-                        &dividend_is_number, &dividend_is_not_number);
-
-      assembler->Bind(&dividend_is_number);
-      {
-        // Check if {divisor} is a Smi.
-        Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
-        assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
-                          &divisor_is_not_smi);
-
-        assembler->Bind(&divisor_is_smi);
-        {
-          // Convert {divisor} to a double and use it for a floating point
-          // division.
-          var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
-          var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
-          assembler->Goto(&do_fdiv);
-        }
-
-        assembler->Bind(&divisor_is_not_smi);
-        {
-          Node* divisor_map = assembler->LoadMap(divisor);
-
-          // Check if {divisor} is a HeapNumber.
-          Label divisor_is_number(assembler),
-              divisor_is_not_number(assembler, Label::kDeferred);
-          assembler->Branch(assembler->WordEqual(divisor_map, number_map),
-                            &divisor_is_number, &divisor_is_not_number);
-
-          assembler->Bind(&divisor_is_number);
-          {
-            // Both {dividend} and {divisor} are HeapNumbers. Load their values
-            // and divide them.
-            var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
-            var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
-            assembler->Goto(&do_fdiv);
-          }
-
-          assembler->Bind(&divisor_is_not_number);
-          {
-            // Convert {divisor} to a number and loop.
-            Callable callable =
-                CodeFactory::NonNumberToNumber(assembler->isolate());
-            var_divisor.Bind(assembler->CallStub(callable, context, divisor));
-            assembler->Goto(&loop);
-          }
-        }
-      }
-
-      assembler->Bind(&dividend_is_not_number);
-      {
-        // Convert {dividend} to a Number and loop.
-        Callable callable =
-            CodeFactory::NonNumberToNumber(assembler->isolate());
-        var_dividend.Bind(assembler->CallStub(callable, context, dividend));
-        assembler->Goto(&loop);
-      }
-    }
-  }
-
-  assembler->Bind(&do_fdiv);
-  {
-    Node* value = assembler->Float64Div(var_dividend_float64.value(),
-                                        var_divisor_float64.value());
-    var_result.Bind(assembler->ChangeFloat64ToTagged(value));
-    assembler->Goto(&end);
-  }
-  assembler->Bind(&end);
-  return var_result.value();
-}
 
 // static
 compiler::Node* DivideWithFeedbackStub::Generate(
@@ -1984,7 +1291,10 @@
   typedef CodeStubAssembler::Variable Variable;
 
   // Shared entry point for floating point division.
-  Label do_fdiv(assembler), end(assembler), call_divide_stub(assembler);
+  Label do_fdiv(assembler), dividend_is_not_number(assembler, Label::kDeferred),
+      check_divisor_for_oddball(assembler, Label::kDeferred),
+      call_with_oddball_feedback(assembler), call_with_any_feedback(assembler),
+      call_divide_stub(assembler), end(assembler);
   Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
       var_divisor_float64(assembler, MachineRepresentation::kFloat64),
       var_result(assembler, MachineRepresentation::kTagged),
@@ -1993,13 +1303,13 @@
   Node* number_map = assembler->HeapNumberMapConstant();
 
   Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
-  assembler->Branch(assembler->WordIsSmi(dividend), &dividend_is_smi,
+  assembler->Branch(assembler->TaggedIsSmi(dividend), &dividend_is_smi,
                     &dividend_is_not_smi);
 
   assembler->Bind(&dividend_is_smi);
   {
     Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
-    assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+    assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
                       &divisor_is_not_smi);
 
     assembler->Bind(&divisor_is_smi);
@@ -2077,7 +1387,7 @@
 
       // Check if {divisor} is a HeapNumber.
       assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
-                            &call_divide_stub);
+                            &check_divisor_for_oddball);
 
       // Convert {dividend} to a double and divide it with the value of
       // {divisor}.
@@ -2092,11 +1402,11 @@
 
       // Check if {dividend} is a HeapNumber.
       assembler->GotoUnless(assembler->WordEqual(dividend_map, number_map),
-                            &call_divide_stub);
+                            &dividend_is_not_number);
 
       // Check if {divisor} is a Smi.
       Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
-      assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+      assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
                         &divisor_is_not_smi);
 
       assembler->Bind(&divisor_is_smi);
@@ -2114,7 +1424,7 @@
 
         // Check if {divisor} is a HeapNumber.
         assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
-                              &call_divide_stub);
+                              &check_divisor_for_oddball);
 
         // Both {dividend} and {divisor} are HeapNumbers. Load their values
         // and divide them.
@@ -2131,14 +1441,57 @@
         assembler->Int32Constant(BinaryOperationFeedback::kNumber));
     Node* value = assembler->Float64Div(var_dividend_float64.value(),
                                         var_divisor_float64.value());
-    var_result.Bind(assembler->ChangeFloat64ToTagged(value));
+    var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
     assembler->Goto(&end);
   }
 
+  assembler->Bind(&dividend_is_not_number);
+  {
+    // We just know dividend is not a number or Smi. No checks on divisor yet.
+    // Check if dividend is an oddball.
+    Node* dividend_instance_type = assembler->LoadInstanceType(dividend);
+    Node* dividend_is_oddball = assembler->Word32Equal(
+        dividend_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+    assembler->GotoUnless(dividend_is_oddball, &call_with_any_feedback);
+
+    assembler->GotoIf(assembler->TaggedIsSmi(divisor),
+                      &call_with_oddball_feedback);
+
+    // Load the map of the {divisor}.
+    Node* divisor_map = assembler->LoadMap(divisor);
+
+    // Check if {divisor} is a HeapNumber.
+    assembler->Branch(assembler->IsHeapNumberMap(divisor_map),
+                      &call_with_oddball_feedback, &check_divisor_for_oddball);
+  }
+
+  assembler->Bind(&check_divisor_for_oddball);
+  {
+    // Check if divisor is an oddball. At this point we know dividend is
+    // either a Smi, a HeapNumber, or an oddball, and that divisor is neither
+    // a HeapNumber nor a Smi.
+    Node* divisor_instance_type = assembler->LoadInstanceType(divisor);
+    Node* divisor_is_oddball = assembler->Word32Equal(
+        divisor_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+    assembler->Branch(divisor_is_oddball, &call_with_oddball_feedback,
+                      &call_with_any_feedback);
+  }
+
+  assembler->Bind(&call_with_oddball_feedback);
+  {
+    var_type_feedback.Bind(
+        assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+    assembler->Goto(&call_divide_stub);
+  }
+
+  assembler->Bind(&call_with_any_feedback);
+  {
+    var_type_feedback.Bind(
+        assembler->Int32Constant(BinaryOperationFeedback::kAny));
+    assembler->Goto(&call_divide_stub);
+  }
+
   assembler->Bind(&call_divide_stub);
   {
-    var_type_feedback.Bind(
-        assembler->Int32Constant(BinaryOperationFeedback::kAny));
     Callable callable = CodeFactory::Divide(assembler->isolate());
     var_result.Bind(assembler->CallStub(callable, context, dividend, divisor));
     assembler->Goto(&end);
@@ -2151,165 +1504,6 @@
 }
 
 // static
-compiler::Node* ModulusStub::Generate(CodeStubAssembler* assembler,
-                                      compiler::Node* left,
-                                      compiler::Node* right,
-                                      compiler::Node* context) {
-  using compiler::Node;
-  typedef CodeStubAssembler::Label Label;
-  typedef CodeStubAssembler::Variable Variable;
-
-  Variable var_result(assembler, MachineRepresentation::kTagged);
-  Label return_result(assembler, &var_result);
-
-  // Shared entry point for floating point modulus.
-  Label do_fmod(assembler);
-  Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
-      var_divisor_float64(assembler, MachineRepresentation::kFloat64);
-
-  Node* number_map = assembler->HeapNumberMapConstant();
-
-  // We might need to loop one or two times due to ToNumber conversions.
-  Variable var_dividend(assembler, MachineRepresentation::kTagged),
-      var_divisor(assembler, MachineRepresentation::kTagged);
-  Variable* loop_variables[] = {&var_dividend, &var_divisor};
-  Label loop(assembler, 2, loop_variables);
-  var_dividend.Bind(left);
-  var_divisor.Bind(right);
-  assembler->Goto(&loop);
-  assembler->Bind(&loop);
-  {
-    Node* dividend = var_dividend.value();
-    Node* divisor = var_divisor.value();
-
-    Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
-    assembler->Branch(assembler->WordIsSmi(dividend), &dividend_is_smi,
-                      &dividend_is_not_smi);
-
-    assembler->Bind(&dividend_is_smi);
-    {
-      Label dividend_is_not_zero(assembler);
-      Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
-      assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
-                        &divisor_is_not_smi);
-
-      assembler->Bind(&divisor_is_smi);
-      {
-        // Compute the modulus of two Smis.
-        var_result.Bind(assembler->SmiMod(dividend, divisor));
-        assembler->Goto(&return_result);
-      }
-
-      assembler->Bind(&divisor_is_not_smi);
-      {
-        Node* divisor_map = assembler->LoadMap(divisor);
-
-        // Check if {divisor} is a HeapNumber.
-        Label divisor_is_number(assembler),
-            divisor_is_not_number(assembler, Label::kDeferred);
-        assembler->Branch(assembler->WordEqual(divisor_map, number_map),
-                          &divisor_is_number, &divisor_is_not_number);
-
-        assembler->Bind(&divisor_is_number);
-        {
-          // Convert {dividend} to a double and compute its modulus with the
-          // value of {dividend}.
-          var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
-          var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
-          assembler->Goto(&do_fmod);
-        }
-
-        assembler->Bind(&divisor_is_not_number);
-        {
-          // Convert {divisor} to a number and loop.
-          Callable callable =
-              CodeFactory::NonNumberToNumber(assembler->isolate());
-          var_divisor.Bind(assembler->CallStub(callable, context, divisor));
-          assembler->Goto(&loop);
-        }
-      }
-    }
-
-    assembler->Bind(&dividend_is_not_smi);
-    {
-      Node* dividend_map = assembler->LoadMap(dividend);
-
-      // Check if {dividend} is a HeapNumber.
-      Label dividend_is_number(assembler),
-          dividend_is_not_number(assembler, Label::kDeferred);
-      assembler->Branch(assembler->WordEqual(dividend_map, number_map),
-                        &dividend_is_number, &dividend_is_not_number);
-
-      assembler->Bind(&dividend_is_number);
-      {
-        // Check if {divisor} is a Smi.
-        Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
-        assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
-                          &divisor_is_not_smi);
-
-        assembler->Bind(&divisor_is_smi);
-        {
-          // Convert {divisor} to a double and compute {dividend}'s modulus with
-          // it.
-          var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
-          var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
-          assembler->Goto(&do_fmod);
-        }
-
-        assembler->Bind(&divisor_is_not_smi);
-        {
-          Node* divisor_map = assembler->LoadMap(divisor);
-
-          // Check if {divisor} is a HeapNumber.
-          Label divisor_is_number(assembler),
-              divisor_is_not_number(assembler, Label::kDeferred);
-          assembler->Branch(assembler->WordEqual(divisor_map, number_map),
-                            &divisor_is_number, &divisor_is_not_number);
-
-          assembler->Bind(&divisor_is_number);
-          {
-            // Both {dividend} and {divisor} are HeapNumbers. Load their values
-            // and compute their modulus.
-            var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
-            var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
-            assembler->Goto(&do_fmod);
-          }
-
-          assembler->Bind(&divisor_is_not_number);
-          {
-            // Convert {divisor} to a number and loop.
-            Callable callable =
-                CodeFactory::NonNumberToNumber(assembler->isolate());
-            var_divisor.Bind(assembler->CallStub(callable, context, divisor));
-            assembler->Goto(&loop);
-          }
-        }
-      }
-
-      assembler->Bind(&dividend_is_not_number);
-      {
-        // Convert {dividend} to a Number and loop.
-        Callable callable =
-            CodeFactory::NonNumberToNumber(assembler->isolate());
-        var_dividend.Bind(assembler->CallStub(callable, context, dividend));
-        assembler->Goto(&loop);
-      }
-    }
-  }
-
-  assembler->Bind(&do_fmod);
-  {
-    Node* value = assembler->Float64Mod(var_dividend_float64.value(),
-                                        var_divisor_float64.value());
-    var_result.Bind(assembler->ChangeFloat64ToTagged(value));
-    assembler->Goto(&return_result);
-  }
-
-  assembler->Bind(&return_result);
-  return var_result.value();
-}
-
-// static
 compiler::Node* ModulusWithFeedbackStub::Generate(
     CodeStubAssembler* assembler, compiler::Node* dividend,
     compiler::Node* divisor, compiler::Node* slot_id,
@@ -2319,7 +1513,10 @@
   typedef CodeStubAssembler::Variable Variable;
 
   // Shared entry point for floating point division.
-  Label do_fmod(assembler), end(assembler), call_modulus_stub(assembler);
+  Label do_fmod(assembler), dividend_is_not_number(assembler, Label::kDeferred),
+      check_divisor_for_oddball(assembler, Label::kDeferred),
+      call_with_oddball_feedback(assembler), call_with_any_feedback(assembler),
+      call_modulus_stub(assembler), end(assembler);
   Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
       var_divisor_float64(assembler, MachineRepresentation::kFloat64),
       var_result(assembler, MachineRepresentation::kTagged),
@@ -2328,20 +1525,20 @@
   Node* number_map = assembler->HeapNumberMapConstant();
 
   Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
-  assembler->Branch(assembler->WordIsSmi(dividend), &dividend_is_smi,
+  assembler->Branch(assembler->TaggedIsSmi(dividend), &dividend_is_smi,
                     &dividend_is_not_smi);
 
   assembler->Bind(&dividend_is_smi);
   {
     Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
-    assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+    assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
                       &divisor_is_not_smi);
 
     assembler->Bind(&divisor_is_smi);
     {
       var_result.Bind(assembler->SmiMod(dividend, divisor));
       var_type_feedback.Bind(assembler->Select(
-          assembler->WordIsSmi(var_result.value()),
+          assembler->TaggedIsSmi(var_result.value()),
           assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall),
           assembler->Int32Constant(BinaryOperationFeedback::kNumber)));
       assembler->Goto(&end);
@@ -2353,7 +1550,7 @@
 
       // Check if {divisor} is a HeapNumber.
       assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
-                            &call_modulus_stub);
+                            &check_divisor_for_oddball);
 
       // Convert {dividend} to a double and divide it with the value of
       // {divisor}.
@@ -2369,11 +1566,11 @@
 
     // Check if {dividend} is a HeapNumber.
     assembler->GotoUnless(assembler->WordEqual(dividend_map, number_map),
-                          &call_modulus_stub);
+                          &dividend_is_not_number);
 
     // Check if {divisor} is a Smi.
     Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
-    assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+    assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
                       &divisor_is_not_smi);
 
     assembler->Bind(&divisor_is_smi);
@@ -2391,7 +1588,7 @@
 
       // Check if {divisor} is a HeapNumber.
       assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
-                            &call_modulus_stub);
+                            &check_divisor_for_oddball);
 
       // Both {dividend} and {divisor} are HeapNumbers. Load their values
       // and divide them.
@@ -2407,14 +1604,57 @@
         assembler->Int32Constant(BinaryOperationFeedback::kNumber));
     Node* value = assembler->Float64Mod(var_dividend_float64.value(),
                                         var_divisor_float64.value());
-    var_result.Bind(assembler->ChangeFloat64ToTagged(value));
+    var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
     assembler->Goto(&end);
   }
 
+  assembler->Bind(&dividend_is_not_number);
+  {
+    // No checks on divisor yet. We just know dividend is not a number or Smi.
+    // Check if dividend is an oddball.
+    Node* dividend_instance_type = assembler->LoadInstanceType(dividend);
+    Node* dividend_is_oddball = assembler->Word32Equal(
+        dividend_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+    assembler->GotoUnless(dividend_is_oddball, &call_with_any_feedback);
+
+    assembler->GotoIf(assembler->TaggedIsSmi(divisor),
+                      &call_with_oddball_feedback);
+
+    // Load the map of the {divisor}.
+    Node* divisor_map = assembler->LoadMap(divisor);
+
+    // Check if {divisor} is a HeapNumber.
+    assembler->Branch(assembler->IsHeapNumberMap(divisor_map),
+                      &call_with_oddball_feedback, &check_divisor_for_oddball);
+  }
+
+  assembler->Bind(&check_divisor_for_oddball);
+  {
+    // Check if divisor is an oddball. At this point we know dividend is
+    // either a Smi, a HeapNumber, or an oddball, and that divisor is neither
+    // a HeapNumber nor a Smi.
+    Node* divisor_instance_type = assembler->LoadInstanceType(divisor);
+    Node* divisor_is_oddball = assembler->Word32Equal(
+        divisor_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+    assembler->Branch(divisor_is_oddball, &call_with_oddball_feedback,
+                      &call_with_any_feedback);
+  }
+
+  assembler->Bind(&call_with_oddball_feedback);
+  {
+    var_type_feedback.Bind(
+        assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+    assembler->Goto(&call_modulus_stub);
+  }
+
+  assembler->Bind(&call_with_any_feedback);
+  {
+    var_type_feedback.Bind(
+        assembler->Int32Constant(BinaryOperationFeedback::kAny));
+    assembler->Goto(&call_modulus_stub);
+  }
+
   assembler->Bind(&call_modulus_stub);
   {
-    var_type_feedback.Bind(
-        assembler->Int32Constant(BinaryOperationFeedback::kAny));
     Callable callable = CodeFactory::Modulus(assembler->isolate());
     var_result.Bind(assembler->CallStub(callable, context, dividend, divisor));
     assembler->Goto(&end);
@@ -2425,95 +1665,6 @@
                             slot_id);
   return var_result.value();
 }
-// static
-compiler::Node* ShiftLeftStub::Generate(CodeStubAssembler* assembler,
-                                        compiler::Node* left,
-                                        compiler::Node* right,
-                                        compiler::Node* context) {
-  using compiler::Node;
-
-  Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
-  Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
-  Node* shift_count =
-      assembler->Word32And(rhs_value, assembler->Int32Constant(0x1f));
-  Node* value = assembler->Word32Shl(lhs_value, shift_count);
-  Node* result = assembler->ChangeInt32ToTagged(value);
-  return result;
-}
-
-// static
-compiler::Node* ShiftRightStub::Generate(CodeStubAssembler* assembler,
-                                         compiler::Node* left,
-                                         compiler::Node* right,
-                                         compiler::Node* context) {
-  using compiler::Node;
-
-  Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
-  Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
-  Node* shift_count =
-      assembler->Word32And(rhs_value, assembler->Int32Constant(0x1f));
-  Node* value = assembler->Word32Sar(lhs_value, shift_count);
-  Node* result = assembler->ChangeInt32ToTagged(value);
-  return result;
-}
-
-// static
-compiler::Node* ShiftRightLogicalStub::Generate(CodeStubAssembler* assembler,
-                                                compiler::Node* left,
-                                                compiler::Node* right,
-                                                compiler::Node* context) {
-  using compiler::Node;
-
-  Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
-  Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
-  Node* shift_count =
-      assembler->Word32And(rhs_value, assembler->Int32Constant(0x1f));
-  Node* value = assembler->Word32Shr(lhs_value, shift_count);
-  Node* result = assembler->ChangeUint32ToTagged(value);
-  return result;
-}
-
-// static
-compiler::Node* BitwiseAndStub::Generate(CodeStubAssembler* assembler,
-                                         compiler::Node* left,
-                                         compiler::Node* right,
-                                         compiler::Node* context) {
-  using compiler::Node;
-
-  Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
-  Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
-  Node* value = assembler->Word32And(lhs_value, rhs_value);
-  Node* result = assembler->ChangeInt32ToTagged(value);
-  return result;
-}
-
-// static
-compiler::Node* BitwiseOrStub::Generate(CodeStubAssembler* assembler,
-                                        compiler::Node* left,
-                                        compiler::Node* right,
-                                        compiler::Node* context) {
-  using compiler::Node;
-
-  Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
-  Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
-  Node* value = assembler->Word32Or(lhs_value, rhs_value);
-  Node* result = assembler->ChangeInt32ToTagged(value);
-  return result;
-}
-
-// static
-compiler::Node* BitwiseXorStub::Generate(CodeStubAssembler* assembler,
-                                         compiler::Node* left,
-                                         compiler::Node* right,
-                                         compiler::Node* context) {
-  using compiler::Node;
-
-  Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
-  Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
-  Node* value = assembler->Word32Xor(lhs_value, rhs_value);
-  Node* result = assembler->ChangeInt32ToTagged(value);
-  return result;
-}
 
 // static
 compiler::Node* IncStub::Generate(CodeStubAssembler* assembler,
@@ -2544,13 +1695,15 @@
     value = value_var.value();
 
     Label if_issmi(assembler), if_isnotsmi(assembler);
-    assembler->Branch(assembler->WordIsSmi(value), &if_issmi, &if_isnotsmi);
+    assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
 
     assembler->Bind(&if_issmi);
     {
       // Try fast Smi addition first.
       Node* one = assembler->SmiConstant(Smi::FromInt(1));
-      Node* pair = assembler->SmiAddWithOverflow(value, one);
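+      // Same raw-word trick as in Smi subtraction: add the bitcast tagged
+      // words and let the machine overflow flag stand in for the Smi range
+      // check.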
+      Node* pair = assembler->IntPtrAddWithOverflow(
+          assembler->BitcastTaggedToWord(value),
+          assembler->BitcastTaggedToWord(one));
       Node* overflow = assembler->Projection(1, pair);
 
       // Check if the Smi addition overflowed.
@@ -2561,7 +1714,8 @@
       var_type_feedback.Bind(assembler->Word32Or(
           var_type_feedback.value(),
           assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall)));
-      result_var.Bind(assembler->Projection(0, pair));
+      result_var.Bind(
+          assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
       assembler->Goto(&end);
 
       assembler->Bind(&if_overflow);
@@ -2589,13 +1743,40 @@
 
       assembler->Bind(&if_valuenotnumber);
       {
-        // Convert to a Number first and try again.
-        Callable callable =
-            CodeFactory::NonNumberToNumber(assembler->isolate());
-        var_type_feedback.Bind(
-            assembler->Int32Constant(BinaryOperationFeedback::kAny));
-        value_var.Bind(assembler->CallStub(callable, context, value));
-        assembler->Goto(&start);
+        // We do not require an Or with earlier feedback here because once we
+        // convert the value to a number, we cannot reach this path. We can
+        // only reach this path on the first pass when the feedback is kNone.
+        CSA_ASSERT(assembler,
+                   assembler->Word32Equal(var_type_feedback.value(),
+                                          assembler->Int32Constant(
+                                              BinaryOperationFeedback::kNone)));
+
+        Label if_valueisoddball(assembler), if_valuenotoddball(assembler);
+        Node* instance_type = assembler->LoadMapInstanceType(value_map);
+        Node* is_oddball = assembler->Word32Equal(
+            instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+        assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);
+
+        assembler->Bind(&if_valueisoddball);
+        {
+          // Convert Oddball to Number and check again.
+          value_var.Bind(
+              assembler->LoadObjectField(value, Oddball::kToNumberOffset));
+          var_type_feedback.Bind(assembler->Int32Constant(
+              BinaryOperationFeedback::kNumberOrOddball));
+          assembler->Goto(&start);
+        }
+
+        assembler->Bind(&if_valuenotoddball);
+        {
+          // Convert to a Number first and try again.
+          Callable callable =
+              CodeFactory::NonNumberToNumber(assembler->isolate());
+          var_type_feedback.Bind(
+              assembler->Int32Constant(BinaryOperationFeedback::kAny));
+          value_var.Bind(assembler->CallStub(callable, context, value));
+          assembler->Goto(&start);
+        }
       }
     }
   }
@@ -2608,7 +1789,7 @@
     var_type_feedback.Bind(assembler->Word32Or(
         var_type_feedback.value(),
         assembler->Int32Constant(BinaryOperationFeedback::kNumber)));
-    result_var.Bind(assembler->ChangeFloat64ToTagged(finc_result));
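+    // Always allocate a fresh HeapNumber instead of ChangeFloat64ToTagged,
+    // which could re-tag a small integer as a Smi; the result thus always
+    // has Number shape, in line with the kNumber feedback recorded above.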
+    result_var.Bind(assembler->AllocateHeapNumberWithValue(finc_result));
     assembler->Goto(&end);
   }
 
@@ -2618,6 +1799,13 @@
   return result_var.value();
 }
 
+void NumberToStringStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+  typedef compiler::Node Node;
+  Node* argument = assembler->Parameter(Descriptor::kArgument);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+  assembler->Return(assembler->NumberToString(context, argument));
+}
+
 // static
 compiler::Node* DecStub::Generate(CodeStubAssembler* assembler,
                                   compiler::Node* value,
@@ -2647,13 +1835,15 @@
     value = value_var.value();
 
     Label if_issmi(assembler), if_isnotsmi(assembler);
-    assembler->Branch(assembler->WordIsSmi(value), &if_issmi, &if_isnotsmi);
+    assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
 
     assembler->Bind(&if_issmi);
     {
       // Try fast Smi subtraction first.
       Node* one = assembler->SmiConstant(Smi::FromInt(1));
-      Node* pair = assembler->SmiSubWithOverflow(value, one);
+      Node* pair = assembler->IntPtrSubWithOverflow(
+          assembler->BitcastTaggedToWord(value),
+          assembler->BitcastTaggedToWord(one));
       Node* overflow = assembler->Projection(1, pair);
 
       // Check if the Smi subtraction overflowed.
@@ -2664,7 +1854,8 @@
       var_type_feedback.Bind(assembler->Word32Or(
           var_type_feedback.value(),
           assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall)));
-      result_var.Bind(assembler->Projection(0, pair));
+      result_var.Bind(
+          assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
       assembler->Goto(&end);
 
       assembler->Bind(&if_overflow);
@@ -2692,13 +1883,40 @@
 
       assembler->Bind(&if_valuenotnumber);
       {
-        // Convert to a Number first and try again.
-        Callable callable =
-            CodeFactory::NonNumberToNumber(assembler->isolate());
-        var_type_feedback.Bind(
-            assembler->Int32Constant(BinaryOperationFeedback::kAny));
-        value_var.Bind(assembler->CallStub(callable, context, value));
-        assembler->Goto(&start);
+        // We do not require an Or with earlier feedback here because once we
+        // convert the value to a number, we cannot reach this path. We can
+        // only reach this path on the first pass when the feedback is kNone.
+        CSA_ASSERT(assembler,
+                   assembler->Word32Equal(var_type_feedback.value(),
+                                          assembler->Int32Constant(
+                                              BinaryOperationFeedback::kNone)));
+
+        Label if_valueisoddball(assembler), if_valuenotoddball(assembler);
+        Node* instance_type = assembler->LoadMapInstanceType(value_map);
+        Node* is_oddball = assembler->Word32Equal(
+            instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+        assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);
+
+        assembler->Bind(&if_valueisoddball);
+        {
+          // Convert Oddball to Number and check again.
+          value_var.Bind(
+              assembler->LoadObjectField(value, Oddball::kToNumberOffset));
+          var_type_feedback.Bind(assembler->Int32Constant(
+              BinaryOperationFeedback::kNumberOrOddball));
+          assembler->Goto(&start);
+        }
+
+        assembler->Bind(&if_valuenotoddball);
+        {
+          // Convert to a Number first and try again.
+          Callable callable =
+              CodeFactory::NonNumberToNumber(assembler->isolate());
+          var_type_feedback.Bind(
+              assembler->Int32Constant(BinaryOperationFeedback::kAny));
+          value_var.Bind(assembler->CallStub(callable, context, value));
+          assembler->Goto(&start);
+        }
       }
     }
   }
@@ -2711,7 +1929,7 @@
     var_type_feedback.Bind(assembler->Word32Or(
         var_type_feedback.value(),
         assembler->Int32Constant(BinaryOperationFeedback::kNumber)));
-    result_var.Bind(assembler->ChangeFloat64ToTagged(fdec_result));
+    result_var.Bind(assembler->AllocateHeapNumberWithValue(fdec_result));
     assembler->Goto(&end);
   }
 
@@ -2730,1439 +1948,6 @@
   return assembler->SubString(context, string, from, to);
 }
 
-// ES6 section 7.1.13 ToObject (argument)
-void ToObjectStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Label Label;
-  typedef CodeStubAssembler::Variable Variable;
-
-  Label if_number(assembler, Label::kDeferred), if_notsmi(assembler),
-      if_jsreceiver(assembler), if_noconstructor(assembler, Label::kDeferred),
-      if_wrapjsvalue(assembler);
-
-  Node* object = assembler->Parameter(Descriptor::kArgument);
-  Node* context = assembler->Parameter(Descriptor::kContext);
-
-  Variable constructor_function_index_var(assembler,
-                                          MachineType::PointerRepresentation());
-
-  assembler->Branch(assembler->WordIsSmi(object), &if_number, &if_notsmi);
-
-  assembler->Bind(&if_notsmi);
-  Node* map = assembler->LoadMap(object);
-
-  assembler->GotoIf(assembler->IsHeapNumberMap(map), &if_number);
-
-  Node* instance_type = assembler->LoadMapInstanceType(map);
-  assembler->GotoIf(assembler->IsJSReceiverInstanceType(instance_type),
-                    &if_jsreceiver);
-
-  Node* constructor_function_index =
-      assembler->LoadMapConstructorFunctionIndex(map);
-  assembler->GotoIf(assembler->WordEqual(constructor_function_index,
-                                         assembler->IntPtrConstant(
-                                             Map::kNoConstructorFunctionIndex)),
-                    &if_noconstructor);
-  constructor_function_index_var.Bind(constructor_function_index);
-  assembler->Goto(&if_wrapjsvalue);
-
-  assembler->Bind(&if_number);
-  constructor_function_index_var.Bind(
-      assembler->IntPtrConstant(Context::NUMBER_FUNCTION_INDEX));
-  assembler->Goto(&if_wrapjsvalue);
-
-  assembler->Bind(&if_wrapjsvalue);
-  Node* native_context = assembler->LoadNativeContext(context);
-  Node* constructor = assembler->LoadFixedArrayElement(
-      native_context, constructor_function_index_var.value(), 0,
-      CodeStubAssembler::INTPTR_PARAMETERS);
-  Node* initial_map = assembler->LoadObjectField(
-      constructor, JSFunction::kPrototypeOrInitialMapOffset);
-  Node* js_value = assembler->Allocate(JSValue::kSize);
-  assembler->StoreMapNoWriteBarrier(js_value, initial_map);
-  assembler->StoreObjectFieldRoot(js_value, JSValue::kPropertiesOffset,
-                                  Heap::kEmptyFixedArrayRootIndex);
-  assembler->StoreObjectFieldRoot(js_value, JSObject::kElementsOffset,
-                                  Heap::kEmptyFixedArrayRootIndex);
-  assembler->StoreObjectField(js_value, JSValue::kValueOffset, object);
-  assembler->Return(js_value);
-
-  assembler->Bind(&if_noconstructor);
-  assembler->TailCallRuntime(
-      Runtime::kThrowUndefinedOrNullToObject, context,
-      assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
-          "ToObject", TENURED)));
-
-  assembler->Bind(&if_jsreceiver);
-  assembler->Return(object);
-}
-
-// static
-// ES6 section 12.5.5 typeof operator
-compiler::Node* TypeofStub::Generate(CodeStubAssembler* assembler,
-                                     compiler::Node* value,
-                                     compiler::Node* context) {
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Label Label;
-  typedef CodeStubAssembler::Variable Variable;
-
-  Variable result_var(assembler, MachineRepresentation::kTagged);
-
-  Label return_number(assembler, Label::kDeferred), if_oddball(assembler),
-      return_function(assembler), return_undefined(assembler),
-      return_object(assembler), return_string(assembler),
-      return_result(assembler);
-
-  assembler->GotoIf(assembler->WordIsSmi(value), &return_number);
-
-  Node* map = assembler->LoadMap(value);
-
-  assembler->GotoIf(assembler->IsHeapNumberMap(map), &return_number);
-
-  Node* instance_type = assembler->LoadMapInstanceType(map);
-
-  assembler->GotoIf(assembler->Word32Equal(
-                        instance_type, assembler->Int32Constant(ODDBALL_TYPE)),
-                    &if_oddball);
-
-  Node* callable_or_undetectable_mask =
-      assembler->Word32And(assembler->LoadMapBitField(map),
-                           assembler->Int32Constant(1 << Map::kIsCallable |
-                                                    1 << Map::kIsUndetectable));
-
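-  // A map that is callable and not undetectable yields "function"; any
-  // undetectable map (e.g. document.all) yields "undefined".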
-  assembler->GotoIf(
-      assembler->Word32Equal(callable_or_undetectable_mask,
-                             assembler->Int32Constant(1 << Map::kIsCallable)),
-      &return_function);
-
-  assembler->GotoUnless(assembler->Word32Equal(callable_or_undetectable_mask,
-                                               assembler->Int32Constant(0)),
-                        &return_undefined);
-
-  assembler->GotoIf(assembler->IsJSReceiverInstanceType(instance_type),
-                    &return_object);
-
-  assembler->GotoIf(assembler->IsStringInstanceType(instance_type),
-                    &return_string);
-
-#define SIMD128_BRANCH(TYPE, Type, type, lane_count, lane_type)    \
-  Label return_##type(assembler);                                  \
-  Node* type##_map =                                               \
-      assembler->HeapConstant(assembler->factory()->type##_map()); \
-  assembler->GotoIf(assembler->WordEqual(map, type##_map), &return_##type);
-  SIMD128_TYPES(SIMD128_BRANCH)
-#undef SIMD128_BRANCH
-
-  assembler->Assert(assembler->Word32Equal(
-      instance_type, assembler->Int32Constant(SYMBOL_TYPE)));
-  result_var.Bind(assembler->HeapConstant(
-      assembler->isolate()->factory()->symbol_string()));
-  assembler->Goto(&return_result);
-
-  assembler->Bind(&return_number);
-  {
-    result_var.Bind(assembler->HeapConstant(
-        assembler->isolate()->factory()->number_string()));
-    assembler->Goto(&return_result);
-  }
-
-  assembler->Bind(&if_oddball);
-  {
-    Node* type = assembler->LoadObjectField(value, Oddball::kTypeOfOffset);
-    result_var.Bind(type);
-    assembler->Goto(&return_result);
-  }
-
-  assembler->Bind(&return_function);
-  {
-    result_var.Bind(assembler->HeapConstant(
-        assembler->isolate()->factory()->function_string()));
-    assembler->Goto(&return_result);
-  }
-
-  assembler->Bind(&return_undefined);
-  {
-    result_var.Bind(assembler->HeapConstant(
-        assembler->isolate()->factory()->undefined_string()));
-    assembler->Goto(&return_result);
-  }
-
-  assembler->Bind(&return_object);
-  {
-    result_var.Bind(assembler->HeapConstant(
-        assembler->isolate()->factory()->object_string()));
-    assembler->Goto(&return_result);
-  }
-
-  assembler->Bind(&return_string);
-  {
-    result_var.Bind(assembler->HeapConstant(
-        assembler->isolate()->factory()->string_string()));
-    assembler->Goto(&return_result);
-  }
-
-#define SIMD128_BIND_RETURN(TYPE, Type, type, lane_count, lane_type) \
-  assembler->Bind(&return_##type);                                   \
-  {                                                                  \
-    result_var.Bind(assembler->HeapConstant(                         \
-        assembler->isolate()->factory()->type##_string()));          \
-    assembler->Goto(&return_result);                                 \
-  }
-  SIMD128_TYPES(SIMD128_BIND_RETURN)
-#undef SIMD128_BIND_RETURN
-
-  assembler->Bind(&return_result);
-  return result_var.value();
-}
-
-// static
-compiler::Node* InstanceOfStub::Generate(CodeStubAssembler* assembler,
-                                         compiler::Node* object,
-                                         compiler::Node* callable,
-                                         compiler::Node* context) {
-  typedef CodeStubAssembler::Label Label;
-  typedef CodeStubAssembler::Variable Variable;
-
-  Label return_runtime(assembler, Label::kDeferred), end(assembler);
-  Variable result(assembler, MachineRepresentation::kTagged);
-
-  // Check that no one has installed @@hasInstance anywhere.
-  assembler->GotoUnless(
-      assembler->WordEqual(
-          assembler->LoadObjectField(
-              assembler->LoadRoot(Heap::kHasInstanceProtectorRootIndex),
-              PropertyCell::kValueOffset),
-          assembler->SmiConstant(Smi::FromInt(Isolate::kArrayProtectorValid))),
-      &return_runtime);
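-  // While the protector cell is valid, nobody has installed a custom
-  // @@hasInstance handler, so the inline OrdinaryHasInstance fast path
-  // below matches the generic instanceof semantics.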
-
-  // Check if {callable} is a valid receiver.
-  assembler->GotoIf(assembler->WordIsSmi(callable), &return_runtime);
-  assembler->GotoIf(
-      assembler->Word32Equal(
-          assembler->Word32And(
-              assembler->LoadMapBitField(assembler->LoadMap(callable)),
-              assembler->Int32Constant(1 << Map::kIsCallable)),
-          assembler->Int32Constant(0)),
-      &return_runtime);
-
-  // Use the inline OrdinaryHasInstance directly.
-  result.Bind(assembler->OrdinaryHasInstance(context, callable, object));
-  assembler->Goto(&end);
-
-  // TODO(bmeurer): Use GetPropertyStub here once available.
-  assembler->Bind(&return_runtime);
-  {
-    result.Bind(assembler->CallRuntime(Runtime::kInstanceOf, context, object,
-                                       callable));
-    assembler->Goto(&end);
-  }
-
-  assembler->Bind(&end);
-  return result.value();
-}
-
-namespace {
-
-enum RelationalComparisonMode {
-  kLessThan,
-  kLessThanOrEqual,
-  kGreaterThan,
-  kGreaterThanOrEqual
-};
-
-compiler::Node* GenerateAbstractRelationalComparison(
-    CodeStubAssembler* assembler, RelationalComparisonMode mode,
-    compiler::Node* lhs, compiler::Node* rhs, compiler::Node* context) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
-
-  Label return_true(assembler), return_false(assembler), end(assembler);
-  Variable result(assembler, MachineRepresentation::kTagged);
-
-  // Shared entry for floating point comparison.
-  Label do_fcmp(assembler);
-  Variable var_fcmp_lhs(assembler, MachineRepresentation::kFloat64),
-      var_fcmp_rhs(assembler, MachineRepresentation::kFloat64);
-
-  // We might need to loop several times due to ToPrimitive and/or ToNumber
-  // conversions.
-  Variable var_lhs(assembler, MachineRepresentation::kTagged),
-      var_rhs(assembler, MachineRepresentation::kTagged);
-  Variable* loop_vars[2] = {&var_lhs, &var_rhs};
-  Label loop(assembler, 2, loop_vars);
-  var_lhs.Bind(lhs);
-  var_rhs.Bind(rhs);
-  assembler->Goto(&loop);
-  assembler->Bind(&loop);
-  {
-    // Load the current {lhs} and {rhs} values.
-    lhs = var_lhs.value();
-    rhs = var_rhs.value();
-
-    // Check if the {lhs} is a Smi or a HeapObject.
-    Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
-    assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
-
-    assembler->Bind(&if_lhsissmi);
-    {
-      // Check if {rhs} is a Smi or a HeapObject.
-      Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
-      assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
-                        &if_rhsisnotsmi);
-
-      assembler->Bind(&if_rhsissmi);
-      {
-        // Both {lhs} and {rhs} are Smi, so just perform a fast Smi comparison.
-        switch (mode) {
-          case kLessThan:
-            assembler->BranchIfSmiLessThan(lhs, rhs, &return_true,
-                                           &return_false);
-            break;
-          case kLessThanOrEqual:
-            assembler->BranchIfSmiLessThanOrEqual(lhs, rhs, &return_true,
-                                                  &return_false);
-            break;
-          case kGreaterThan:
-            assembler->BranchIfSmiLessThan(rhs, lhs, &return_true,
-                                           &return_false);
-            break;
-          case kGreaterThanOrEqual:
-            assembler->BranchIfSmiLessThanOrEqual(rhs, lhs, &return_true,
-                                                  &return_false);
-            break;
-        }
-      }
-
-      assembler->Bind(&if_rhsisnotsmi);
-      {
-        // Load the map of {rhs}.
-        Node* rhs_map = assembler->LoadMap(rhs);
-
-        // Check if the {rhs} is a HeapNumber.
-        Label if_rhsisnumber(assembler),
-            if_rhsisnotnumber(assembler, Label::kDeferred);
-        assembler->Branch(assembler->IsHeapNumberMap(rhs_map), &if_rhsisnumber,
-                          &if_rhsisnotnumber);
-
-        assembler->Bind(&if_rhsisnumber);
-        {
-          // Convert the {lhs} and {rhs} to floating point values, and
-          // perform a floating point comparison.
-          var_fcmp_lhs.Bind(assembler->SmiToFloat64(lhs));
-          var_fcmp_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
-          assembler->Goto(&do_fcmp);
-        }
-
-        assembler->Bind(&if_rhsisnotnumber);
-        {
-          // Convert the {rhs} to a Number; we don't need to perform the
-          // dedicated ToPrimitive(rhs, hint Number) operation, as the
-          // ToNumber(rhs) will by itself already invoke ToPrimitive with
-          // a Number hint.
-          Callable callable =
-              CodeFactory::NonNumberToNumber(assembler->isolate());
-          var_rhs.Bind(assembler->CallStub(callable, context, rhs));
-          assembler->Goto(&loop);
-        }
-      }
-    }
-
-    assembler->Bind(&if_lhsisnotsmi);
-    {
-      // Load the HeapNumber map for later comparisons.
-      Node* number_map = assembler->HeapNumberMapConstant();
-
-      // Load the map of {lhs}.
-      Node* lhs_map = assembler->LoadMap(lhs);
-
-      // Check if {rhs} is a Smi or a HeapObject.
-      Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
-      assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
-                        &if_rhsisnotsmi);
-
-      assembler->Bind(&if_rhsissmi);
-      {
-        // Check if the {lhs} is a HeapNumber.
-        Label if_lhsisnumber(assembler),
-            if_lhsisnotnumber(assembler, Label::kDeferred);
-        assembler->Branch(assembler->WordEqual(lhs_map, number_map),
-                          &if_lhsisnumber, &if_lhsisnotnumber);
-
-        assembler->Bind(&if_lhsisnumber);
-        {
-          // Convert the {lhs} and {rhs} to floating point values, and
-          // perform a floating point comparison.
-          var_fcmp_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
-          var_fcmp_rhs.Bind(assembler->SmiToFloat64(rhs));
-          assembler->Goto(&do_fcmp);
-        }
-
-        assembler->Bind(&if_lhsisnotnumber);
-        {
-          // Convert the {lhs} to a Number; we don't need to perform the
-          // dedicated ToPrimitive(lhs, hint Number) operation, as the
-          // ToNumber(lhs) will by itself already invoke ToPrimitive with
-          // a Number hint.
-          Callable callable =
-              CodeFactory::NonNumberToNumber(assembler->isolate());
-          var_lhs.Bind(assembler->CallStub(callable, context, lhs));
-          assembler->Goto(&loop);
-        }
-      }
-
-      assembler->Bind(&if_rhsisnotsmi);
-      {
-        // Load the map of {rhs}.
-        Node* rhs_map = assembler->LoadMap(rhs);
-
-        // Check if {lhs} is a HeapNumber.
-        Label if_lhsisnumber(assembler), if_lhsisnotnumber(assembler);
-        assembler->Branch(assembler->WordEqual(lhs_map, number_map),
-                          &if_lhsisnumber, &if_lhsisnotnumber);
-
-        assembler->Bind(&if_lhsisnumber);
-        {
-          // Check if {rhs} is also a HeapNumber.
-          Label if_rhsisnumber(assembler),
-              if_rhsisnotnumber(assembler, Label::kDeferred);
-          assembler->Branch(assembler->WordEqual(lhs_map, rhs_map),
-                            &if_rhsisnumber, &if_rhsisnotnumber);
-
-          assembler->Bind(&if_rhsisnumber);
-          {
-            // Convert the {lhs} and {rhs} to floating point values, and
-            // perform a floating point comparison.
-            var_fcmp_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
-            var_fcmp_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
-            assembler->Goto(&do_fcmp);
-          }
-
-          assembler->Bind(&if_rhsisnotnumber);
-          {
-            // Convert the {rhs} to a Number; we don't need to perform
-            // the dedicated ToPrimitive(rhs, hint Number) operation, as the
-            // ToNumber(rhs) will by itself already invoke ToPrimitive with
-            // a Number hint.
-            Callable callable =
-                CodeFactory::NonNumberToNumber(assembler->isolate());
-            var_rhs.Bind(assembler->CallStub(callable, context, rhs));
-            assembler->Goto(&loop);
-          }
-        }
-
-        assembler->Bind(&if_lhsisnotnumber);
-        {
-          // Load the instance type of {lhs}.
-          Node* lhs_instance_type = assembler->LoadMapInstanceType(lhs_map);
-
-          // Check if {lhs} is a String.
-          Label if_lhsisstring(assembler),
-              if_lhsisnotstring(assembler, Label::kDeferred);
-          assembler->Branch(assembler->IsStringInstanceType(lhs_instance_type),
-                            &if_lhsisstring, &if_lhsisnotstring);
-
-          assembler->Bind(&if_lhsisstring);
-          {
-            // Load the instance type of {rhs}.
-            Node* rhs_instance_type = assembler->LoadMapInstanceType(rhs_map);
-
-            // Check if {rhs} is also a String.
-            Label if_rhsisstring(assembler, Label::kDeferred),
-                if_rhsisnotstring(assembler, Label::kDeferred);
-            assembler->Branch(
-                assembler->IsStringInstanceType(rhs_instance_type),
-                &if_rhsisstring, &if_rhsisnotstring);
-
-            assembler->Bind(&if_rhsisstring);
-            {
-              // Both {lhs} and {rhs} are strings.
-              switch (mode) {
-                case kLessThan:
-                  result.Bind(assembler->CallStub(
-                      CodeFactory::StringLessThan(assembler->isolate()),
-                      context, lhs, rhs));
-                  assembler->Goto(&end);
-                  break;
-                case kLessThanOrEqual:
-                  result.Bind(assembler->CallStub(
-                      CodeFactory::StringLessThanOrEqual(assembler->isolate()),
-                      context, lhs, rhs));
-                  assembler->Goto(&end);
-                  break;
-                case kGreaterThan:
-                  result.Bind(assembler->CallStub(
-                      CodeFactory::StringGreaterThan(assembler->isolate()),
-                      context, lhs, rhs));
-                  assembler->Goto(&end);
-                  break;
-                case kGreaterThanOrEqual:
-                  result.Bind(
-                      assembler->CallStub(CodeFactory::StringGreaterThanOrEqual(
-                                              assembler->isolate()),
-                                          context, lhs, rhs));
-                  assembler->Goto(&end);
-                  break;
-              }
-            }
-
-            assembler->Bind(&if_rhsisnotstring);
-            {
-              // The {lhs} is a String, while {rhs} is neither a Number nor a
-              // String, so we need to call ToPrimitive(rhs, hint Number) if
-              // {rhs} is a receiver, or ToNumber(lhs) and ToNumber(rhs) in the
-              // other cases.
-              STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
-              Label if_rhsisreceiver(assembler, Label::kDeferred),
-                  if_rhsisnotreceiver(assembler, Label::kDeferred);
-              assembler->Branch(
-                  assembler->IsJSReceiverInstanceType(rhs_instance_type),
-                  &if_rhsisreceiver, &if_rhsisnotreceiver);
-
-              assembler->Bind(&if_rhsisreceiver);
-              {
-                // Convert {rhs} to a primitive first passing Number hint.
-                Callable callable = CodeFactory::NonPrimitiveToPrimitive(
-                    assembler->isolate(), ToPrimitiveHint::kNumber);
-                var_rhs.Bind(assembler->CallStub(callable, context, rhs));
-                assembler->Goto(&loop);
-              }
-
-              assembler->Bind(&if_rhsisnotreceiver);
-              {
-                // Convert both {lhs} and {rhs} to Number.
-                Callable callable = CodeFactory::ToNumber(assembler->isolate());
-                var_lhs.Bind(assembler->CallStub(callable, context, lhs));
-                var_rhs.Bind(assembler->CallStub(callable, context, rhs));
-                assembler->Goto(&loop);
-              }
-            }
-          }
-
-          assembler->Bind(&if_lhsisnotstring);
-          {
-            // The {lhs} is neither a Number nor a String, so we need to call
-            // ToPrimitive(lhs, hint Number) if {lhs} is a receiver, or
-            // ToNumber(lhs) and ToNumber(rhs) in the other cases.
-            STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
-            Label if_lhsisreceiver(assembler, Label::kDeferred),
-                if_lhsisnotreceiver(assembler, Label::kDeferred);
-            assembler->Branch(
-                assembler->IsJSReceiverInstanceType(lhs_instance_type),
-                &if_lhsisreceiver, &if_lhsisnotreceiver);
-
-            assembler->Bind(&if_lhsisreceiver);
-            {
-              // Convert {lhs} to a primitive first passing Number hint.
-              Callable callable = CodeFactory::NonPrimitiveToPrimitive(
-                  assembler->isolate(), ToPrimitiveHint::kNumber);
-              var_lhs.Bind(assembler->CallStub(callable, context, lhs));
-              assembler->Goto(&loop);
-            }
-
-            assembler->Bind(&if_lhsisnotreceiver);
-            {
-              // Convert both {lhs} and {rhs} to Number.
-              Callable callable = CodeFactory::ToNumber(assembler->isolate());
-              var_lhs.Bind(assembler->CallStub(callable, context, lhs));
-              var_rhs.Bind(assembler->CallStub(callable, context, rhs));
-              assembler->Goto(&loop);
-            }
-          }
-        }
-      }
-    }
-  }
-
-  assembler->Bind(&do_fcmp);
-  {
-    // Load the {lhs} and {rhs} floating point values.
-    Node* lhs = var_fcmp_lhs.value();
-    Node* rhs = var_fcmp_rhs.value();
-
-    // Perform a fast floating point comparison.
-    switch (mode) {
-      case kLessThan:
-        assembler->BranchIfFloat64LessThan(lhs, rhs, &return_true,
-                                           &return_false);
-        break;
-      case kLessThanOrEqual:
-        assembler->BranchIfFloat64LessThanOrEqual(lhs, rhs, &return_true,
-                                                  &return_false);
-        break;
-      case kGreaterThan:
-        assembler->BranchIfFloat64GreaterThan(lhs, rhs, &return_true,
-                                              &return_false);
-        break;
-      case kGreaterThanOrEqual:
-        assembler->BranchIfFloat64GreaterThanOrEqual(lhs, rhs, &return_true,
-                                                     &return_false);
-        break;
-    }
-  }
-
-  assembler->Bind(&return_true);
-  {
-    result.Bind(assembler->BooleanConstant(true));
-    assembler->Goto(&end);
-  }
-
-  assembler->Bind(&return_false);
-  {
-    result.Bind(assembler->BooleanConstant(false));
-    assembler->Goto(&end);
-  }
-
-  assembler->Bind(&end);
-  return result.value();
-}
-
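-// The equality graphs below serve both the equal and not-equal stubs;
-// kNegateResult simply flips the final boolean result.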
-enum ResultMode { kDontNegateResult, kNegateResult };
-
-void GenerateEqual_Same(CodeStubAssembler* assembler, compiler::Node* value,
-                        CodeStubAssembler::Label* if_equal,
-                        CodeStubAssembler::Label* if_notequal) {
-  // In case of abstract or strict equality checks, we need additional checks
-  // for NaN values because they are not considered equal, even if both the
-  // left and the right hand side reference exactly the same value.
-  // TODO(bmeurer): This seems to violate the SIMD.js specification, but it
-  // seems to be what is tested in the current SIMD.js testsuite.
-
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-
-  // Check if {value} is a Smi or a HeapObject.
-  Label if_valueissmi(assembler), if_valueisnotsmi(assembler);
-  assembler->Branch(assembler->WordIsSmi(value), &if_valueissmi,
-                    &if_valueisnotsmi);
-
-  assembler->Bind(&if_valueisnotsmi);
-  {
-    // Load the map of {value}.
-    Node* value_map = assembler->LoadMap(value);
-
-    // Check if {value} (and therefore {rhs}) is a HeapNumber.
-    Label if_valueisnumber(assembler), if_valueisnotnumber(assembler);
-    assembler->Branch(assembler->IsHeapNumberMap(value_map), &if_valueisnumber,
-                      &if_valueisnotnumber);
-
-    assembler->Bind(&if_valueisnumber);
-    {
-      // Convert {value} (and therefore {rhs}) to floating point value.
-      Node* value_value = assembler->LoadHeapNumberValue(value);
-
-      // Check if the HeapNumber value is a NaN.
-      assembler->BranchIfFloat64IsNaN(value_value, if_notequal, if_equal);
-    }
-
-    assembler->Bind(&if_valueisnotnumber);
-    assembler->Goto(if_equal);
-  }
-
-  assembler->Bind(&if_valueissmi);
-  assembler->Goto(if_equal);
-}
-
-void GenerateEqual_Simd128Value_HeapObject(
-    CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* lhs_map,
-    compiler::Node* rhs, compiler::Node* rhs_map,
-    CodeStubAssembler::Label* if_equal, CodeStubAssembler::Label* if_notequal) {
-  assembler->BranchIfSimd128Equal(lhs, lhs_map, rhs, rhs_map, if_equal,
-                                  if_notequal);
-}
-
-// ES6 section 7.2.12 Abstract Equality Comparison
-compiler::Node* GenerateEqual(CodeStubAssembler* assembler, ResultMode mode,
-                              compiler::Node* lhs, compiler::Node* rhs,
-                              compiler::Node* context) {
-  // This is a slightly optimized version of Object::Equals represented as a
-  // scheduled TurboFan graph utilizing the CodeStubAssembler. Whenever you
-  // change any functionality in here, remember to update the Object::Equals
-  // method as well.
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
-
-  Label if_equal(assembler), if_notequal(assembler),
-      do_rhsstringtonumber(assembler, Label::kDeferred), end(assembler);
-  Variable result(assembler, MachineRepresentation::kTagged);
-
-  // Shared entry for floating point comparison.
-  Label do_fcmp(assembler);
-  Variable var_fcmp_lhs(assembler, MachineRepresentation::kFloat64),
-      var_fcmp_rhs(assembler, MachineRepresentation::kFloat64);
-
-  // We might need to loop several times due to ToPrimitive and/or ToNumber
-  // conversions.
-  Variable var_lhs(assembler, MachineRepresentation::kTagged),
-      var_rhs(assembler, MachineRepresentation::kTagged);
-  Variable* loop_vars[2] = {&var_lhs, &var_rhs};
-  Label loop(assembler, 2, loop_vars);
-  var_lhs.Bind(lhs);
-  var_rhs.Bind(rhs);
-  assembler->Goto(&loop);
-  assembler->Bind(&loop);
-  {
-    // Load the current {lhs} and {rhs} values.
-    lhs = var_lhs.value();
-    rhs = var_rhs.value();
-
-    // Check if {lhs} and {rhs} refer to the same object.
-    Label if_same(assembler), if_notsame(assembler);
-    assembler->Branch(assembler->WordEqual(lhs, rhs), &if_same, &if_notsame);
-
-    assembler->Bind(&if_same);
-    {
-      // The {lhs} and {rhs} reference the exact same value, yet we need special
-      // treatment for HeapNumber, as NaN is not equal to NaN.
-      GenerateEqual_Same(assembler, lhs, &if_equal, &if_notequal);
-    }
-
-    assembler->Bind(&if_notsame);
-    {
-      // Check if {lhs} is a Smi or a HeapObject.
-      Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
-      assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi,
-                        &if_lhsisnotsmi);
-
-      assembler->Bind(&if_lhsissmi);
-      {
-        // Check if {rhs} is a Smi or a HeapObject.
-        Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
-        assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
-                          &if_rhsisnotsmi);
-
-        assembler->Bind(&if_rhsissmi);
-        // We have already checked for {lhs} and {rhs} being the same value, so
-        // if both are Smis when we get here they must not be equal.
-        assembler->Goto(&if_notequal);
-
-        assembler->Bind(&if_rhsisnotsmi);
-        {
-          // Load the map of {rhs}.
-          Node* rhs_map = assembler->LoadMap(rhs);
-
-          // Check if {rhs} is a HeapNumber.
-          Node* number_map = assembler->HeapNumberMapConstant();
-          Label if_rhsisnumber(assembler), if_rhsisnotnumber(assembler);
-          assembler->Branch(assembler->WordEqual(rhs_map, number_map),
-                            &if_rhsisnumber, &if_rhsisnotnumber);
-
-          assembler->Bind(&if_rhsisnumber);
-          {
-            // Convert {lhs} and {rhs} to floating point values, and
-            // perform a floating point comparison.
-            var_fcmp_lhs.Bind(assembler->SmiToFloat64(lhs));
-            var_fcmp_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
-            assembler->Goto(&do_fcmp);
-          }
-
-          assembler->Bind(&if_rhsisnotnumber);
-          {
-            // Load the instance type of the {rhs}.
-            Node* rhs_instance_type = assembler->LoadMapInstanceType(rhs_map);
-
-            // Check if the {rhs} is a String.
-            Label if_rhsisstring(assembler, Label::kDeferred),
-                if_rhsisnotstring(assembler);
-            assembler->Branch(
-                assembler->IsStringInstanceType(rhs_instance_type),
-                &if_rhsisstring, &if_rhsisnotstring);
-
-            assembler->Bind(&if_rhsisstring);
-            {
-              // The {rhs} is a String and the {lhs} is a Smi; we need
-              // to convert the {rhs} to a Number and compare the output to
-              // the Number on the {lhs}.
-              assembler->Goto(&do_rhsstringtonumber);
-            }
-
-            assembler->Bind(&if_rhsisnotstring);
-            {
-              // Check if the {rhs} is a Boolean.
-              Label if_rhsisboolean(assembler), if_rhsisnotboolean(assembler);
-              assembler->Branch(assembler->IsBooleanMap(rhs_map),
-                                &if_rhsisboolean, &if_rhsisnotboolean);
-
-              assembler->Bind(&if_rhsisboolean);
-              {
-                // The {rhs} is a Boolean, load its number value.
-                var_rhs.Bind(
-                    assembler->LoadObjectField(rhs, Oddball::kToNumberOffset));
-                assembler->Goto(&loop);
-              }
-
-              assembler->Bind(&if_rhsisnotboolean);
-              {
-                // Check if the {rhs} is a Receiver.
-                STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
-                Label if_rhsisreceiver(assembler, Label::kDeferred),
-                    if_rhsisnotreceiver(assembler);
-                assembler->Branch(
-                    assembler->IsJSReceiverInstanceType(rhs_instance_type),
-                    &if_rhsisreceiver, &if_rhsisnotreceiver);
-
-                assembler->Bind(&if_rhsisreceiver);
-                {
-                  // Convert {rhs} to a primitive first (passing no hint).
-                  Callable callable = CodeFactory::NonPrimitiveToPrimitive(
-                      assembler->isolate());
-                  var_rhs.Bind(assembler->CallStub(callable, context, rhs));
-                  assembler->Goto(&loop);
-                }
-
-                assembler->Bind(&if_rhsisnotreceiver);
-                assembler->Goto(&if_notequal);
-              }
-            }
-          }
-        }
-      }
-
-      assembler->Bind(&if_lhsisnotsmi);
-      {
-        // Check if {rhs} is a Smi or a HeapObject.
-        Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
-        assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
-                          &if_rhsisnotsmi);
-
-        assembler->Bind(&if_rhsissmi);
-        {
-          // The {lhs} is a HeapObject and the {rhs} is a Smi; swapping {lhs}
-          // and {rhs} is not observable and doesn't matter for the result, so
-          // we can just swap them and use the Smi handling above (for {lhs}
-          // being a Smi).
-          var_lhs.Bind(rhs);
-          var_rhs.Bind(lhs);
-          assembler->Goto(&loop);
-        }
-
-        assembler->Bind(&if_rhsisnotsmi);
-        {
-          Label if_lhsisstring(assembler), if_lhsisnumber(assembler),
-              if_lhsissymbol(assembler), if_lhsissimd128value(assembler),
-              if_lhsisoddball(assembler), if_lhsisreceiver(assembler);
-
-          // Both {lhs} and {rhs} are HeapObjects, load their maps
-          // and their instance types.
-          Node* lhs_map = assembler->LoadMap(lhs);
-          Node* rhs_map = assembler->LoadMap(rhs);
-
-          // Load the instance types of {lhs} and {rhs}.
-          Node* lhs_instance_type = assembler->LoadMapInstanceType(lhs_map);
-          Node* rhs_instance_type = assembler->LoadMapInstanceType(rhs_map);
-
-          // Dispatch based on the instance type of {lhs}.
-          size_t const kNumCases = FIRST_NONSTRING_TYPE + 4;
-          Label* case_labels[kNumCases];
-          int32_t case_values[kNumCases];
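-          // Instance types below FIRST_NONSTRING_TYPE are all string types;
-          // give each one its own label here and forward them all to
-          // if_lhsisstring below.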
-          for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
-            case_labels[i] = new Label(assembler);
-            case_values[i] = i;
-          }
-          case_labels[FIRST_NONSTRING_TYPE + 0] = &if_lhsisnumber;
-          case_values[FIRST_NONSTRING_TYPE + 0] = HEAP_NUMBER_TYPE;
-          case_labels[FIRST_NONSTRING_TYPE + 1] = &if_lhsissymbol;
-          case_values[FIRST_NONSTRING_TYPE + 1] = SYMBOL_TYPE;
-          case_labels[FIRST_NONSTRING_TYPE + 2] = &if_lhsissimd128value;
-          case_values[FIRST_NONSTRING_TYPE + 2] = SIMD128_VALUE_TYPE;
-          case_labels[FIRST_NONSTRING_TYPE + 3] = &if_lhsisoddball;
-          case_values[FIRST_NONSTRING_TYPE + 3] = ODDBALL_TYPE;
-          assembler->Switch(lhs_instance_type, &if_lhsisreceiver, case_values,
-                            case_labels, arraysize(case_values));
-          for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
-            assembler->Bind(case_labels[i]);
-            assembler->Goto(&if_lhsisstring);
-            delete case_labels[i];
-          }
-
-          assembler->Bind(&if_lhsisstring);
-          {
-            // Check if {rhs} is also a String.
-            Label if_rhsisstring(assembler, Label::kDeferred),
-                if_rhsisnotstring(assembler);
-            assembler->Branch(
-                assembler->IsStringInstanceType(rhs_instance_type),
-                &if_rhsisstring, &if_rhsisnotstring);
-
-            assembler->Bind(&if_rhsisstring);
-            {
-              // Both {lhs} and {rhs} are of type String, just do the
-              // string comparison then.
-              Callable callable =
-                  (mode == kDontNegateResult)
-                      ? CodeFactory::StringEqual(assembler->isolate())
-                      : CodeFactory::StringNotEqual(assembler->isolate());
-              result.Bind(assembler->CallStub(callable, context, lhs, rhs));
-              assembler->Goto(&end);
-            }
-
-            assembler->Bind(&if_rhsisnotstring);
-            {
-              // The {lhs} is a String and the {rhs} is some other HeapObject.
-              // Swapping {lhs} and {rhs} is not observable and doesn't matter
-              // for the result, so we can just swap them and use the String
-              // handling below (for {rhs} being a String).
-              var_lhs.Bind(rhs);
-              var_rhs.Bind(lhs);
-              assembler->Goto(&loop);
-            }
-          }
-
-          assembler->Bind(&if_lhsisnumber);
-          {
-            // Check if {rhs} is also a HeapNumber.
-            Label if_rhsisnumber(assembler), if_rhsisnotnumber(assembler);
-            assembler->Branch(
-                assembler->Word32Equal(lhs_instance_type, rhs_instance_type),
-                &if_rhsisnumber, &if_rhsisnotnumber);
-
-            assembler->Bind(&if_rhsisnumber);
-            {
-              // Convert {lhs} and {rhs} to floating point values, and
-              // perform a floating point comparison.
-              var_fcmp_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
-              var_fcmp_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
-              assembler->Goto(&do_fcmp);
-            }
-
-            assembler->Bind(&if_rhsisnotnumber);
-            {
-              // The {lhs} is a Number, the {rhs} is some other HeapObject.
-              Label if_rhsisstring(assembler, Label::kDeferred),
-                  if_rhsisnotstring(assembler);
-              assembler->Branch(
-                  assembler->IsStringInstanceType(rhs_instance_type),
-                  &if_rhsisstring, &if_rhsisnotstring);
-
-              assembler->Bind(&if_rhsisstring);
-              {
-                // The {rhs} is a String and the {lhs} is a HeapNumber; we need
-                // to convert the {rhs} to a Number and compare the output to
-                // the Number on the {lhs}.
-                assembler->Goto(&do_rhsstringtonumber);
-              }
-
-              assembler->Bind(&if_rhsisnotstring);
-              {
-                // Check if the {rhs} is a JSReceiver.
-                Label if_rhsisreceiver(assembler),
-                    if_rhsisnotreceiver(assembler);
-                STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-                assembler->Branch(
-                    assembler->IsJSReceiverInstanceType(rhs_instance_type),
-                    &if_rhsisreceiver, &if_rhsisnotreceiver);
-
-                assembler->Bind(&if_rhsisreceiver);
-                {
-                  // The {lhs} is a Primitive and the {rhs} is a JSReceiver.
-                  // Swapping {lhs} and {rhs} is not observable and doesn't
-                  // matter for the result, so we can just swap them and use
-                  // the JSReceiver handling below (for {lhs} being a
-                  // JSReceiver).
-                  var_lhs.Bind(rhs);
-                  var_rhs.Bind(lhs);
-                  assembler->Goto(&loop);
-                }
-
-                assembler->Bind(&if_rhsisnotreceiver);
-                {
-                  // Check if {rhs} is a Boolean.
-                  Label if_rhsisboolean(assembler),
-                      if_rhsisnotboolean(assembler);
-                  assembler->Branch(assembler->IsBooleanMap(rhs_map),
-                                    &if_rhsisboolean, &if_rhsisnotboolean);
-
-                  assembler->Bind(&if_rhsisboolean);
-                  {
-                    // The {rhs} is a Boolean, convert it to a Smi first.
-                    var_rhs.Bind(assembler->LoadObjectField(
-                        rhs, Oddball::kToNumberOffset));
-                    assembler->Goto(&loop);
-                  }
-
-                  assembler->Bind(&if_rhsisnotboolean);
-                  assembler->Goto(&if_notequal);
-                }
-              }
-            }
-          }
-
-          assembler->Bind(&if_lhsisoddball);
-          {
-            // The {lhs} is an Oddball and {rhs} is some other HeapObject.
-            Label if_lhsisboolean(assembler), if_lhsisnotboolean(assembler);
-            Node* boolean_map = assembler->BooleanMapConstant();
-            assembler->Branch(assembler->WordEqual(lhs_map, boolean_map),
-                              &if_lhsisboolean, &if_lhsisnotboolean);
-
-            assembler->Bind(&if_lhsisboolean);
-            {
-              // The {lhs} is a Boolean, check if {rhs} is also a Boolean.
-              Label if_rhsisboolean(assembler), if_rhsisnotboolean(assembler);
-              assembler->Branch(assembler->WordEqual(rhs_map, boolean_map),
-                                &if_rhsisboolean, &if_rhsisnotboolean);
-
-              assembler->Bind(&if_rhsisboolean);
-              {
-                // Both {lhs} and {rhs} are distinct Boolean values.
-                assembler->Goto(&if_notequal);
-              }
-
-              assembler->Bind(&if_rhsisnotboolean);
-              {
-                // Convert the {lhs} to a Number first.
-                var_lhs.Bind(
-                    assembler->LoadObjectField(lhs, Oddball::kToNumberOffset));
-                assembler->Goto(&loop);
-              }
-            }
-
-            assembler->Bind(&if_lhsisnotboolean);
-            {
-              // The {lhs} is either Null or Undefined; check if the {rhs} is
-              // undetectable (i.e. either also Null or Undefined or some
-              // undetectable JSReceiver).
-              Node* rhs_bitfield = assembler->LoadMapBitField(rhs_map);
-              assembler->BranchIfWord32Equal(
-                  assembler->Word32And(
-                      rhs_bitfield,
-                      assembler->Int32Constant(1 << Map::kIsUndetectable)),
-                  assembler->Int32Constant(0), &if_notequal, &if_equal);
-            }
-          }
-
-          assembler->Bind(&if_lhsissymbol);
-          {
-            // Check if the {rhs} is a JSReceiver.
-            Label if_rhsisreceiver(assembler), if_rhsisnotreceiver(assembler);
-            STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-            assembler->Branch(
-                assembler->IsJSReceiverInstanceType(rhs_instance_type),
-                &if_rhsisreceiver, &if_rhsisnotreceiver);
-
-            assembler->Bind(&if_rhsisreceiver);
-            {
-              // The {lhs} is a Primitive and the {rhs} is a JSReceiver.
-              // Swapping {lhs} and {rhs} is not observable and doesn't
-              // matter for the result, so we can just swap them and use
-              // the JSReceiver handling below (for {lhs} being a JSReceiver).
-              var_lhs.Bind(rhs);
-              var_rhs.Bind(lhs);
-              assembler->Goto(&loop);
-            }
-
-            assembler->Bind(&if_rhsisnotreceiver);
-            {
-              // The {rhs} is not a JSReceiver and also not the same Symbol
-              // as the {lhs}, so this equality check is considered false.
-              assembler->Goto(&if_notequal);
-            }
-          }
-
-          assembler->Bind(&if_lhsissimd128value);
-          {
-            // Check if the {rhs} is also a Simd128Value.
-            Label if_rhsissimd128value(assembler),
-                if_rhsisnotsimd128value(assembler);
-            assembler->Branch(
-                assembler->Word32Equal(lhs_instance_type, rhs_instance_type),
-                &if_rhsissimd128value, &if_rhsisnotsimd128value);
-
-            assembler->Bind(&if_rhsissimd128value);
-            {
-              // Both {lhs} and {rhs} are Simd128Values.
-              GenerateEqual_Simd128Value_HeapObject(assembler, lhs, lhs_map,
-                                                    rhs, rhs_map, &if_equal,
-                                                    &if_notequal);
-            }
-
-            assembler->Bind(&if_rhsisnotsimd128value);
-            {
-              // Check if the {rhs} is a JSReceiver.
-              Label if_rhsisreceiver(assembler), if_rhsisnotreceiver(assembler);
-              STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-              assembler->Branch(
-                  assembler->IsJSReceiverInstanceType(rhs_instance_type),
-                  &if_rhsisreceiver, &if_rhsisnotreceiver);
-
-              assembler->Bind(&if_rhsisreceiver);
-              {
-                // The {lhs} is a Primitive and the {rhs} is a JSReceiver.
-                // Swapping {lhs} and {rhs} is not observable and doesn't
-                // matter for the result, so we can just swap them and use
-                // the JSReceiver handling below (for {lhs} being a JSReceiver).
-                var_lhs.Bind(rhs);
-                var_rhs.Bind(lhs);
-                assembler->Goto(&loop);
-              }
-
-              assembler->Bind(&if_rhsisnotreceiver);
-              {
-                // The {rhs} is some other Primitive.
-                assembler->Goto(&if_notequal);
-              }
-            }
-          }
-
-          assembler->Bind(&if_lhsisreceiver);
-          {
-            // Check if the {rhs} is also a JSReceiver.
-            Label if_rhsisreceiver(assembler), if_rhsisnotreceiver(assembler);
-            STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-            assembler->Branch(
-                assembler->IsJSReceiverInstanceType(rhs_instance_type),
-                &if_rhsisreceiver, &if_rhsisnotreceiver);
-
-            assembler->Bind(&if_rhsisreceiver);
-            {
-              // Both {lhs} and {rhs} are different JSReceiver references, so
-              // this cannot be considered equal.
-              assembler->Goto(&if_notequal);
-            }
-
-            assembler->Bind(&if_rhsisnotreceiver);
-            {
-              // Check if {rhs} is Null or Undefined (an undetectable check
-              // is sufficient here, since we already know that {rhs} is not
-              // a JSReceiver).
-              Label if_rhsisundetectable(assembler),
-                  if_rhsisnotundetectable(assembler, Label::kDeferred);
-              Node* rhs_bitfield = assembler->LoadMapBitField(rhs_map);
-              assembler->BranchIfWord32Equal(
-                  assembler->Word32And(
-                      rhs_bitfield,
-                      assembler->Int32Constant(1 << Map::kIsUndetectable)),
-                  assembler->Int32Constant(0), &if_rhsisnotundetectable,
-                  &if_rhsisundetectable);
-
-              assembler->Bind(&if_rhsisundetectable);
-              {
-                // Check if {lhs} is an undetectable JSReceiver.
-                Node* lhs_bitfield = assembler->LoadMapBitField(lhs_map);
-                assembler->BranchIfWord32Equal(
-                    assembler->Word32And(
-                        lhs_bitfield,
-                        assembler->Int32Constant(1 << Map::kIsUndetectable)),
-                    assembler->Int32Constant(0), &if_notequal, &if_equal);
-              }
-
-              assembler->Bind(&if_rhsisnotundetectable);
-              {
-                // The {rhs} is some Primitive different from Null and
-                // Undefined, need to convert {lhs} to Primitive first.
-                Callable callable =
-                    CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
-                var_lhs.Bind(assembler->CallStub(callable, context, lhs));
-                assembler->Goto(&loop);
-              }
-            }
-          }
-        }
-      }
-    }
-
-    assembler->Bind(&do_rhsstringtonumber);
-    {
-      Callable callable = CodeFactory::StringToNumber(assembler->isolate());
-      var_rhs.Bind(assembler->CallStub(callable, context, rhs));
-      assembler->Goto(&loop);
-    }
-  }
-
-  assembler->Bind(&do_fcmp);
-  {
-    // Load the {lhs} and {rhs} floating point values.
-    Node* lhs = var_fcmp_lhs.value();
-    Node* rhs = var_fcmp_rhs.value();
-
-    // Perform a fast floating point comparison.
-    assembler->BranchIfFloat64Equal(lhs, rhs, &if_equal, &if_notequal);
-  }
-
-  assembler->Bind(&if_equal);
-  {
-    result.Bind(assembler->BooleanConstant(mode == kDontNegateResult));
-    assembler->Goto(&end);
-  }
-
-  assembler->Bind(&if_notequal);
-  {
-    result.Bind(assembler->BooleanConstant(mode == kNegateResult));
-    assembler->Goto(&end);
-  }
-
-  assembler->Bind(&end);
-  return result.value();
-}
-
-compiler::Node* GenerateStrictEqual(CodeStubAssembler* assembler,
-                                    ResultMode mode, compiler::Node* lhs,
-                                    compiler::Node* rhs,
-                                    compiler::Node* context) {
-  // Here's pseudo-code for the algorithm below for the kDontNegateResult
-  // case; in kNegateResult mode we simply negate the result.
-  //
-  // if (lhs == rhs) {
-  //   if (lhs->IsHeapNumber()) return !std::isnan(HeapNumber::cast(lhs)->value());
-  //   return true;
-  // }
-  // if (!lhs->IsSmi()) {
-  //   if (lhs->IsHeapNumber()) {
-  //     if (rhs->IsSmi()) {
-  //       return Smi::cast(rhs)->value() == HeapNumber::cast(lhs)->value();
-  //     } else if (rhs->IsHeapNumber()) {
-  //       return HeapNumber::cast(rhs)->value() ==
-  //       HeapNumber::cast(lhs)->value();
-  //     } else {
-  //       return false;
-  //     }
-  //   } else {
-  //     if (rhs->IsSmi()) {
-  //       return false;
-  //     } else {
-  //       if (lhs->IsString()) {
-  //         if (rhs->IsString()) {
-  //           return %StringEqual(lhs, rhs);
-  //         } else {
-  //           return false;
-  //         }
-  //       } else if (lhs->IsSimd128()) {
-  //         if (rhs->IsSimd128()) {
-  //           return %StrictEqual(lhs, rhs);
-  //         } else {
-  //           return false;
-  //         }
-  //       } else {
-  //         return false;
-  //       }
-  //     }
-  //   }
-  // } else {
-  //   if (rhs->IsSmi()) {
-  //     return false;
-  //   } else {
-  //     if (rhs->IsHeapNumber()) {
-  //       return Smi::cast(lhs)->value() == HeapNumber::cast(rhs)->value();
-  //     } else {
-  //       return false;
-  //     }
-  //   }
-  // }
-
-  typedef CodeStubAssembler::Label Label;
-  typedef CodeStubAssembler::Variable Variable;
-  typedef compiler::Node Node;
-
-  Label if_equal(assembler), if_notequal(assembler), end(assembler);
-  Variable result(assembler, MachineRepresentation::kTagged);
-
-  // Check if {lhs} and {rhs} refer to the same object.
-  Label if_same(assembler), if_notsame(assembler);
-  assembler->Branch(assembler->WordEqual(lhs, rhs), &if_same, &if_notsame);
-
-  assembler->Bind(&if_same);
-  {
-    // The {lhs} and {rhs} reference the exact same value, yet we need special
-    // treatment for HeapNumber, as NaN is not equal to NaN.
-    GenerateEqual_Same(assembler, lhs, &if_equal, &if_notequal);
-  }
-
-  assembler->Bind(&if_notsame);
-  {
-    // The {lhs} and {rhs} reference different objects, yet for Smi, HeapNumber,
-    // String and Simd128Value they can still be considered equal.
-    Node* number_map = assembler->HeapNumberMapConstant();
-
-    // Check if {lhs} is a Smi or a HeapObject.
-    Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
-    assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
-
-    assembler->Bind(&if_lhsisnotsmi);
-    {
-      // Load the map of {lhs}.
-      Node* lhs_map = assembler->LoadMap(lhs);
-
-      // Check if {lhs} is a HeapNumber.
-      Label if_lhsisnumber(assembler), if_lhsisnotnumber(assembler);
-      assembler->Branch(assembler->WordEqual(lhs_map, number_map),
-                        &if_lhsisnumber, &if_lhsisnotnumber);
-
-      assembler->Bind(&if_lhsisnumber);
-      {
-        // Check if {rhs} is a Smi or a HeapObject.
-        Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
-        assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
-                          &if_rhsisnotsmi);
-
-        assembler->Bind(&if_rhsissmi);
-        {
-          // Convert {lhs} and {rhs} to floating point values.
-          Node* lhs_value = assembler->LoadHeapNumberValue(lhs);
-          Node* rhs_value = assembler->SmiToFloat64(rhs);
-
-          // Perform a floating point comparison of {lhs} and {rhs}.
-          assembler->BranchIfFloat64Equal(lhs_value, rhs_value, &if_equal,
-                                          &if_notequal);
-        }
-
-        assembler->Bind(&if_rhsisnotsmi);
-        {
-          // Load the map of {rhs}.
-          Node* rhs_map = assembler->LoadMap(rhs);
-
-          // Check if {rhs} is also a HeapNumber.
-          Label if_rhsisnumber(assembler), if_rhsisnotnumber(assembler);
-          assembler->Branch(assembler->WordEqual(rhs_map, number_map),
-                            &if_rhsisnumber, &if_rhsisnotnumber);
-
-          assembler->Bind(&if_rhsisnumber);
-          {
-            // Convert {lhs} and {rhs} to floating point values.
-            Node* lhs_value = assembler->LoadHeapNumberValue(lhs);
-            Node* rhs_value = assembler->LoadHeapNumberValue(rhs);
-
-            // Perform a floating point comparison of {lhs} and {rhs}.
-            assembler->BranchIfFloat64Equal(lhs_value, rhs_value, &if_equal,
-                                            &if_notequal);
-          }
-
-          assembler->Bind(&if_rhsisnotnumber);
-          assembler->Goto(&if_notequal);
-        }
-      }
-
-      assembler->Bind(&if_lhsisnotnumber);
-      {
-        // Check if {rhs} is a Smi or a HeapObject.
-        Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
-        assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
-                          &if_rhsisnotsmi);
-
-        assembler->Bind(&if_rhsissmi);
-        assembler->Goto(&if_notequal);
-
-        assembler->Bind(&if_rhsisnotsmi);
-        {
-          // Load the instance type of {lhs}.
-          Node* lhs_instance_type = assembler->LoadMapInstanceType(lhs_map);
-
-          // Check if {lhs} is a String.
-          Label if_lhsisstring(assembler), if_lhsisnotstring(assembler);
-          assembler->Branch(assembler->IsStringInstanceType(lhs_instance_type),
-                            &if_lhsisstring, &if_lhsisnotstring);
-
-          assembler->Bind(&if_lhsisstring);
-          {
-            // Load the instance type of {rhs}.
-            Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
-
-            // Check if {rhs} is also a String.
-            Label if_rhsisstring(assembler, Label::kDeferred),
-                if_rhsisnotstring(assembler);
-            assembler->Branch(
-                assembler->IsStringInstanceType(rhs_instance_type),
-                &if_rhsisstring, &if_rhsisnotstring);
-
-            assembler->Bind(&if_rhsisstring);
-            {
-              Callable callable =
-                  (mode == kDontNegateResult)
-                      ? CodeFactory::StringEqual(assembler->isolate())
-                      : CodeFactory::StringNotEqual(assembler->isolate());
-              result.Bind(assembler->CallStub(callable, context, lhs, rhs));
-              assembler->Goto(&end);
-            }
-
-            assembler->Bind(&if_rhsisnotstring);
-            assembler->Goto(&if_notequal);
-          }
-
-          assembler->Bind(&if_lhsisnotstring);
-          {
-            // Check if {lhs} is a Simd128Value.
-            Label if_lhsissimd128value(assembler),
-                if_lhsisnotsimd128value(assembler);
-            assembler->Branch(assembler->Word32Equal(
-                                  lhs_instance_type,
-                                  assembler->Int32Constant(SIMD128_VALUE_TYPE)),
-                              &if_lhsissimd128value, &if_lhsisnotsimd128value);
-
-            assembler->Bind(&if_lhsissimd128value);
-            {
-              // Load the map of {rhs}.
-              Node* rhs_map = assembler->LoadMap(rhs);
-
-              // Check if {rhs} is also a Simd128Value that is equal to {lhs}.
-              GenerateEqual_Simd128Value_HeapObject(assembler, lhs, lhs_map,
-                                                    rhs, rhs_map, &if_equal,
-                                                    &if_notequal);
-            }
-
-            assembler->Bind(&if_lhsisnotsimd128value);
-            assembler->Goto(&if_notequal);
-          }
-        }
-      }
-    }
-
-    assembler->Bind(&if_lhsissmi);
-    {
-      // We already know that {lhs} and {rhs} are not reference equal, and {lhs}
-      // is a Smi; so {lhs} and {rhs} can only be strictly equal if {rhs} is a
-      // HeapNumber with an equal floating point value.
-
-      // Check if {rhs} is a Smi or a HeapObject.
-      Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
-      assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
-                        &if_rhsisnotsmi);
-
-      assembler->Bind(&if_rhsissmi);
-      assembler->Goto(&if_notequal);
-
-      assembler->Bind(&if_rhsisnotsmi);
-      {
-        // Load the map of the {rhs}.
-        Node* rhs_map = assembler->LoadMap(rhs);
-
-        // The {rhs} could be a HeapNumber with the same value as {lhs}.
-        Label if_rhsisnumber(assembler), if_rhsisnotnumber(assembler);
-        assembler->Branch(assembler->WordEqual(rhs_map, number_map),
-                          &if_rhsisnumber, &if_rhsisnotnumber);
-
-        assembler->Bind(&if_rhsisnumber);
-        {
-          // Convert {lhs} and {rhs} to floating point values.
-          Node* lhs_value = assembler->SmiToFloat64(lhs);
-          Node* rhs_value = assembler->LoadHeapNumberValue(rhs);
-
-          // Perform a floating point comparison of {lhs} and {rhs}.
-          assembler->BranchIfFloat64Equal(lhs_value, rhs_value, &if_equal,
-                                          &if_notequal);
-        }
-
-        assembler->Bind(&if_rhsisnotnumber);
-        assembler->Goto(&if_notequal);
-      }
-    }
-  }
-
-  assembler->Bind(&if_equal);
-  {
-    result.Bind(assembler->BooleanConstant(mode == kDontNegateResult));
-    assembler->Goto(&end);
-  }
-
-  assembler->Bind(&if_notequal);
-  {
-    result.Bind(assembler->BooleanConstant(mode == kNegateResult));
-    assembler->Goto(&end);
-  }
-
-  assembler->Bind(&end);
-  return result.value();
-}
-
-}  // namespace
-
 void LoadApiGetterStub::GenerateAssembly(CodeStubAssembler* assembler) const {
   typedef compiler::Node Node;
   Node* context = assembler->Parameter(Descriptor::kContext);
@@ -4244,7 +2029,7 @@
     Node* global = assembler->LoadObjectField(proxy_map, Map::kPrototypeOffset);
     Node* map_cell = assembler->HeapConstant(isolate()->factory()->NewWeakCell(
         StoreGlobalStub::global_map_placeholder(isolate())));
-    Node* expected_map = assembler->LoadWeakCellValue(map_cell);
+    Node* expected_map = assembler->LoadWeakCellValueUnchecked(map_cell);
     Node* map = assembler->LoadMap(global);
     assembler->GotoIf(assembler->WordNotEqual(expected_map, map), &miss);
   }
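
Note: LoadWeakCellValueUnchecked skips the cleared-cell check that
LoadWeakCellValue performs. A minimal sketch of the two helpers, assuming the
usual CodeStubAssembler shape (Node is the class-level typedef for
compiler::Node; this sketch is illustrative, not part of the patch):

  Node* CodeStubAssembler::LoadWeakCellValueUnchecked(Node* cell) {
    // Read the value field directly; the caller guarantees the cell is live.
    return LoadObjectField(cell, WeakCell::kValueOffset);
  }

  Node* CodeStubAssembler::LoadWeakCellValue(Node* cell, Label* if_cleared) {
    Node* value = LoadWeakCellValueUnchecked(cell);
    if (if_cleared != nullptr) {
      // A cleared WeakCell holds Smi zero.
      GotoIf(WordEqual(value, IntPtrConstant(0)), if_cleared);
    }
    return value;
  }

The unchecked load suffices here because the cell is freshly created around a
placeholder map, and the WordNotEqual comparison right below already routes
any mismatch to the miss label.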
@@ -4252,7 +2037,7 @@
   Node* weak_cell = assembler->HeapConstant(isolate()->factory()->NewWeakCell(
       StoreGlobalStub::property_cell_placeholder(isolate())));
   Node* cell = assembler->LoadWeakCellValue(weak_cell);
-  assembler->GotoIf(assembler->WordIsSmi(cell), &miss);
+  assembler->GotoIf(assembler->TaggedIsSmi(cell), &miss);
 
   // Load the payload of the global parameter cell. A hole indicates that the
   // cell has been invalidated and that the store must be handled by the
@@ -4274,7 +2059,7 @@
     if (cell_type == PropertyCellType::kConstantType) {
       switch (constant_type()) {
         case PropertyCellConstantType::kSmi:
-          assembler->GotoUnless(assembler->WordIsSmi(value), &miss);
+          assembler->GotoUnless(assembler->TaggedIsSmi(value), &miss);
           value_is_smi = true;
           break;
         case PropertyCellConstantType::kStableMap: {
@@ -4283,8 +2068,8 @@
           // are the maps that were originally in the cell or not. Optimized
           // code will deopt when a cell has an unstable map, and if it has a
           // dependency on a stable map, it will deopt if the map destabilizes.
-          assembler->GotoIf(assembler->WordIsSmi(value), &miss);
-          assembler->GotoIf(assembler->WordIsSmi(cell_contents), &miss);
+          assembler->GotoIf(assembler->TaggedIsSmi(value), &miss);
+          assembler->GotoIf(assembler->TaggedIsSmi(cell_contents), &miss);
           Node* expected_map = assembler->LoadMap(cell_contents);
           Node* map = assembler->LoadMap(value);
           assembler->GotoIf(assembler->WordNotEqual(expected_map, map), &miss);
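
Note: this patch renames WordIsSmi to TaggedIsSmi throughout. The predicate
itself is unchanged; the new name just states that it inspects a tagged value.
Rough shape, assuming the standard Smi tagging scheme (sketch only):

  Node* CodeStubAssembler::TaggedIsSmi(Node* a) {
    // A Smi is identified by its low tag bit(s) being clear.
    return WordEqual(WordAnd(a, IntPtrConstant(kSmiTagMask)),
                     IntPtrConstant(0));
  }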
@@ -4392,155 +2177,6 @@
   assembler->Return(value);
 }
 
-// static
-compiler::Node* LessThanStub::Generate(CodeStubAssembler* assembler,
-                                       compiler::Node* lhs, compiler::Node* rhs,
-                                       compiler::Node* context) {
-  return GenerateAbstractRelationalComparison(assembler, kLessThan, lhs, rhs,
-                                              context);
-}
-
-// static
-compiler::Node* LessThanOrEqualStub::Generate(CodeStubAssembler* assembler,
-                                              compiler::Node* lhs,
-                                              compiler::Node* rhs,
-                                              compiler::Node* context) {
-  return GenerateAbstractRelationalComparison(assembler, kLessThanOrEqual, lhs,
-                                              rhs, context);
-}
-
-// static
-compiler::Node* GreaterThanStub::Generate(CodeStubAssembler* assembler,
-                                          compiler::Node* lhs,
-                                          compiler::Node* rhs,
-                                          compiler::Node* context) {
-  return GenerateAbstractRelationalComparison(assembler, kGreaterThan, lhs, rhs,
-                                              context);
-}
-
-// static
-compiler::Node* GreaterThanOrEqualStub::Generate(CodeStubAssembler* assembler,
-                                                 compiler::Node* lhs,
-                                                 compiler::Node* rhs,
-                                                 compiler::Node* context) {
-  return GenerateAbstractRelationalComparison(assembler, kGreaterThanOrEqual,
-                                              lhs, rhs, context);
-}
-
-// static
-compiler::Node* EqualStub::Generate(CodeStubAssembler* assembler,
-                                    compiler::Node* lhs, compiler::Node* rhs,
-                                    compiler::Node* context) {
-  return GenerateEqual(assembler, kDontNegateResult, lhs, rhs, context);
-}
-
-// static
-compiler::Node* NotEqualStub::Generate(CodeStubAssembler* assembler,
-                                       compiler::Node* lhs, compiler::Node* rhs,
-                                       compiler::Node* context) {
-  return GenerateEqual(assembler, kNegateResult, lhs, rhs, context);
-}
-
-// static
-compiler::Node* StrictEqualStub::Generate(CodeStubAssembler* assembler,
-                                          compiler::Node* lhs,
-                                          compiler::Node* rhs,
-                                          compiler::Node* context) {
-  return GenerateStrictEqual(assembler, kDontNegateResult, lhs, rhs, context);
-}
-
-// static
-compiler::Node* StrictNotEqualStub::Generate(CodeStubAssembler* assembler,
-                                             compiler::Node* lhs,
-                                             compiler::Node* rhs,
-                                             compiler::Node* context) {
-  return GenerateStrictEqual(assembler, kNegateResult, lhs, rhs, context);
-}
-
-void ToLengthStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
-
-  Node* context = assembler->Parameter(1);
-
-  // We might need to loop once for ToNumber conversion.
-  Variable var_len(assembler, MachineRepresentation::kTagged);
-  Label loop(assembler, &var_len);
-  var_len.Bind(assembler->Parameter(0));
-  assembler->Goto(&loop);
-  assembler->Bind(&loop);
-  {
-    // Shared entry points.
-    Label return_len(assembler),
-        return_two53minus1(assembler, Label::kDeferred),
-        return_zero(assembler, Label::kDeferred);
-
-    // Load the current {len} value.
-    Node* len = var_len.value();
-
-    // Check if {len} is a positive Smi.
-    assembler->GotoIf(assembler->WordIsPositiveSmi(len), &return_len);
-
-    // Check if {len} is a (negative) Smi.
-    assembler->GotoIf(assembler->WordIsSmi(len), &return_zero);
-
-    // Check if {len} is a HeapNumber.
-    Label if_lenisheapnumber(assembler),
-        if_lenisnotheapnumber(assembler, Label::kDeferred);
-    assembler->Branch(assembler->IsHeapNumberMap(assembler->LoadMap(len)),
-                      &if_lenisheapnumber, &if_lenisnotheapnumber);
-
-    assembler->Bind(&if_lenisheapnumber);
-    {
-      // Load the floating-point value of {len}.
-      Node* len_value = assembler->LoadHeapNumberValue(len);
-
-      // Check if {len} is not greater than zero.
-      assembler->GotoUnless(assembler->Float64GreaterThan(
-                                len_value, assembler->Float64Constant(0.0)),
-                            &return_zero);
-
-      // Check if {len} is greater than or equal to 2^53-1.
-      assembler->GotoIf(
-          assembler->Float64GreaterThanOrEqual(
-              len_value, assembler->Float64Constant(kMaxSafeInteger)),
-          &return_two53minus1);
-
-      // Round the {len} towards -Infinity.
-      Node* value = assembler->Float64Floor(len_value);
-      Node* result = assembler->ChangeFloat64ToTagged(value);
-      assembler->Return(result);
-    }
-
-    assembler->Bind(&if_lenisnotheapnumber);
-    {
-      // Need to convert {len} to a Number first.
-      Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate());
-      var_len.Bind(assembler->CallStub(callable, context, len));
-      assembler->Goto(&loop);
-    }
-
-    assembler->Bind(&return_len);
-    assembler->Return(var_len.value());
-
-    assembler->Bind(&return_two53minus1);
-    assembler->Return(assembler->NumberConstant(kMaxSafeInteger));
-
-    assembler->Bind(&return_zero);
-    assembler->Return(assembler->SmiConstant(Smi::FromInt(0)));
-  }
-}
-
-void ToIntegerStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  typedef compiler::Node Node;
-
-  Node* input = assembler->Parameter(Descriptor::kArgument);
-  Node* context = assembler->Parameter(Descriptor::kContext);
-
-  assembler->Return(assembler->ToInteger(context, input));
-}
-
 void StoreInterceptorStub::GenerateAssembly(
     CodeStubAssembler* assembler) const {
   typedef compiler::Node Node;
@@ -4713,27 +2349,6 @@
   os << ": " << from << "=>" << to << "]" << std::endl;
 }
 
-
-// TODO(svenpanne) Make this a real infix_ostream_iterator.
-class SimpleListPrinter {
- public:
-  explicit SimpleListPrinter(std::ostream& os) : os_(os), first_(true) {}
-
-  void Add(const char* s) {
-    if (first_) {
-      first_ = false;
-    } else {
-      os_ << ",";
-    }
-    os_ << s;
-  }
-
- private:
-  std::ostream& os_;
-  bool first_;
-};
-
-
 void CallICStub::PrintState(std::ostream& os) const {  // NOLINT
   os << state();
 }
@@ -4753,14 +2368,6 @@
       FUNCTION_ADDR(Runtime_KeyedLoadIC_MissFromStubFailure));
 }
 
-
-void KeyedLoadGenericStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  descriptor->Initialize(
-      Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry);
-}
-
-
 void HandlerStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
   DCHECK(kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC);
   if (kind() == Code::KEYED_LOAD_IC) {
@@ -4779,21 +2386,6 @@
   }
 }
 
-
-void NumberToStringStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
-  descriptor->Initialize(
-      Runtime::FunctionForId(Runtime::kNumberToString)->entry);
-  descriptor->SetMissHandler(Runtime::kNumberToString);
-}
-
-void RegExpConstructResultStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  descriptor->Initialize(
-      Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
-  descriptor->SetMissHandler(Runtime::kRegExpConstructResult);
-}
-
-
 void TransitionElementsKindStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
   descriptor->Initialize(
@@ -4835,118 +2427,6 @@
       FUNCTION_ADDR(Runtime_BinaryOpIC_MissWithAllocationSite));
 }
 
-
-void StringAddStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
-  descriptor->Initialize(Runtime::FunctionForId(Runtime::kStringAdd)->entry);
-  descriptor->SetMissHandler(Runtime::kStringAdd);
-}
-
-namespace {
-
-compiler::Node* GenerateHasProperty(
-    CodeStubAssembler* assembler, compiler::Node* object, compiler::Node* key,
-    compiler::Node* context, Runtime::FunctionId fallback_runtime_function_id) {
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Label Label;
-  typedef CodeStubAssembler::Variable Variable;
-
-  Label call_runtime(assembler, Label::kDeferred), return_true(assembler),
-      return_false(assembler), end(assembler);
-
-  CodeStubAssembler::LookupInHolder lookup_property_in_holder =
-      [assembler, &return_true](Node* receiver, Node* holder, Node* holder_map,
-                                Node* holder_instance_type, Node* unique_name,
-                                Label* next_holder, Label* if_bailout) {
-        assembler->TryHasOwnProperty(holder, holder_map, holder_instance_type,
-                                     unique_name, &return_true, next_holder,
-                                     if_bailout);
-      };
-
-  CodeStubAssembler::LookupInHolder lookup_element_in_holder =
-      [assembler, &return_true](Node* receiver, Node* holder, Node* holder_map,
-                                Node* holder_instance_type, Node* index,
-                                Label* next_holder, Label* if_bailout) {
-        assembler->TryLookupElement(holder, holder_map, holder_instance_type,
-                                    index, &return_true, next_holder,
-                                    if_bailout);
-      };
-
-  assembler->TryPrototypeChainLookup(object, key, lookup_property_in_holder,
-                                     lookup_element_in_holder, &return_false,
-                                     &call_runtime);
-
-  Variable result(assembler, MachineRepresentation::kTagged);
-  assembler->Bind(&return_true);
-  {
-    result.Bind(assembler->BooleanConstant(true));
-    assembler->Goto(&end);
-  }
-
-  assembler->Bind(&return_false);
-  {
-    result.Bind(assembler->BooleanConstant(false));
-    assembler->Goto(&end);
-  }
-
-  assembler->Bind(&call_runtime);
-  {
-    result.Bind(assembler->CallRuntime(fallback_runtime_function_id, context,
-                                       object, key));
-    assembler->Goto(&end);
-  }
-
-  assembler->Bind(&end);
-  return result.value();
-}
-
-}  // namespace
-
-// static
-compiler::Node* HasPropertyStub::Generate(CodeStubAssembler* assembler,
-                                          compiler::Node* key,
-                                          compiler::Node* object,
-                                          compiler::Node* context) {
-  return GenerateHasProperty(assembler, object, key, context,
-                             Runtime::kHasProperty);
-}
-
-// static
-compiler::Node* ForInFilterStub::Generate(CodeStubAssembler* assembler,
-                                          compiler::Node* key,
-                                          compiler::Node* object,
-                                          compiler::Node* context) {
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Label Label;
-  typedef CodeStubAssembler::Variable Variable;
-
-  Label return_undefined(assembler, Label::kDeferred),
-      return_to_name(assembler), end(assembler);
-
-  Variable var_result(assembler, MachineRepresentation::kTagged);
-
-  Node* has_property = GenerateHasProperty(assembler, object, key, context,
-                                           Runtime::kForInHasProperty);
-
-  assembler->Branch(
-      assembler->WordEqual(has_property, assembler->BooleanConstant(true)),
-      &return_to_name, &return_undefined);
-
-  assembler->Bind(&return_to_name);
-  {
-    var_result.Bind(assembler->ToName(context, key));
-    assembler->Goto(&end);
-  }
-
-  assembler->Bind(&return_undefined);
-  {
-    var_result.Bind(assembler->UndefinedConstant());
-    assembler->Goto(&end);
-  }
-
-  assembler->Bind(&end);
-  return var_result.value();
-}
-
 void GetPropertyStub::GenerateAssembly(CodeStubAssembler* assembler) const {
   typedef compiler::Node Node;
   typedef CodeStubAssembler::Label Label;
@@ -5064,12 +2544,13 @@
 
   if (FLAG_debug_code) {
     // Function must be a function without a prototype.
-    assembler->Assert(assembler->Word32And(
-        compiler_hints,
-        assembler->Int32Constant((FunctionKind::kAccessorFunction |
-                                  FunctionKind::kArrowFunction |
-                                  FunctionKind::kConciseMethod)
-                                 << SharedFunctionInfo::kFunctionKindShift)));
+    CSA_ASSERT(assembler, assembler->Word32And(
+                              compiler_hints,
+                              assembler->Int32Constant(
+                                  (FunctionKind::kAccessorFunction |
+                                   FunctionKind::kArrowFunction |
+                                   FunctionKind::kConciseMethod)
+                                  << SharedFunctionInfo::kFunctionKindShift)));
   }
   assembler->Goto(&if_function_without_prototype);
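
Note: CSA_ASSERT replaces the bare assembler->Assert(...) calls in debug-only
blocks. The macro definition is not shown in this patch; presumably it
forwards to Assert while capturing the expression text and source location,
along these lines (assumed expansion):

  #define CSA_ASSERT(csa, x) (csa)->Assert((x), #x, __FILE__, __LINE__)

so a failing debug-mode check can report what failed and where.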
 
@@ -5167,9 +2648,7 @@
 compiler::Node* FastNewFunctionContextStub::Generate(
     CodeStubAssembler* assembler, compiler::Node* function,
     compiler::Node* slots, compiler::Node* context) {
-  typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
 
   Node* min_context_slots =
       assembler->Int32Constant(Context::MIN_CONTEXT_SLOTS);
@@ -5208,24 +2687,12 @@
 
   // Initialize the rest of the slots to undefined.
   Node* undefined = assembler->UndefinedConstant();
-  Variable var_slot_index(assembler, MachineRepresentation::kWord32);
-  var_slot_index.Bind(min_context_slots);
-  Label loop(assembler, &var_slot_index), after_loop(assembler);
-  assembler->Goto(&loop);
-
-  assembler->Bind(&loop);
-  {
-    Node* slot_index = var_slot_index.value();
-    assembler->GotoUnless(assembler->Int32LessThan(slot_index, length),
-                          &after_loop);
-    assembler->StoreFixedArrayElement(function_context, slot_index, undefined,
-                                      SKIP_WRITE_BARRIER);
-    Node* one = assembler->Int32Constant(1);
-    Node* next_index = assembler->Int32Add(slot_index, one);
-    var_slot_index.Bind(next_index);
-    assembler->Goto(&loop);
-  }
-  assembler->Bind(&after_loop);
+  assembler->BuildFastFixedArrayForEach(
+      function_context, FAST_ELEMENTS, min_context_slots, length,
+      [undefined](CodeStubAssembler* assembler, Node* context, Node* offset) {
+        assembler->StoreNoWriteBarrier(MachineType::PointerRepresentation(),
+                                       context, offset, undefined);
+      });
 
   return function_context;
 }
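
Note: the hand-rolled slot-initialization loop above is folded into
BuildFastFixedArrayForEach. Assumed shape of the helper's contract (a
declaration sketch, not its real implementation):

  void CodeStubAssembler::BuildFastFixedArrayForEach(
      Node* array, ElementsKind kind, Node* first, Node* last,
      std::function<void(CodeStubAssembler*, Node*, Node*)> body);
  // Iterates indices [first, last), computing the raw byte offset of each
  // element slot and invoking body(this, array, offset) -- which is why the
  // replacement lambda stores through an offset instead of an element index.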
@@ -5380,7 +2847,7 @@
   allocation_site =
       allocation_site_mode == TRACK_ALLOCATION_SITE ? allocation_site : nullptr;
 
-  Node* zero = assembler->SmiConstant(Smi::FromInt(0));
+  Node* zero = assembler->SmiConstant(Smi::kZero);
   assembler->GotoIf(assembler->SmiEqual(capacity, zero), &zero_capacity);
 
   Node* elements_map = assembler->LoadMap(boilerplate_elements);
@@ -5391,14 +2858,16 @@
     assembler->Comment("fast double elements path");
     if (FLAG_debug_code) {
       Label correct_elements_map(assembler), abort(assembler, Label::kDeferred);
-      assembler->BranchIf(assembler->IsFixedDoubleArrayMap(elements_map),
-                          &correct_elements_map, &abort);
+      assembler->Branch(assembler->IsFixedDoubleArrayMap(elements_map),
+                        &correct_elements_map, &abort);
 
       assembler->Bind(&abort);
       {
         Node* abort_id = assembler->SmiConstant(
             Smi::FromInt(BailoutReason::kExpectedFixedDoubleArrayMap));
-        assembler->TailCallRuntime(Runtime::kAbort, context, abort_id);
+        assembler->CallRuntime(Runtime::kAbort, context, abort_id);
+        result.Bind(assembler->UndefinedConstant());
+        assembler->Goto(&return_result);
       }
       assembler->Bind(&correct_elements_map);
     }
@@ -5544,103 +3013,54 @@
   }
 }
 
-void ArrayConstructorStub::PrintName(std::ostream& os) const {  // NOLINT
-  os << "ArrayConstructorStub";
-  switch (argument_count()) {
-    case ANY:
-      os << "_Any";
-      break;
-    case NONE:
-      os << "_None";
-      break;
-    case ONE:
-      os << "_One";
-      break;
-    case MORE_THAN_ONE:
-      os << "_More_Than_One";
-      break;
-  }
-  return;
-}
-
-
 bool ToBooleanICStub::UpdateStatus(Handle<Object> object) {
-  Types new_types = types();
-  Types old_types = new_types;
-  bool to_boolean_value = new_types.UpdateStatus(isolate(), object);
-  TraceTransition(old_types, new_types);
-  set_sub_minor_key(TypesBits::update(sub_minor_key(), new_types.ToIntegral()));
+  ToBooleanHints old_hints = hints();
+  ToBooleanHints new_hints = old_hints;
+  bool to_boolean_value = false;  // Dummy initialization.
+  if (object->IsUndefined(isolate())) {
+    new_hints |= ToBooleanHint::kUndefined;
+    to_boolean_value = false;
+  } else if (object->IsBoolean()) {
+    new_hints |= ToBooleanHint::kBoolean;
+    to_boolean_value = object->IsTrue(isolate());
+  } else if (object->IsNull(isolate())) {
+    new_hints |= ToBooleanHint::kNull;
+    to_boolean_value = false;
+  } else if (object->IsSmi()) {
+    new_hints |= ToBooleanHint::kSmallInteger;
+    to_boolean_value = Smi::cast(*object)->value() != 0;
+  } else if (object->IsJSReceiver()) {
+    new_hints |= ToBooleanHint::kReceiver;
+    to_boolean_value = !object->IsUndetectable();
+  } else if (object->IsString()) {
+    DCHECK(!object->IsUndetectable());
+    new_hints |= ToBooleanHint::kString;
+    to_boolean_value = String::cast(*object)->length() != 0;
+  } else if (object->IsSymbol()) {
+    new_hints |= ToBooleanHint::kSymbol;
+    to_boolean_value = true;
+  } else if (object->IsHeapNumber()) {
+    DCHECK(!object->IsUndetectable());
+    new_hints |= ToBooleanHint::kHeapNumber;
+    double value = HeapNumber::cast(*object)->value();
+    to_boolean_value = value != 0 && !std::isnan(value);
+  } else if (object->IsSimd128Value()) {
+    new_hints |= ToBooleanHint::kSimdValue;
+    to_boolean_value = true;
+  } else {
+    // We should never see an internal object at runtime here!
+    UNREACHABLE();
+    to_boolean_value = true;
+  }
+  TraceTransition(old_hints, new_hints);
+  set_sub_minor_key(HintsBits::update(sub_minor_key(), new_hints));
   return to_boolean_value;
 }
 
 void ToBooleanICStub::PrintState(std::ostream& os) const {  // NOLINT
-  os << types();
+  os << hints();
 }
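
Note: the stub-private Types bitset gives way to the shared ToBooleanHints set
from src/type-hints.h (see the new #include in the header hunk below). Roughly
how such a flag set composes, assuming a base::Flags-style wrapper (sketch
only):

  ToBooleanHints hints = ToBooleanHint::kNone;
  hints |= ToBooleanHint::kBoolean;       // record an observed input type
  hints |= ToBooleanHint::kSmallInteger;  // hints accumulate, never shrink
  bool saw_boolean = (hints & ToBooleanHint::kBoolean) != 0;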
 
-std::ostream& operator<<(std::ostream& os, const ToBooleanICStub::Types& s) {
-  os << "(";
-  SimpleListPrinter p(os);
-  if (s.IsEmpty()) p.Add("None");
-  if (s.Contains(ToBooleanICStub::UNDEFINED)) p.Add("Undefined");
-  if (s.Contains(ToBooleanICStub::BOOLEAN)) p.Add("Bool");
-  if (s.Contains(ToBooleanICStub::NULL_TYPE)) p.Add("Null");
-  if (s.Contains(ToBooleanICStub::SMI)) p.Add("Smi");
-  if (s.Contains(ToBooleanICStub::SPEC_OBJECT)) p.Add("SpecObject");
-  if (s.Contains(ToBooleanICStub::STRING)) p.Add("String");
-  if (s.Contains(ToBooleanICStub::SYMBOL)) p.Add("Symbol");
-  if (s.Contains(ToBooleanICStub::HEAP_NUMBER)) p.Add("HeapNumber");
-  if (s.Contains(ToBooleanICStub::SIMD_VALUE)) p.Add("SimdValue");
-  return os << ")";
-}
-
-bool ToBooleanICStub::Types::UpdateStatus(Isolate* isolate,
-                                          Handle<Object> object) {
-  if (object->IsUndefined(isolate)) {
-    Add(UNDEFINED);
-    return false;
-  } else if (object->IsBoolean()) {
-    Add(BOOLEAN);
-    return object->IsTrue(isolate);
-  } else if (object->IsNull(isolate)) {
-    Add(NULL_TYPE);
-    return false;
-  } else if (object->IsSmi()) {
-    Add(SMI);
-    return Smi::cast(*object)->value() != 0;
-  } else if (object->IsJSReceiver()) {
-    Add(SPEC_OBJECT);
-    return !object->IsUndetectable();
-  } else if (object->IsString()) {
-    DCHECK(!object->IsUndetectable());
-    Add(STRING);
-    return String::cast(*object)->length() != 0;
-  } else if (object->IsSymbol()) {
-    Add(SYMBOL);
-    return true;
-  } else if (object->IsHeapNumber()) {
-    DCHECK(!object->IsUndetectable());
-    Add(HEAP_NUMBER);
-    double value = HeapNumber::cast(*object)->value();
-    return value != 0 && !std::isnan(value);
-  } else if (object->IsSimd128Value()) {
-    Add(SIMD_VALUE);
-    return true;
-  } else {
-    // We should never see an internal object at runtime here!
-    UNREACHABLE();
-    return true;
-  }
-}
-
-bool ToBooleanICStub::Types::NeedsMap() const {
-  return Contains(ToBooleanICStub::SPEC_OBJECT) ||
-         Contains(ToBooleanICStub::STRING) ||
-         Contains(ToBooleanICStub::SYMBOL) ||
-         Contains(ToBooleanICStub::HEAP_NUMBER) ||
-         Contains(ToBooleanICStub::SIMD_VALUE);
-}
-
-
 void StubFailureTrampolineStub::GenerateAheadOfTime(Isolate* isolate) {
   StubFailureTrampolineStub stub1(isolate, NOT_JS_FUNCTION_STUB_MODE);
   StubFailureTrampolineStub stub2(isolate, JS_FUNCTION_STUB_MODE);
@@ -5687,7 +3107,7 @@
   Node* array = assembler->AllocateJSArray(
       elements_kind(), array_map,
       assembler->IntPtrConstant(JSArray::kPreallocatedArrayElements),
-      assembler->SmiConstant(Smi::FromInt(0)), allocation_site);
+      assembler->SmiConstant(Smi::kZero), allocation_site);
   assembler->Return(array);
 }
 
@@ -5700,7 +3120,7 @@
   Node* array = assembler->AllocateJSArray(
       elements_kind(), array_map,
       assembler->IntPtrConstant(JSArray::kPreallocatedArrayElements),
-      assembler->SmiConstant(Smi::FromInt(0)), nullptr);
+      assembler->SmiConstant(Smi::kZero), nullptr);
   assembler->Return(array);
 }
 
@@ -5721,14 +3141,14 @@
   Label call_runtime(assembler, Label::kDeferred);
 
   Node* size = assembler->Parameter(Descriptor::kArraySizeSmiParameter);
-  assembler->Branch(assembler->WordIsSmi(size), &smi_size, &call_runtime);
+  assembler->Branch(assembler->TaggedIsSmi(size), &smi_size, &call_runtime);
 
   assembler->Bind(&smi_size);
 
   if (IsFastPackedElementsKind(elements_kind)) {
     Label abort(assembler, Label::kDeferred);
     assembler->Branch(
-        assembler->SmiEqual(size, assembler->SmiConstant(Smi::FromInt(0))),
+        assembler->SmiEqual(size, assembler->SmiConstant(Smi::kZero)),
         &small_smi_size, &abort);
 
     assembler->Bind(&abort);
@@ -5828,42 +3248,10 @@
 }
 
 ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate)
-    : PlatformCodeStub(isolate) {
-  minor_key_ = ArgumentCountBits::encode(ANY);
-}
-
-ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate,
-                                           int argument_count)
-    : PlatformCodeStub(isolate) {
-  if (argument_count == 0) {
-    minor_key_ = ArgumentCountBits::encode(NONE);
-  } else if (argument_count == 1) {
-    minor_key_ = ArgumentCountBits::encode(ONE);
-  } else if (argument_count >= 2) {
-    minor_key_ = ArgumentCountBits::encode(MORE_THAN_ONE);
-  } else {
-    UNREACHABLE();
-  }
-}
+    : PlatformCodeStub(isolate) {}
 
 InternalArrayConstructorStub::InternalArrayConstructorStub(Isolate* isolate)
     : PlatformCodeStub(isolate) {}
 
-Representation RepresentationFromMachineType(MachineType type) {
-  if (type == MachineType::Int32()) {
-    return Representation::Integer32();
-  }
-
-  if (type == MachineType::TaggedSigned()) {
-    return Representation::Smi();
-  }
-
-  if (type == MachineType::Pointer()) {
-    return Representation::External();
-  }
-
-  return Representation::Tagged();
-}
-
 }  // namespace internal
 }  // namespace v8
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 5c83fde..450d0c1 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -14,6 +14,7 @@
 #include "src/interface-descriptors.h"
 #include "src/macro-assembler.h"
 #include "src/ostreams.h"
+#include "src/type-hints.h"
 
 namespace v8 {
 namespace internal {
@@ -43,10 +44,7 @@
   V(StoreBufferOverflow)                      \
   V(StoreElement)                             \
   V(SubString)                                \
-  V(StoreIC)                                  \
   V(KeyedStoreIC)                             \
-  V(KeyedLoadIC)                              \
-  V(LoadIC)                                   \
   V(LoadGlobalIC)                             \
   V(FastNewObject)                            \
   V(FastNewRestParameter)                     \
@@ -61,13 +59,8 @@
   /* version of the corresponding stub is  */ \
   /* used universally */                      \
   V(CallICTrampoline)                         \
-  V(LoadICTrampoline)                         \
-  V(KeyedLoadICTrampoline)                    \
   V(KeyedStoreICTrampoline)                   \
-  V(StoreICTrampoline)                        \
   /* --- HydrogenCodeStubs --- */             \
-  V(NumberToString)                           \
-  V(StringAdd)                                \
   /* These builtins w/ JS linkage are */      \
   /* just fast-cases of C++ builtins. They */ \
   /* require varg support from TF */          \
@@ -76,7 +69,6 @@
   /* These will be ported/eliminated */       \
   /* as part of the new IC system, ask */     \
   /* ishell before doing anything  */         \
-  V(KeyedLoadGeneric)                         \
   V(LoadConstant)                             \
   V(LoadDictionaryElement)                    \
   V(LoadFastElement)                          \
@@ -87,9 +79,7 @@
   V(BinaryOpIC)                               \
   V(BinaryOpWithAllocationSite)               \
   V(ToBooleanIC)                              \
-  V(RegExpConstructResult)                    \
   V(TransitionElementsKind)                   \
-  V(StoreGlobalViaContext)                    \
   /* --- TurboFanCodeStubs --- */             \
   V(AllocateHeapNumber)                       \
   V(AllocateFloat32x4)                        \
@@ -108,22 +98,11 @@
   V(CreateAllocationSite)                     \
   V(CreateWeakCell)                           \
   V(StringLength)                             \
-  V(Add)                                      \
   V(AddWithFeedback)                          \
-  V(Subtract)                                 \
   V(SubtractWithFeedback)                     \
-  V(Multiply)                                 \
   V(MultiplyWithFeedback)                     \
-  V(Divide)                                   \
   V(DivideWithFeedback)                       \
-  V(Modulus)                                  \
   V(ModulusWithFeedback)                      \
-  V(ShiftRight)                               \
-  V(ShiftRightLogical)                        \
-  V(ShiftLeft)                                \
-  V(BitwiseAnd)                               \
-  V(BitwiseOr)                                \
-  V(BitwiseXor)                               \
   V(Inc)                                      \
   V(InternalArrayNoArgumentConstructor)       \
   V(InternalArraySingleArgumentConstructor)   \
@@ -134,45 +113,35 @@
   V(FastCloneShallowObject)                   \
   V(FastNewClosure)                           \
   V(FastNewFunctionContext)                   \
-  V(InstanceOf)                               \
-  V(LessThan)                                 \
-  V(LessThanOrEqual)                          \
-  V(GreaterThan)                              \
-  V(GreaterThanOrEqual)                       \
-  V(Equal)                                    \
-  V(NotEqual)                                 \
   V(KeyedLoadSloppyArguments)                 \
   V(KeyedStoreSloppyArguments)                \
   V(LoadScriptContextField)                   \
   V(StoreScriptContextField)                  \
-  V(StrictEqual)                              \
-  V(StrictNotEqual)                           \
-  V(ToInteger)                                \
-  V(ToLength)                                 \
-  V(HasProperty)                              \
-  V(ForInFilter)                              \
+  V(NumberToString)                           \
+  V(StringAdd)                                \
   V(GetProperty)                              \
-  V(LoadICTF)                                 \
+  V(LoadIC)                                   \
+  V(LoadICProtoArray)                         \
   V(KeyedLoadICTF)                            \
   V(StoreFastElement)                         \
   V(StoreField)                               \
   V(StoreGlobal)                              \
-  V(StoreICTF)                                \
+  V(StoreIC)                                  \
+  V(KeyedStoreICTF)                           \
   V(StoreInterceptor)                         \
   V(StoreMap)                                 \
   V(StoreTransition)                          \
   V(LoadApiGetter)                            \
   V(LoadIndexedInterceptor)                   \
   V(GrowArrayElements)                        \
-  V(ToObject)                                 \
-  V(Typeof)                                   \
   /* These are only called from FGC and */    \
   /* can be removed when we use ignition */   \
   /* only */                                  \
-  V(LoadICTrampolineTF)                       \
+  V(LoadICTrampoline)                         \
   V(LoadGlobalICTrampoline)                   \
   V(KeyedLoadICTrampolineTF)                  \
-  V(StoreICTrampolineTF)
+  V(StoreICTrampoline)                        \
+  V(KeyedStoreICTrampolineTF)
 
 // List of code stubs only used on ARM 32 bits platforms.
 #if V8_TARGET_ARCH_ARM
@@ -756,14 +725,6 @@
   DEFINE_TURBOFAN_CODE_STUB(StringLength, TurboFanCodeStub);
 };
 
-class AddStub final : public TurboFanCodeStub {
- public:
-  explicit AddStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
-  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(Add, TurboFanCodeStub);
-};
-
 class AddWithFeedbackStub final : public TurboFanCodeStub {
  public:
   explicit AddWithFeedbackStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
@@ -773,14 +734,6 @@
                                                     TurboFanCodeStub);
 };
 
-class SubtractStub final : public TurboFanCodeStub {
- public:
-  explicit SubtractStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
-  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(Subtract, TurboFanCodeStub);
-};
-
 class SubtractWithFeedbackStub final : public TurboFanCodeStub {
  public:
   explicit SubtractWithFeedbackStub(Isolate* isolate)
@@ -791,14 +744,6 @@
                                                     TurboFanCodeStub);
 };
 
-class MultiplyStub final : public TurboFanCodeStub {
- public:
-  explicit MultiplyStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
-  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(Multiply, TurboFanCodeStub);
-};
-
 class MultiplyWithFeedbackStub final : public TurboFanCodeStub {
  public:
   explicit MultiplyWithFeedbackStub(Isolate* isolate)
@@ -809,14 +754,6 @@
                                                     TurboFanCodeStub);
 };
 
-class DivideStub final : public TurboFanCodeStub {
- public:
-  explicit DivideStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
-  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(Divide, TurboFanCodeStub);
-};
-
 class DivideWithFeedbackStub final : public TurboFanCodeStub {
  public:
   explicit DivideWithFeedbackStub(Isolate* isolate)
@@ -827,14 +764,6 @@
                                                     TurboFanCodeStub);
 };
 
-class ModulusStub final : public TurboFanCodeStub {
- public:
-  explicit ModulusStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
-  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(Modulus, TurboFanCodeStub);
-};
-
 class ModulusWithFeedbackStub final : public TurboFanCodeStub {
  public:
   explicit ModulusWithFeedbackStub(Isolate* isolate)
@@ -845,55 +774,6 @@
                                                     TurboFanCodeStub);
 };
 
-class ShiftRightStub final : public TurboFanCodeStub {
- public:
-  explicit ShiftRightStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
-  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(ShiftRight, TurboFanCodeStub);
-};
-
-class ShiftRightLogicalStub final : public TurboFanCodeStub {
- public:
-  explicit ShiftRightLogicalStub(Isolate* isolate)
-      : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
-  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(ShiftRightLogical, TurboFanCodeStub);
-};
-
-class ShiftLeftStub final : public TurboFanCodeStub {
- public:
-  explicit ShiftLeftStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
-  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(ShiftLeft, TurboFanCodeStub);
-};
-
-class BitwiseAndStub final : public TurboFanCodeStub {
- public:
-  explicit BitwiseAndStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
-  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(BitwiseAnd, TurboFanCodeStub);
-};
-
-class BitwiseOrStub final : public TurboFanCodeStub {
- public:
-  explicit BitwiseOrStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
-  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(BitwiseOr, TurboFanCodeStub);
-};
-
-class BitwiseXorStub final : public TurboFanCodeStub {
- public:
-  explicit BitwiseXorStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
-  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(BitwiseXor, TurboFanCodeStub);
-};
-
 class IncStub final : public TurboFanCodeStub {
  public:
   explicit IncStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
@@ -910,96 +790,6 @@
   DEFINE_TURBOFAN_UNARY_OP_CODE_STUB_WITH_FEEDBACK(Dec, TurboFanCodeStub);
 };
 
-class InstanceOfStub final : public TurboFanCodeStub {
- public:
-  explicit InstanceOfStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- private:
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
-  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(InstanceOf, TurboFanCodeStub);
-};
-
-class LessThanStub final : public TurboFanCodeStub {
- public:
-  explicit LessThanStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
-  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(LessThan, TurboFanCodeStub);
-};
-
-class LessThanOrEqualStub final : public TurboFanCodeStub {
- public:
-  explicit LessThanOrEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
-  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(LessThanOrEqual, TurboFanCodeStub);
-};
-
-class GreaterThanStub final : public TurboFanCodeStub {
- public:
-  explicit GreaterThanStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
-  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(GreaterThan, TurboFanCodeStub);
-};
-
-class GreaterThanOrEqualStub final : public TurboFanCodeStub {
- public:
-  explicit GreaterThanOrEqualStub(Isolate* isolate)
-      : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
-  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(GreaterThanOrEqual, TurboFanCodeStub);
-};
-
-class EqualStub final : public TurboFanCodeStub {
- public:
-  explicit EqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
-  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(Equal, TurboFanCodeStub);
-};
-
-class NotEqualStub final : public TurboFanCodeStub {
- public:
-  explicit NotEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
-  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(NotEqual, TurboFanCodeStub);
-};
-
-class StrictEqualStub final : public TurboFanCodeStub {
- public:
-  explicit StrictEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
-  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(StrictEqual, TurboFanCodeStub);
-};
-
-class StrictNotEqualStub final : public TurboFanCodeStub {
- public:
-  explicit StrictNotEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
-  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(StrictNotEqual, TurboFanCodeStub);
-};
-
-class ToIntegerStub final : public TurboFanCodeStub {
- public:
-  explicit ToIntegerStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
-  DEFINE_TURBOFAN_CODE_STUB(ToInteger, TurboFanCodeStub);
-};
-
-class ToLengthStub final : public TurboFanCodeStub {
- public:
-  explicit ToLengthStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
-  DEFINE_TURBOFAN_CODE_STUB(ToLength, TurboFanCodeStub);
-};
-
 class StoreInterceptorStub : public TurboFanCodeStub {
  public:
   explicit StoreInterceptorStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
@@ -1025,23 +815,6 @@
   DEFINE_TURBOFAN_CODE_STUB(LoadIndexedInterceptor, TurboFanCodeStub);
 };
 
-// ES6 section 12.10.3 "in" operator evaluation.
-class HasPropertyStub : public TurboFanCodeStub {
- public:
-  explicit HasPropertyStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(HasProperty);
-  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(HasProperty, TurboFanCodeStub);
-};
-
-class ForInFilterStub : public TurboFanCodeStub {
- public:
-  explicit ForInFilterStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(ForInFilter);
-  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(ForInFilter, TurboFanCodeStub);
-};
-
 // ES6 [[Get]] operation.
 class GetPropertyStub : public TurboFanCodeStub {
  public:
@@ -1051,39 +824,12 @@
   DEFINE_TURBOFAN_CODE_STUB(GetProperty, TurboFanCodeStub);
 };
 
-enum StringAddFlags {
-  // Omit both parameter checks.
-  STRING_ADD_CHECK_NONE = 0,
-  // Check left parameter.
-  STRING_ADD_CHECK_LEFT = 1 << 0,
-  // Check right parameter.
-  STRING_ADD_CHECK_RIGHT = 1 << 1,
-  // Check both parameters.
-  STRING_ADD_CHECK_BOTH = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT,
-  // Convert parameters when check fails (instead of throwing an exception).
-  STRING_ADD_CONVERT = 1 << 2,
-  STRING_ADD_CONVERT_LEFT = STRING_ADD_CHECK_LEFT | STRING_ADD_CONVERT,
-  STRING_ADD_CONVERT_RIGHT = STRING_ADD_CHECK_RIGHT | STRING_ADD_CONVERT
-};
-
-
-std::ostream& operator<<(std::ostream& os, const StringAddFlags& flags);
-
-
-class NumberToStringStub final : public HydrogenCodeStub {
+class NumberToStringStub final : public TurboFanCodeStub {
  public:
-  explicit NumberToStringStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
+  explicit NumberToStringStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
-  DEFINE_HYDROGEN_CODE_STUB(NumberToString, HydrogenCodeStub);
-};
-
-class TypeofStub final : public TurboFanCodeStub {
- public:
-  explicit TypeofStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(Typeof);
-  DEFINE_TURBOFAN_UNARY_OP_CODE_STUB(Typeof, TurboFanCodeStub);
+  DEFINE_TURBOFAN_CODE_STUB(NumberToString, TurboFanCodeStub);
 };
 
 class FastNewClosureStub : public TurboFanCodeStub {
@@ -1329,24 +1075,12 @@
 
 class ArrayConstructorStub: public PlatformCodeStub {
  public:
-  enum ArgumentCountKey { ANY, NONE, ONE, MORE_THAN_ONE };
-
-  ArrayConstructorStub(Isolate* isolate, int argument_count);
-
   explicit ArrayConstructorStub(Isolate* isolate);
 
  private:
-  ArgumentCountKey argument_count() const {
-    return ArgumentCountBits::decode(minor_key_);
-  }
-
   void GenerateDispatchToArrayStub(MacroAssembler* masm,
                                    AllocationSiteOverrideMode mode);
 
-  void PrintName(std::ostream& os) const override;  // NOLINT
-
-  class ArgumentCountBits : public BitField<ArgumentCountKey, 0, 2> {};
-
   DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayNArgumentsConstructor);
   DEFINE_PLATFORM_CODE_STUB(ArrayConstructor, PlatformCodeStub);
 };
@@ -1410,7 +1144,6 @@
   }
 
  protected:
-  int arg_count() const { return state().argc(); }
   ConvertReceiverMode convert_mode() const { return state().convert_mode(); }
   TailCallMode tail_call_mode() const { return state().tail_call_mode(); }
 
@@ -1726,33 +1459,6 @@
   DEFINE_TURBOFAN_CODE_STUB(StoreGlobal, TurboFanCodeStub);
 };
 
-// TODO(ishell): remove, once StoreGlobalIC is implemented.
-class StoreGlobalViaContextStub final : public PlatformCodeStub {
- public:
-  static const int kMaximumDepth = 15;
-
-  StoreGlobalViaContextStub(Isolate* isolate, int depth,
-                            LanguageMode language_mode)
-      : PlatformCodeStub(isolate) {
-    minor_key_ =
-        DepthBits::encode(depth) | LanguageModeBits::encode(language_mode);
-  }
-
-  int depth() const { return DepthBits::decode(minor_key_); }
-  LanguageMode language_mode() const {
-    return LanguageModeBits::decode(minor_key_);
-  }
-
- private:
-  class DepthBits : public BitField<int, 0, 4> {};
-  STATIC_ASSERT(DepthBits::kMax == kMaximumDepth);
-  class LanguageModeBits : public BitField<LanguageMode, 4, 1> {};
-  STATIC_ASSERT(LANGUAGE_END == 2);
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreGlobalViaContext);
-  DEFINE_PLATFORM_CODE_STUB(StoreGlobalViaContext, PlatformCodeStub);
-};
-
 class CallApiCallbackStub : public PlatformCodeStub {
  public:
   static const int kArgBits = 3;
@@ -1896,22 +1602,21 @@
   DEFINE_HYDROGEN_CODE_STUB(BinaryOpWithAllocationSite, BinaryOpICStub);
 };
 
-
-class StringAddStub final : public HydrogenCodeStub {
+class StringAddStub final : public TurboFanCodeStub {
  public:
   StringAddStub(Isolate* isolate, StringAddFlags flags,
                 PretenureFlag pretenure_flag)
-      : HydrogenCodeStub(isolate) {
-    set_sub_minor_key(StringAddFlagsBits::encode(flags) |
-                      PretenureFlagBits::encode(pretenure_flag));
+      : TurboFanCodeStub(isolate) {
+    minor_key_ = (StringAddFlagsBits::encode(flags) |
+                  PretenureFlagBits::encode(pretenure_flag));
   }
 
   StringAddFlags flags() const {
-    return StringAddFlagsBits::decode(sub_minor_key());
+    return StringAddFlagsBits::decode(minor_key_);
   }
 
   PretenureFlag pretenure_flag() const {
-    return PretenureFlagBits::decode(sub_minor_key());
+    return PretenureFlagBits::decode(minor_key_);
   }
 
  private:
@@ -1921,7 +1626,7 @@
   void PrintBaseName(std::ostream& os) const override;  // NOLINT
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(StringAdd);
-  DEFINE_HYDROGEN_CODE_STUB(StringAdd, HydrogenCodeStub);
+  DEFINE_TURBOFAN_CODE_STUB(StringAdd, TurboFanCodeStub);
 };
 
 
@@ -2077,17 +1782,6 @@
   DEFINE_PLATFORM_CODE_STUB(RegExpExec, PlatformCodeStub);
 };
 
-// TODO(jgruber): Remove this once all uses in regexp.js have been removed.
-class RegExpConstructResultStub final : public HydrogenCodeStub {
- public:
-  explicit RegExpConstructResultStub(Isolate* isolate)
-      : HydrogenCodeStub(isolate) { }
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(RegExpConstructResult);
-  DEFINE_HYDROGEN_CODE_STUB(RegExpConstructResult, HydrogenCodeStub);
-};
-
-
 // TODO(bmeurer/mvstanton): Turn CallConstructStub into ConstructICStub.
 class CallConstructStub final : public PlatformCodeStub {
  public:
@@ -2277,39 +1971,16 @@
   DEFINE_HYDROGEN_CODE_STUB(LoadDictionaryElement, HydrogenCodeStub);
 };
 
-
-class KeyedLoadGenericStub : public HydrogenCodeStub {
+class LoadICTrampolineStub : public TurboFanCodeStub {
  public:
-  explicit KeyedLoadGenericStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
-
-  Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
-  DEFINE_HYDROGEN_CODE_STUB(KeyedLoadGeneric, HydrogenCodeStub);
-};
-
-
-class LoadICTrampolineStub : public PlatformCodeStub {
- public:
-  explicit LoadICTrampolineStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
-  Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
-  DEFINE_PLATFORM_CODE_STUB(LoadICTrampoline, PlatformCodeStub);
-};
-
-class LoadICTrampolineTFStub : public TurboFanCodeStub {
- public:
-  explicit LoadICTrampolineTFStub(Isolate* isolate)
-      : TurboFanCodeStub(isolate) {}
+  explicit LoadICTrampolineStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
 
   void GenerateAssembly(CodeStubAssembler* assembler) const override;
 
   Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
-  DEFINE_CODE_STUB(LoadICTrampolineTF, TurboFanCodeStub);
+  DEFINE_CODE_STUB(LoadICTrampoline, TurboFanCodeStub);
 };
 
 class LoadGlobalICTrampolineStub : public TurboFanCodeStub {
@@ -2332,36 +2003,48 @@
   DEFINE_CODE_STUB(LoadGlobalICTrampoline, TurboFanCodeStub);
 };
 
-class KeyedLoadICTrampolineStub : public LoadICTrampolineStub {
- public:
-  explicit KeyedLoadICTrampolineStub(Isolate* isolate)
-      : LoadICTrampolineStub(isolate) {}
-
-  Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
-
-  DEFINE_PLATFORM_CODE_STUB(KeyedLoadICTrampoline, LoadICTrampolineStub);
-};
-
-class KeyedLoadICTrampolineTFStub : public LoadICTrampolineTFStub {
+class KeyedLoadICTrampolineTFStub : public LoadICTrampolineStub {
  public:
   explicit KeyedLoadICTrampolineTFStub(Isolate* isolate)
-      : LoadICTrampolineTFStub(isolate) {}
+      : LoadICTrampolineStub(isolate) {}
 
   void GenerateAssembly(CodeStubAssembler* assembler) const override;
 
   Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
 
-  DEFINE_CODE_STUB(KeyedLoadICTrampolineTF, LoadICTrampolineTFStub);
+  DEFINE_CODE_STUB(KeyedLoadICTrampolineTF, LoadICTrampolineStub);
 };
 
-class StoreICTrampolineStub : public PlatformCodeStub {
+class StoreICTrampolineStub : public TurboFanCodeStub {
  public:
   StoreICTrampolineStub(Isolate* isolate, const StoreICState& state)
+      : TurboFanCodeStub(isolate) {
+    minor_key_ = state.GetExtraICState();
+  }
+
+  void GenerateAssembly(CodeStubAssembler* assembler) const override;
+
+  Code::Kind GetCodeKind() const override { return Code::STORE_IC; }
+
+  ExtraICState GetExtraICState() const final {
+    return static_cast<ExtraICState>(minor_key_);
+  }
+
+ protected:
+  StoreICState state() const { return StoreICState(GetExtraICState()); }
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Store);
+  DEFINE_CODE_STUB(StoreICTrampoline, TurboFanCodeStub);
+};
+
+class KeyedStoreICTrampolineStub : public PlatformCodeStub {
+ public:
+  KeyedStoreICTrampolineStub(Isolate* isolate, const StoreICState& state)
       : PlatformCodeStub(isolate) {
     minor_key_ = state.GetExtraICState();
   }
 
-  Code::Kind GetCodeKind() const override { return Code::STORE_IC; }
+  Code::Kind GetCodeKind() const override { return Code::KEYED_STORE_IC; }
 
   ExtraICState GetExtraICState() const final {
     return static_cast<ExtraICState>(minor_key_);
@@ -2372,41 +2055,21 @@
 
  private:
   DEFINE_CALL_INTERFACE_DESCRIPTOR(Store);
-  DEFINE_PLATFORM_CODE_STUB(StoreICTrampoline, PlatformCodeStub);
+  DEFINE_PLATFORM_CODE_STUB(KeyedStoreICTrampoline, PlatformCodeStub);
 };
 
-class StoreICTrampolineTFStub : public TurboFanCodeStub {
+class KeyedStoreICTrampolineTFStub : public StoreICTrampolineStub {
  public:
-  StoreICTrampolineTFStub(Isolate* isolate, const StoreICState& state)
-      : TurboFanCodeStub(isolate) {
-    minor_key_ = state.GetExtraICState();
-  }
+  KeyedStoreICTrampolineTFStub(Isolate* isolate, const StoreICState& state)
+      : StoreICTrampolineStub(isolate, state) {}
 
   void GenerateAssembly(CodeStubAssembler* assembler) const override;
 
-  Code::Kind GetCodeKind() const override { return Code::STORE_IC; }
-  ExtraICState GetExtraICState() const final {
-    return static_cast<ExtraICState>(minor_key_);
-  }
-
- protected:
-  StoreICState state() const { return StoreICState(GetExtraICState()); }
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(Store);
-  DEFINE_CODE_STUB(StoreICTrampolineTF, TurboFanCodeStub);
-};
-
-class KeyedStoreICTrampolineStub : public StoreICTrampolineStub {
- public:
-  KeyedStoreICTrampolineStub(Isolate* isolate, const StoreICState& state)
-      : StoreICTrampolineStub(isolate, state) {}
-
   Code::Kind GetCodeKind() const override { return Code::KEYED_STORE_IC; }
 
-  DEFINE_PLATFORM_CODE_STUB(KeyedStoreICTrampoline, StoreICTrampolineStub);
+  DEFINE_CODE_STUB(KeyedStoreICTrampolineTF, StoreICTrampolineStub);
 };
 
-
 class CallICTrampolineStub : public PlatformCodeStub {
  public:
   CallICTrampolineStub(Isolate* isolate, const CallICState& state)
@@ -2429,32 +2092,26 @@
   DEFINE_PLATFORM_CODE_STUB(CallICTrampoline, PlatformCodeStub);
 };
 
-
-class LoadICStub : public PlatformCodeStub {
+class LoadICStub : public TurboFanCodeStub {
  public:
-  explicit LoadICStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
-  void GenerateForTrampoline(MacroAssembler* masm);
-
-  Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
-  DEFINE_PLATFORM_CODE_STUB(LoadIC, PlatformCodeStub);
-
- protected:
-  void GenerateImpl(MacroAssembler* masm, bool in_frame);
-};
-
-class LoadICTFStub : public TurboFanCodeStub {
- public:
-  explicit LoadICTFStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+  explicit LoadICStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
 
   void GenerateAssembly(CodeStubAssembler* assembler) const override;
 
   Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
-  DEFINE_CODE_STUB(LoadICTF, TurboFanCodeStub);
+  DEFINE_CODE_STUB(LoadIC, TurboFanCodeStub);
+};
+
+class LoadICProtoArrayStub : public TurboFanCodeStub {
+ public:
+  explicit LoadICProtoArrayStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  void GenerateAssembly(CodeStubAssembler* assembler) const override;
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadICProtoArray);
+  DEFINE_CODE_STUB(LoadICProtoArray, TurboFanCodeStub);
 };
 
 class LoadGlobalICStub : public TurboFanCodeStub {
@@ -2476,57 +2133,20 @@
   DEFINE_CODE_STUB(LoadGlobalIC, TurboFanCodeStub);
 };
 
-class KeyedLoadICStub : public PlatformCodeStub {
+class KeyedLoadICTFStub : public LoadICStub {
  public:
-  explicit KeyedLoadICStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
-  void GenerateForTrampoline(MacroAssembler* masm);
-
-  Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
-  DEFINE_PLATFORM_CODE_STUB(KeyedLoadIC, PlatformCodeStub);
-
- protected:
-  void GenerateImpl(MacroAssembler* masm, bool in_frame);
-};
-
-class KeyedLoadICTFStub : public LoadICTFStub {
- public:
-  explicit KeyedLoadICTFStub(Isolate* isolate) : LoadICTFStub(isolate) {}
+  explicit KeyedLoadICTFStub(Isolate* isolate) : LoadICStub(isolate) {}
 
   void GenerateAssembly(CodeStubAssembler* assembler) const override;
 
   Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
 
-  DEFINE_CODE_STUB(KeyedLoadICTF, LoadICTFStub);
+  DEFINE_CODE_STUB(KeyedLoadICTF, LoadICStub);
 };
 
-class StoreICStub : public PlatformCodeStub {
+class StoreICStub : public TurboFanCodeStub {
  public:
   StoreICStub(Isolate* isolate, const StoreICState& state)
-      : PlatformCodeStub(isolate) {
-    minor_key_ = state.GetExtraICState();
-  }
-
-  void GenerateForTrampoline(MacroAssembler* masm);
-
-  Code::Kind GetCodeKind() const final { return Code::STORE_IC; }
-
-  ExtraICState GetExtraICState() const final {
-    return static_cast<ExtraICState>(minor_key_);
-  }
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
-  DEFINE_PLATFORM_CODE_STUB(StoreIC, PlatformCodeStub);
-
- protected:
-  void GenerateImpl(MacroAssembler* masm, bool in_frame);
-};
-
-class StoreICTFStub : public TurboFanCodeStub {
- public:
-  StoreICTFStub(Isolate* isolate, const StoreICState& state)
       : TurboFanCodeStub(isolate) {
     minor_key_ = state.GetExtraICState();
   }
@@ -2539,7 +2159,7 @@
   }
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
-  DEFINE_CODE_STUB(StoreICTF, TurboFanCodeStub);
+  DEFINE_CODE_STUB(StoreIC, TurboFanCodeStub);
 };
 
 class KeyedStoreICStub : public PlatformCodeStub {
@@ -2564,6 +2184,17 @@
   void GenerateImpl(MacroAssembler* masm, bool in_frame);
 };
 
+class KeyedStoreICTFStub : public StoreICStub {
+ public:
+  KeyedStoreICTFStub(Isolate* isolate, const StoreICState& state)
+      : StoreICStub(isolate, state) {}
+
+  void GenerateAssembly(CodeStubAssembler* assembler) const override;
+
+  Code::Kind GetCodeKind() const override { return Code::KEYED_STORE_IC; }
+
+  DEFINE_CODE_STUB(KeyedStoreICTF, StoreICStub);
+};
 
 class DoubleToIStub : public PlatformCodeStub {
  public:
@@ -2938,45 +2569,15 @@
 
 class ToBooleanICStub : public HydrogenCodeStub {
  public:
-  enum Type {
-    UNDEFINED,
-    BOOLEAN,
-    NULL_TYPE,
-    SMI,
-    SPEC_OBJECT,
-    STRING,
-    SYMBOL,
-    HEAP_NUMBER,
-    SIMD_VALUE,
-    NUMBER_OF_TYPES
-  };
-
-  // At most 16 different types can be distinguished, because the Code object
-  // only has room for two bytes to hold a set of these types. :-P
-  STATIC_ASSERT(NUMBER_OF_TYPES <= 16);
-
-  class Types : public EnumSet<Type, uint16_t> {
-   public:
-    Types() : EnumSet<Type, uint16_t>(0) {}
-    explicit Types(uint16_t bits) : EnumSet<Type, uint16_t>(bits) {}
-
-    bool UpdateStatus(Isolate* isolate, Handle<Object> object);
-    bool NeedsMap() const;
-    bool CanBeUndetectable() const {
-      return Contains(ToBooleanICStub::SPEC_OBJECT);
-    }
-    bool IsGeneric() const { return ToIntegral() == Generic().ToIntegral(); }
-
-    static Types Generic() { return Types((1 << NUMBER_OF_TYPES) - 1); }
-  };
-
   ToBooleanICStub(Isolate* isolate, ExtraICState state)
       : HydrogenCodeStub(isolate) {
-    set_sub_minor_key(TypesBits::encode(static_cast<uint16_t>(state)));
+    set_sub_minor_key(HintsBits::encode(static_cast<uint16_t>(state)));
   }
 
   bool UpdateStatus(Handle<Object> object);
-  Types types() const { return Types(TypesBits::decode(sub_minor_key())); }
+  ToBooleanHints hints() const {
+    return ToBooleanHints(HintsBits::decode(sub_minor_key()));
+  }
 
   Code::Kind GetCodeKind() const override { return Code::TO_BOOLEAN_IC; }
   void PrintState(std::ostream& os) const override;  // NOLINT
@@ -2987,10 +2588,10 @@
     return ToBooleanICStub(isolate, UNINITIALIZED).GetCode();
   }
 
-  ExtraICState GetExtraICState() const override { return types().ToIntegral(); }
+  ExtraICState GetExtraICState() const override { return hints(); }
 
   InlineCacheState GetICState() const {
-    if (types().IsEmpty()) {
+    if (hints() == ToBooleanHint::kNone) {
       return ::v8::internal::UNINITIALIZED;
     } else {
       return MONOMORPHIC;
@@ -3001,14 +2602,15 @@
   ToBooleanICStub(Isolate* isolate, InitializationState init_state)
       : HydrogenCodeStub(isolate, init_state) {}
 
-  class TypesBits : public BitField<uint16_t, 0, NUMBER_OF_TYPES> {};
+  static const int kNumHints = 9;
+  STATIC_ASSERT(static_cast<int>(ToBooleanHint::kAny) ==
+                ((1 << kNumHints) - 1));
+  class HintsBits : public BitField<uint16_t, 0, kNumHints> {};
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
   DEFINE_HYDROGEN_CODE_STUB(ToBooleanIC, HydrogenCodeStub);
 };
 
-std::ostream& operator<<(std::ostream& os, const ToBooleanICStub::Types& t);
-
 class ElementsTransitionAndStoreStub : public TurboFanCodeStub {
  public:
   ElementsTransitionAndStoreStub(Isolate* isolate, ElementsKind from_kind,
@@ -3123,13 +2725,6 @@
   DEFINE_CODE_STUB(SubString, TurboFanCodeStub);
 };
 
-class ToObjectStub final : public TurboFanCodeStub {
- public:
-  explicit ToObjectStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
-  DEFINE_TURBOFAN_CODE_STUB(ToObject, TurboFanCodeStub);
-};
 
 #undef DEFINE_CALL_INTERFACE_DESCRIPTOR
 #undef DEFINE_PLATFORM_CODE_STUB
@@ -3138,8 +2733,6 @@
 #undef DEFINE_CODE_STUB
 #undef DEFINE_CODE_STUB_BASE
 
-extern Representation RepresentationFromMachineType(MachineType type);
-
 }  // namespace internal
 }  // namespace v8
 
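The code-stubs.h hunks above fold the transitional *TF stub classes into their plain names as the IC stubs finish migrating from per-architecture MacroAssembler code to the TurboFan CodeStubAssembler. A minimal sketch of the resulting shape (MyICStub is an illustrative stand-in, not part of this patch):

    class MyICStub : public TurboFanCodeStub {
     public:
      explicit MyICStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}

      // One portable CodeStubAssembler body replaces the per-architecture
      // Generate()/GenerateImpl() pair of the old Platform*Stub variants.
      void GenerateAssembly(CodeStubAssembler* assembler) const override;

      Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }

      DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
      DEFINE_CODE_STUB(MyIC, TurboFanCodeStub);
    };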
diff --git a/src/codegen.h b/src/codegen.h
index d0b67f1..a17ad2a 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -6,6 +6,7 @@
 #define V8_CODEGEN_H_
 
 #include "src/code-stubs.h"
+#include "src/globals.h"
 #include "src/runtime/runtime.h"
 
 // Include the declaration of the architecture defined class CodeGenerator.
@@ -97,8 +98,7 @@
 
 UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate);
 
-
-double modulo(double x, double y);
+V8_EXPORT_PRIVATE double modulo(double x, double y);
 
 // Custom implementation of math functions.
 double fast_sqrt(double input, Isolate* isolate);
diff --git a/src/compilation-dependencies.h b/src/compilation-dependencies.h
index a40eb74..9a8229e 100644
--- a/src/compilation-dependencies.h
+++ b/src/compilation-dependencies.h
@@ -28,8 +28,8 @@
   void AssumeInitialMapCantChange(Handle<Map> map) {
     Insert(DependentCode::kInitialMapChangedGroup, map);
   }
-  void AssumeFieldType(Handle<Map> map) {
-    Insert(DependentCode::kFieldTypeGroup, map);
+  void AssumeFieldOwner(Handle<Map> map) {
+    Insert(DependentCode::kFieldOwnerGroup, map);
   }
   void AssumeMapStable(Handle<Map> map);
   void AssumePrototypeMapsStable(
diff --git a/src/compilation-info.cc b/src/compilation-info.cc
index 2e0934a..5c9fa58 100644
--- a/src/compilation-info.cc
+++ b/src/compilation-info.cc
@@ -9,6 +9,7 @@
 #include "src/ast/scopes.h"
 #include "src/isolate.h"
 #include "src/parsing/parse-info.h"
+#include "src/source-position.h"
 
 namespace v8 {
 namespace internal {
@@ -32,6 +33,20 @@
 #undef PARSE_INFO_GETTER
 #undef PARSE_INFO_GETTER_WITH_DEFAULT
 
+bool CompilationInfo::is_debug() const {
+  return parse_info() ? parse_info()->is_debug() : false;
+}
+
+void CompilationInfo::set_is_debug() {
+  CHECK(parse_info());
+  parse_info()->set_is_debug();
+}
+
+void CompilationInfo::PrepareForSerializing() {
+  if (parse_info()) parse_info()->set_will_serialize();
+  SetFlag(kSerializing);
+}
+
 bool CompilationInfo::has_shared_info() const {
   return parse_info_ && !parse_info_->shared_info().is_null();
 }
@@ -51,8 +66,12 @@
   if (isolate_->serializer_enabled()) EnableDeoptimizationSupport();
 
   if (FLAG_function_context_specialization) MarkAsFunctionContextSpecializing();
-  if (FLAG_turbo_source_positions) MarkAsSourcePositionsEnabled();
   if (FLAG_turbo_splitting) MarkAsSplittingEnabled();
+
+  if (FLAG_trace_deopt || FLAG_trace_turbo || FLAG_trace_turbo_graph ||
+      FLAG_turbo_profiling || isolate_->is_profiling()) {
+    MarkAsSourcePositionsEnabled();
+  }
 }
 
 CompilationInfo::CompilationInfo(Vector<const char> debug_name,
@@ -200,10 +219,12 @@
   code_flags_ = Code::KindField::update(code_flags_, Code::OPTIMIZED_FUNCTION);
 }
 
-void CompilationInfo::AddInlinedFunction(
-    Handle<SharedFunctionInfo> inlined_function) {
+int CompilationInfo::AddInlinedFunction(
+    Handle<SharedFunctionInfo> inlined_function, SourcePosition pos) {
+  int id = static_cast<int>(inlined_functions_.size());
   inlined_functions_.push_back(InlinedFunctionHolder(
-      inlined_function, handle(inlined_function->code())));
+      inlined_function, handle(inlined_function->code()), pos));
+  return id;
 }
 
 Code::Kind CompilationInfo::output_code_kind() const {
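AddInlinedFunction now returns the holder's index so that callers can thread it through as the inlining id for source position tracking. A hypothetical caller, using only the two CompilationInfo members shown in this patch (inlined_shared, pos, and literal_id are stand-ins):

    // Record the inlinee and keep its id for SourcePosition bookkeeping.
    int inlining_id = info->AddInlinedFunction(inlined_shared, pos);
    // Later, while generating deoptimization literals, the holder learns
    // its literal index:
    info->inlined_functions()[inlining_id].RegisterInlinedFunctionId(literal_id);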
diff --git a/src/compilation-info.h b/src/compilation-info.h
index 88477ae..77b9e34 100644
--- a/src/compilation-info.h
+++ b/src/compilation-info.h
@@ -37,22 +37,19 @@
     kNonDeferredCalling = 1 << 1,
     kSavesCallerDoubles = 1 << 2,
     kRequiresFrame = 1 << 3,
-    kMustNotHaveEagerFrame = 1 << 4,
-    kDeoptimizationSupport = 1 << 5,
-    kDebug = 1 << 6,
-    kSerializing = 1 << 7,
-    kFunctionContextSpecializing = 1 << 8,
-    kFrameSpecializing = 1 << 9,
-    kNativeContextSpecializing = 1 << 10,
-    kInliningEnabled = 1 << 11,
-    kDisableFutureOptimization = 1 << 12,
-    kSplittingEnabled = 1 << 13,
-    kDeoptimizationEnabled = 1 << 14,
-    kSourcePositionsEnabled = 1 << 15,
-    kBailoutOnUninitialized = 1 << 16,
-    kOptimizeFromBytecode = 1 << 17,
-    kTypeFeedbackEnabled = 1 << 18,
-    kAccessorInliningEnabled = 1 << 19,
+    kDeoptimizationSupport = 1 << 4,
+    kAccessorInliningEnabled = 1 << 5,
+    kSerializing = 1 << 6,
+    kFunctionContextSpecializing = 1 << 7,
+    kFrameSpecializing = 1 << 8,
+    kInliningEnabled = 1 << 9,
+    kDisableFutureOptimization = 1 << 10,
+    kSplittingEnabled = 1 << 11,
+    kDeoptimizationEnabled = 1 << 12,
+    kSourcePositionsEnabled = 1 << 13,
+    kBailoutOnUninitialized = 1 << 14,
+    kOptimizeFromBytecode = 1 << 15,
+    kTypeFeedbackEnabled = 1 << 16,
   };
 
   CompilationInfo(ParseInfo* parse_info, Handle<JSFunction> closure);
@@ -112,23 +109,17 @@
 
   bool requires_frame() const { return GetFlag(kRequiresFrame); }
 
-  void MarkMustNotHaveEagerFrame() { SetFlag(kMustNotHaveEagerFrame); }
-
-  bool GetMustNotHaveEagerFrame() const {
-    return GetFlag(kMustNotHaveEagerFrame);
-  }
-
   // Compiles marked as debug produce unoptimized code with debug break slots.
   // Inner functions that cannot be compiled w/o context are compiled eagerly.
   // Always include deoptimization support to avoid having to recompile again.
   void MarkAsDebug() {
-    SetFlag(kDebug);
+    set_is_debug();
     SetFlag(kDeoptimizationSupport);
   }
 
-  bool is_debug() const { return GetFlag(kDebug); }
+  bool is_debug() const;
 
-  void PrepareForSerializing() { SetFlag(kSerializing); }
+  void PrepareForSerializing();
 
   bool will_serialize() const { return GetFlag(kSerializing); }
 
@@ -144,14 +135,6 @@
 
   bool is_frame_specializing() const { return GetFlag(kFrameSpecializing); }
 
-  void MarkAsNativeContextSpecializing() {
-    SetFlag(kNativeContextSpecializing);
-  }
-
-  bool is_native_context_specializing() const {
-    return GetFlag(kNativeContextSpecializing);
-  }
-
   void MarkAsDeoptimizationEnabled() { SetFlag(kDeoptimizationEnabled); }
 
   bool is_deoptimization_enabled() const {
@@ -300,18 +283,29 @@
     // Do not remove.
     Handle<Code> inlined_code_object_root;
 
+    InliningPosition position;
+
     InlinedFunctionHolder(Handle<SharedFunctionInfo> inlined_shared_info,
-                          Handle<Code> inlined_code_object_root)
+                          Handle<Code> inlined_code_object_root,
+                          SourcePosition pos)
         : shared_info(inlined_shared_info),
-          inlined_code_object_root(inlined_code_object_root) {}
+          inlined_code_object_root(inlined_code_object_root) {
+      position.position = pos;
+      // Initialized when generating the deoptimization literals.
+      position.inlined_function_id = DeoptimizationInputData::kNotInlinedIndex;
+    }
+
+    void RegisterInlinedFunctionId(size_t inlined_function_id) {
+      position.inlined_function_id = static_cast<int>(inlined_function_id);
+    }
   };
 
   typedef std::vector<InlinedFunctionHolder> InlinedFunctionList;
-  InlinedFunctionList const& inlined_functions() const {
-    return inlined_functions_;
-  }
+  InlinedFunctionList& inlined_functions() { return inlined_functions_; }
 
-  void AddInlinedFunction(Handle<SharedFunctionInfo> inlined_function);
+  // Returns the inlining id for source position tracking.
+  int AddInlinedFunction(Handle<SharedFunctionInfo> inlined_function,
+                         SourcePosition pos);
 
   std::unique_ptr<char[]> GetDebugName() const;
 
@@ -346,6 +340,8 @@
 
   bool GetFlag(Flag flag) const { return (flags_ & flag) != 0; }
 
+  void set_is_debug();
+
   unsigned flags_;
 
   Code::Flags code_flags_;
diff --git a/src/compiler-dispatcher/compiler-dispatcher-job.cc b/src/compiler-dispatcher/compiler-dispatcher-job.cc
index 96956ae..b87a4a5 100644
--- a/src/compiler-dispatcher/compiler-dispatcher-job.cc
+++ b/src/compiler-dispatcher/compiler-dispatcher-job.cc
@@ -6,6 +6,7 @@
 
 #include "src/assert-scope.h"
 #include "src/compilation-info.h"
+#include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
 #include "src/compiler.h"
 #include "src/global-handles.h"
 #include "src/isolate.h"
@@ -23,6 +24,7 @@
                                              Handle<SharedFunctionInfo> shared,
                                              size_t max_stack_size)
     : isolate_(isolate),
+      tracer_(isolate_->compiler_dispatcher_tracer()),
       shared_(Handle<SharedFunctionInfo>::cast(
           isolate_->global_handles()->Create(*shared))),
       max_stack_size_(max_stack_size),
@@ -45,9 +47,10 @@
 void CompilerDispatcherJob::PrepareToParseOnMainThread() {
   DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
   DCHECK(status() == CompileJobStatus::kInitial);
+  COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kPrepareToParse);
   HandleScope scope(isolate_);
   unicode_cache_.reset(new UnicodeCache());
-  zone_.reset(new Zone(isolate_->allocator()));
+  zone_.reset(new Zone(isolate_->allocator(), ZONE_NAME));
   Handle<Script> script(Script::cast(shared_->script()), isolate_);
   DCHECK(script->type() != Script::TYPE_NATIVE);
 
@@ -66,7 +69,6 @@
   parse_info_.reset(new ParseInfo(zone_.get()));
   parse_info_->set_isolate(isolate_);
   parse_info_->set_character_stream(character_stream_.get());
-  parse_info_->set_lazy();
   parse_info_->set_hash_seed(isolate_->heap()->HashSeed());
   parse_info_->set_is_named_expression(shared_->is_named_expression());
   parse_info_->set_compiler_hints(shared_->compiler_hints());
@@ -93,6 +95,9 @@
   DCHECK(can_parse_on_background_thread_ ||
          ThreadId::Current().Equals(isolate_->thread_id()));
   DCHECK(status() == CompileJobStatus::kReadyToParse);
+  COMPILER_DISPATCHER_TRACE_SCOPE_WITH_NUM(
+      tracer_, kParse,
+      parse_info_->end_position() - parse_info_->start_position());
 
   DisallowHeapAllocation no_allocation;
   DisallowHandleAllocation no_handles;
@@ -120,6 +125,7 @@
 bool CompilerDispatcherJob::FinalizeParsingOnMainThread() {
   DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
   DCHECK(status() == CompileJobStatus::kParsed);
+  COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kFinalizeParsing);
 
   if (!source_.is_null()) {
     i::GlobalHandles::Destroy(Handle<Object>::cast(source_).location());
@@ -144,17 +150,9 @@
     }
     parse_info_->set_shared_info(shared_);
 
-    {
-      // Create a canonical handle scope if compiling ignition bytecode. This is
-      // required by the constant array builder to de-duplicate objects without
-      // dereferencing handles.
-      std::unique_ptr<CanonicalHandleScope> canonical;
-      if (FLAG_ignition) canonical.reset(new CanonicalHandleScope(isolate_));
-
-      // Do the parsing tasks which need to be done on the main thread. This
-      // will also handle parse errors.
-      parser_->Internalize(isolate_, script, parse_info_->literal() == nullptr);
-    }
+    // Do the parsing tasks which need to be done on the main thread. This
+    // will also handle parse errors.
+    parser_->Internalize(isolate_, script, parse_info_->literal() == nullptr);
     parser_->HandleSourceURLComments(isolate_, script);
 
     parse_info_->set_character_stream(nullptr);
@@ -171,6 +169,7 @@
 bool CompilerDispatcherJob::PrepareToCompileOnMainThread() {
   DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
   DCHECK(status() == CompileJobStatus::kReadyToAnalyse);
+  COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kPrepareToCompile);
 
   compile_info_.reset(
       new CompilationInfo(parse_info_.get(), Handle<JSFunction>::null()));
@@ -198,6 +197,8 @@
   DCHECK(status() == CompileJobStatus::kReadyToCompile);
   DCHECK(can_compile_on_background_thread_ ||
          ThreadId::Current().Equals(isolate_->thread_id()));
+  COMPILER_DISPATCHER_TRACE_SCOPE_WITH_NUM(
+      tracer_, kCompile, parse_info_->literal()->ast_node_count());
 
   // Disallowing of handle dereference and heap access dealt with in
   // CompilationJob::ExecuteJob.
@@ -216,6 +217,7 @@
 bool CompilerDispatcherJob::FinalizeCompilingOnMainThread() {
   DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
   DCHECK(status() == CompileJobStatus::kCompiled);
+  COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kFinalizeCompiling);
 
   if (compile_job_->state() == CompilationJob::State::kFailed ||
       !Compiler::FinalizeCompilationJob(compile_job_.release())) {
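Throughout this patch, Zone constructions gain a ZONE_NAME argument that labels each zone for allocation accounting; the assumption here is that ZONE_NAME expands to __func__, so for example:

    // Inside CompilerDispatcherJob::PrepareToParseOnMainThread():
    zone_.reset(new Zone(isolate_->allocator(), ZONE_NAME));
    // The zone is then tagged "PrepareToParseOnMainThread" in zone
    // memory statistics (assuming ZONE_NAME == __func__).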
diff --git a/src/compiler-dispatcher/compiler-dispatcher-job.h b/src/compiler-dispatcher/compiler-dispatcher-job.h
index f3aaf93..7f4c6ce 100644
--- a/src/compiler-dispatcher/compiler-dispatcher-job.h
+++ b/src/compiler-dispatcher/compiler-dispatcher-job.h
@@ -8,12 +8,14 @@
 #include <memory>
 
 #include "src/base/macros.h"
+#include "src/globals.h"
 #include "src/handles.h"
 #include "testing/gtest/include/gtest/gtest_prod.h"
 
 namespace v8 {
 namespace internal {
 
+class CompilerDispatcherTracer;
 class CompilationInfo;
 class CompilationJob;
 class Isolate;
@@ -36,7 +38,7 @@
   kDone,
 };
 
-class CompilerDispatcherJob {
+class V8_EXPORT_PRIVATE CompilerDispatcherJob {
  public:
   CompilerDispatcherJob(Isolate* isolate, Handle<SharedFunctionInfo> shared,
                         size_t max_stack_size);
@@ -81,6 +83,7 @@
 
   CompileJobStatus status_ = CompileJobStatus::kInitial;
   Isolate* isolate_;
+  CompilerDispatcherTracer* tracer_;
   Handle<SharedFunctionInfo> shared_;  // Global handle.
   Handle<String> source_;        // Global handle.
   size_t max_stack_size_;
diff --git a/src/compiler-dispatcher/compiler-dispatcher-tracer.cc b/src/compiler-dispatcher/compiler-dispatcher-tracer.cc
new file mode 100644
index 0000000..f8af05f
--- /dev/null
+++ b/src/compiler-dispatcher/compiler-dispatcher-tracer.cc
@@ -0,0 +1,171 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
+
+#include "src/isolate.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+double MonotonicallyIncreasingTimeInMs() {
+  return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
+         static_cast<double>(base::Time::kMillisecondsPerSecond);
+}
+
+}  // namespace
+
+CompilerDispatcherTracer::Scope::Scope(CompilerDispatcherTracer* tracer,
+                                       ScopeID scope_id, size_t num)
+    : tracer_(tracer), scope_id_(scope_id), num_(num) {
+  start_time_ = MonotonicallyIncreasingTimeInMs();
+  // TODO(cbruni): remove once we have fully moved to a trace-based system.
+  if (V8_UNLIKELY(FLAG_runtime_stats)) {
+    RuntimeCallStats::Enter(tracer_->runtime_call_stats_, &timer_,
+                            &RuntimeCallStats::CompilerDispatcher);
+  }
+}
+
+CompilerDispatcherTracer::Scope::~Scope() {
+  double elapsed = MonotonicallyIncreasingTimeInMs() - start_time_;
+  switch (scope_id_) {
+    case ScopeID::kPrepareToParse:
+      tracer_->RecordPrepareToParse(elapsed);
+      break;
+    case ScopeID::kParse:
+      tracer_->RecordParse(elapsed, num_);
+      break;
+    case ScopeID::kFinalizeParsing:
+      tracer_->RecordFinalizeParsing(elapsed);
+      break;
+    case ScopeID::kPrepareToCompile:
+      tracer_->RecordPrepareToCompile(elapsed);
+      break;
+    case ScopeID::kCompile:
+      tracer_->RecordCompile(elapsed, num_);
+      break;
+    case ScopeID::kFinalizeCompiling:
+      tracer_->RecordFinalizeCompiling(elapsed);
+      break;
+  }
+  // TODO(cbruni): remove once we have fully moved to a trace-based system.
+  if (V8_UNLIKELY(FLAG_runtime_stats)) {
+    RuntimeCallStats::Leave(tracer_->runtime_call_stats_, &timer_);
+  }
+}
+
+// static
+const char* CompilerDispatcherTracer::Scope::Name(ScopeID scope_id) {
+  switch (scope_id) {
+    case ScopeID::kPrepareToParse:
+      return "V8.BackgroundCompile_PrepareToParse";
+    case ScopeID::kParse:
+      return "V8.BackgroundCompile_Parse";
+    case ScopeID::kFinalizeParsing:
+      return "V8.BackgroundCompile_FinalizeParsing";
+    case ScopeID::kPrepareToCompile:
+      return "V8.BackgroundCompile_PrepareToCompile";
+    case ScopeID::kCompile:
+      return "V8.BackgroundCompile_Compile";
+    case ScopeID::kFinalizeCompiling:
+      return "V8.BackgroundCompile_FinalizeCompiling";
+  }
+  UNREACHABLE();
+  return nullptr;
+}
+
+CompilerDispatcherTracer::CompilerDispatcherTracer(Isolate* isolate)
+    : runtime_call_stats_(nullptr) {
+  // The isolate may be nullptr in unit tests.
+  if (isolate) {
+    runtime_call_stats_ = isolate->counters()->runtime_call_stats();
+  }
+}
+
+CompilerDispatcherTracer::~CompilerDispatcherTracer() {}
+
+void CompilerDispatcherTracer::RecordPrepareToParse(double duration_ms) {
+  base::LockGuard<base::Mutex> lock(&mutex_);
+  prepare_parse_events_.Push(duration_ms);
+}
+
+void CompilerDispatcherTracer::RecordParse(double duration_ms,
+                                           size_t source_length) {
+  base::LockGuard<base::Mutex> lock(&mutex_);
+  parse_events_.Push(std::make_pair(source_length, duration_ms));
+}
+
+void CompilerDispatcherTracer::RecordFinalizeParsing(double duration_ms) {
+  base::LockGuard<base::Mutex> lock(&mutex_);
+  finalize_parsing_events_.Push(duration_ms);
+}
+
+void CompilerDispatcherTracer::RecordPrepareToCompile(double duration_ms) {
+  base::LockGuard<base::Mutex> lock(&mutex_);
+  prepare_compile_events_.Push(duration_ms);
+}
+
+void CompilerDispatcherTracer::RecordCompile(double duration_ms,
+                                             size_t ast_size_in_bytes) {
+  base::LockGuard<base::Mutex> lock(&mutex_);
+  compile_events_.Push(std::make_pair(ast_size_in_bytes, duration_ms));
+}
+
+void CompilerDispatcherTracer::RecordFinalizeCompiling(double duration_ms) {
+  base::LockGuard<base::Mutex> lock(&mutex_);
+  finalize_compiling_events_.Push(duration_ms);
+}
+
+double CompilerDispatcherTracer::EstimatePrepareToParseInMs() const {
+  base::LockGuard<base::Mutex> lock(&mutex_);
+  return Average(prepare_parse_events_);
+}
+
+double CompilerDispatcherTracer::EstimateParseInMs(size_t source_length) const {
+  base::LockGuard<base::Mutex> lock(&mutex_);
+  return Estimate(parse_events_, source_length);
+}
+
+double CompilerDispatcherTracer::EstimateFinalizeParsingInMs() {
+  base::LockGuard<base::Mutex> lock(&mutex_);
+  return Average(finalize_parsing_events_);
+}
+
+double CompilerDispatcherTracer::EstimatePrepareToCompileInMs() {
+  base::LockGuard<base::Mutex> lock(&mutex_);
+  return Average(prepare_compile_events_);
+}
+
+double CompilerDispatcherTracer::EstimateCompileInMs(size_t ast_size_in_bytes) {
+  base::LockGuard<base::Mutex> lock(&mutex_);
+  return Estimate(compile_events_, ast_size_in_bytes);
+}
+
+double CompilerDispatcherTracer::EstimateFinalizeCompilingInMs() {
+  base::LockGuard<base::Mutex> lock(&mutex_);
+  return Average(finalize_compiling_events_);
+}
+
+double CompilerDispatcherTracer::Average(
+    const base::RingBuffer<double>& buffer) {
+  if (buffer.Count() == 0) return 0.0;
+  double sum = buffer.Sum([](double a, double b) { return a + b; }, 0.0);
+  return sum / buffer.Count();
+}
+
+double CompilerDispatcherTracer::Estimate(
+    const base::RingBuffer<std::pair<size_t, double>>& buffer, size_t num) {
+  if (buffer.Count() == 0) return 0.0;
+  std::pair<size_t, double> sum = buffer.Sum(
+      [](std::pair<size_t, double> a, std::pair<size_t, double> b) {
+        return std::make_pair(a.first + b.first, a.second + b.second);
+      },
+      std::make_pair(0, 0.0));
+  return num * (sum.second / sum.first);
+}
+
+}  // namespace internal
+}  // namespace v8
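Estimate() fits a simple linear model: total recorded duration divided by total recorded size, scaled by the requested size. A standalone back-of-envelope check of the same arithmetic (plain C++, not V8 API):

    #include <cstdio>
    #include <utility>

    int main() {
      // Two recorded parse events: (source_length, duration_ms), mirroring
      // the std::pair<size_t, double> entries in parse_events_.
      std::pair<size_t, double> events[] = {{1000, 2.0}, {3000, 4.0}};
      size_t total_length = 0;
      double total_ms = 0.0;
      for (const auto& e : events) {  // stands in for RingBuffer::Sum
        total_length += e.first;
        total_ms += e.second;
      }
      // Estimate(parse_events_, 2000) => 2000 * (6.0 / 4000) = 3.0 ms.
      std::printf("%.1f ms\n", 2000 * (total_ms / total_length));
      return 0;
    }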
diff --git a/src/compiler-dispatcher/compiler-dispatcher-tracer.h b/src/compiler-dispatcher/compiler-dispatcher-tracer.h
new file mode 100644
index 0000000..b505511
--- /dev/null
+++ b/src/compiler-dispatcher/compiler-dispatcher-tracer.h
@@ -0,0 +1,98 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_DISPATCHER_COMPILER_DISPATCHER_TRACER_H_
+#define V8_COMPILER_DISPATCHER_COMPILER_DISPATCHER_TRACER_H_
+
+#include <utility>
+
+#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/ring-buffer.h"
+#include "src/counters.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class RuntimeCallStats;
+
+#define COMPILER_DISPATCHER_TRACE_SCOPE_WITH_NUM(tracer, scope_id, num)      \
+  CompilerDispatcherTracer::ScopeID tracer_scope_id(                         \
+      CompilerDispatcherTracer::ScopeID::scope_id);                          \
+  CompilerDispatcherTracer::Scope trace_scope(tracer, tracer_scope_id, num); \
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),                      \
+               CompilerDispatcherTracer::Scope::Name(tracer_scope_id))
+
+#define COMPILER_DISPATCHER_TRACE_SCOPE(tracer, scope_id) \
+  COMPILER_DISPATCHER_TRACE_SCOPE_WITH_NUM(tracer, scope_id, 0)
+
+class V8_EXPORT_PRIVATE CompilerDispatcherTracer {
+ public:
+  enum class ScopeID {
+    kPrepareToParse,
+    kParse,
+    kFinalizeParsing,
+    kPrepareToCompile,
+    kCompile,
+    kFinalizeCompiling
+  };
+
+  class Scope {
+   public:
+    Scope(CompilerDispatcherTracer* tracer, ScopeID scope_id, size_t num = 0);
+    ~Scope();
+
+    static const char* Name(ScopeID scope_id);
+
+   private:
+    CompilerDispatcherTracer* tracer_;
+    ScopeID scope_id_;
+    size_t num_;
+    double start_time_;
+    RuntimeCallTimer timer_;
+
+    DISALLOW_COPY_AND_ASSIGN(Scope);
+  };
+
+  explicit CompilerDispatcherTracer(Isolate* isolate);
+  ~CompilerDispatcherTracer();
+
+  void RecordPrepareToParse(double duration_ms);
+  void RecordParse(double duration_ms, size_t source_length);
+  void RecordFinalizeParsing(double duration_ms);
+  void RecordPrepareToCompile(double duration_ms);
+  void RecordCompile(double duration_ms, size_t ast_size_in_bytes);
+  void RecordFinalizeCompiling(double duration_ms);
+
+  double EstimatePrepareToParseInMs() const;
+  double EstimateParseInMs(size_t source_length) const;
+  double EstimateFinalizeParsingInMs();
+  double EstimatePrepareToCompileInMs();
+  double EstimateCompileInMs(size_t ast_size_in_bytes);
+  double EstimateFinalizeCompilingInMs();
+
+ private:
+  static double Average(const base::RingBuffer<double>& buffer);
+  static double Estimate(
+      const base::RingBuffer<std::pair<size_t, double>>& buffer, size_t num);
+
+  mutable base::Mutex mutex_;
+  base::RingBuffer<double> prepare_parse_events_;
+  base::RingBuffer<std::pair<size_t, double>> parse_events_;
+  base::RingBuffer<double> finalize_parsing_events_;
+  base::RingBuffer<double> prepare_compile_events_;
+  base::RingBuffer<std::pair<size_t, double>> compile_events_;
+  base::RingBuffer<double> finalize_compiling_events_;
+
+  RuntimeCallStats* runtime_call_stats_;
+
+  DISALLOW_COPY_AND_ASSIGN(CompilerDispatcherTracer);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_DISPATCHER_COMPILER_DISPATCHER_TRACER_H_
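For reference, COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kParse), the single-argument form with num defaulted to 0, expands at a use site to roughly:

    CompilerDispatcherTracer::ScopeID tracer_scope_id(
        CompilerDispatcherTracer::ScopeID::kParse);
    CompilerDispatcherTracer::Scope trace_scope(tracer_, tracer_scope_id, 0);
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
                 CompilerDispatcherTracer::Scope::Name(tracer_scope_id));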
diff --git a/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index 75c50ee..1169506 100644
--- a/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -134,7 +134,23 @@
   }
 }
 
-void OptimizingCompileDispatcher::Flush() {
+void OptimizingCompileDispatcher::Flush(BlockingBehavior blocking_behavior) {
+  if (FLAG_block_concurrent_recompilation) Unblock();
+  if (blocking_behavior == BlockingBehavior::kDontBlock) {
+    base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
+    while (input_queue_length_ > 0) {
+      CompilationJob* job = input_queue_[InputQueueIndex(0)];
+      DCHECK_NOT_NULL(job);
+      input_queue_shift_ = InputQueueIndex(1);
+      input_queue_length_--;
+      DisposeCompilationJob(job, true);
+    }
+    FlushOutputQueue(true);
+    if (FLAG_trace_concurrent_recompilation) {
+      PrintF("  ** Flushed concurrent recompilation queues (not blocking).\n");
+    }
+    return;
+  }
   base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
   if (FLAG_block_concurrent_recompilation) Unblock();
   {
diff --git a/src/compiler-dispatcher/optimizing-compile-dispatcher.h b/src/compiler-dispatcher/optimizing-compile-dispatcher.h
index 8c032ab..7e08161 100644
--- a/src/compiler-dispatcher/optimizing-compile-dispatcher.h
+++ b/src/compiler-dispatcher/optimizing-compile-dispatcher.h
@@ -22,6 +22,8 @@
 
 class OptimizingCompileDispatcher {
  public:
+  enum class BlockingBehavior { kBlock, kDontBlock };
+
   explicit OptimizingCompileDispatcher(Isolate* isolate)
       : isolate_(isolate),
         input_queue_capacity_(FLAG_concurrent_recompilation_queue_length),
@@ -38,7 +40,7 @@
 
   void Run();
   void Stop();
-  void Flush();
+  void Flush(BlockingBehavior blocking_behavior);
   void QueueForOptimization(CompilationJob* job);
   void Unblock();
   void InstallOptimizedFunctions();
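Flush() now takes an explicit blocking mode. Hypothetical call sites for the two behaviors (dispatcher is a stand-in pointer):

    // Drop queued jobs inline without waiting on the background thread:
    dispatcher->Flush(OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
    // Original semantics: block until the background thread has flushed:
    dispatcher->Flush(OptimizingCompileDispatcher::BlockingBehavior::kBlock);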
diff --git a/src/compiler.cc b/src/compiler.cc
index ec402fa..3435f53 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -20,7 +20,6 @@
 #include "src/crankshaft/hydrogen.h"
 #include "src/debug/debug.h"
 #include "src/debug/liveedit.h"
-#include "src/deoptimizer.h"
 #include "src/frames-inl.h"
 #include "src/full-codegen/full-codegen.h"
 #include "src/globals.h"
@@ -251,22 +250,6 @@
 
 namespace {
 
-bool IsEvalToplevel(Handle<SharedFunctionInfo> shared) {
-  return shared->is_toplevel() && shared->script()->IsScript() &&
-         Script::cast(shared->script())->compilation_type() ==
-             Script::COMPILATION_TYPE_EVAL;
-}
-
-bool Parse(ParseInfo* info) {
-  // Create a canonical handle scope if compiling ignition bytecode. This is
-  // required by the constant array builder to de-duplicate objects without
-  // dereferencing handles.
-  std::unique_ptr<CanonicalHandleScope> canonical;
-  if (FLAG_ignition) canonical.reset(new CanonicalHandleScope(info->isolate()));
-
-  return Parser::ParseStatic(info);
-}
-
 void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
                                CompilationInfo* info) {
   // Log the code generation. If source information is available include
@@ -320,21 +303,46 @@
       info->literal()->feedback_vector_spec()));
 }
 
-bool ShouldUseIgnition(CompilationInfo* info) {
-  if (!FLAG_ignition) return false;
+bool UseTurboFan(Handle<SharedFunctionInfo> shared) {
+  bool optimization_disabled = shared->optimization_disabled();
+  bool dont_crankshaft = shared->dont_crankshaft();
 
+  // Check the enabling conditions for TurboFan.
+  // 1. "use asm" code.
+  bool is_turbofanable_asm =
+      FLAG_turbo_asm && shared->asm_function() && !optimization_disabled;
+
+  // 2. Fallback for features unsupported by Crankshaft.
+  bool is_unsupported_by_crankshaft_but_turbofanable =
+      dont_crankshaft && strcmp(FLAG_turbo_filter, "~~") == 0 &&
+      !optimization_disabled;
+
+  // 3. Explicitly enabled by the command-line filter.
+  bool passes_turbo_filter = shared->PassesFilter(FLAG_turbo_filter);
+
+  return is_turbofanable_asm || is_unsupported_by_crankshaft_but_turbofanable ||
+         passes_turbo_filter;
+}
+
+bool ShouldUseIgnition(CompilationInfo* info) {
   DCHECK(info->has_shared_info());
 
+  // Skip Ignition for asm.js functions.
+  if (info->shared_info()->asm_function()) {
+    return false;
+  }
+
   // When requesting debug code as a replacement for existing code, we provide
   // the same kind as the existing code (to prevent implicit tier-change).
   if (info->is_debug() && info->shared_info()->is_compiled()) {
     return !info->shared_info()->HasBaselineCode();
   }
 
-  // Since we can't OSR from Ignition, skip Ignition for asm.js functions.
-  if (info->shared_info()->asm_function()) {
-    return false;
-  }
+  // Code destined for TurboFan should be compiled with Ignition first.
+  if (UseTurboFan(info->shared_info())) return true;
+
+  // Only use Ignition for any other function if FLAG_ignition is true.
+  if (!FLAG_ignition) return false;
 
   // Checks whether top level functions should be passed by the filter.
   if (info->shared_info()->is_toplevel()) {
@@ -360,38 +368,6 @@
   }
 }
 
-bool GenerateUnoptimizedCode(CompilationInfo* info) {
-  if (FLAG_validate_asm && info->scope()->asm_module() &&
-      !info->shared_info()->is_asm_wasm_broken()) {
-    EnsureFeedbackMetadata(info);
-    MaybeHandle<FixedArray> wasm_data;
-    wasm_data = AsmJs::ConvertAsmToWasm(info->parse_info());
-    if (!wasm_data.is_null()) {
-      info->shared_info()->set_asm_wasm_data(*wasm_data.ToHandleChecked());
-      info->SetCode(info->isolate()->builtins()->InstantiateAsmJs());
-      return true;
-    }
-  }
-
-  std::unique_ptr<CompilationJob> job(GetUnoptimizedCompilationJob(info));
-  if (job->PrepareJob() != CompilationJob::SUCCEEDED) return false;
-  if (job->ExecuteJob() != CompilationJob::SUCCEEDED) return false;
-  if (job->FinalizeJob() != CompilationJob::SUCCEEDED) return false;
-  job->RecordUnoptimizedCompilationStats();
-  return true;
-}
-
-bool CompileUnoptimizedCode(CompilationInfo* info) {
-  DCHECK(AllowCompilation::IsAllowed(info->isolate()));
-  if (!Compiler::Analyze(info->parse_info()) ||
-      !GenerateUnoptimizedCode(info)) {
-    Isolate* isolate = info->isolate();
-    if (!isolate->has_pending_exception()) isolate->StackOverflow();
-    return false;
-  }
-  return true;
-}
-
 void InstallSharedScopeInfo(CompilationInfo* info,
                             Handle<SharedFunctionInfo> shared) {
   Handle<ScopeInfo> scope_info = info->scope()->scope_info();
@@ -426,9 +402,50 @@
 
   // Install compilation result on the shared function info
   InstallSharedCompilationResult(info, shared);
+}
 
-  // Record the function compilation event.
-  RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, info);
+CompilationJob::Status FinalizeUnoptimizedCompilationJob(CompilationJob* job) {
+  CompilationJob::Status status = job->FinalizeJob();
+  if (status == CompilationJob::SUCCEEDED) {
+    InstallUnoptimizedCode(job->info());
+    job->RecordUnoptimizedCompilationStats();
+  }
+  return status;
+}
+
+bool GenerateUnoptimizedCode(CompilationInfo* info) {
+  if (FLAG_validate_asm && info->scope()->asm_module() &&
+      !info->shared_info()->is_asm_wasm_broken() && !info->is_debug()) {
+    EnsureFeedbackMetadata(info);
+    MaybeHandle<FixedArray> wasm_data;
+    wasm_data = AsmJs::ConvertAsmToWasm(info->parse_info());
+    if (!wasm_data.is_null()) {
+      info->shared_info()->set_asm_wasm_data(*wasm_data.ToHandleChecked());
+      info->SetCode(info->isolate()->builtins()->InstantiateAsmJs());
+      InstallUnoptimizedCode(info);
+      return true;
+    }
+  }
+
+  std::unique_ptr<CompilationJob> job(GetUnoptimizedCompilationJob(info));
+  if (job->PrepareJob() != CompilationJob::SUCCEEDED) return false;
+  if (job->ExecuteJob() != CompilationJob::SUCCEEDED) return false;
+  if (FinalizeUnoptimizedCompilationJob(job.get()) !=
+      CompilationJob::SUCCEEDED) {
+    return false;
+  }
+  return true;
+}
+
+bool CompileUnoptimizedCode(CompilationInfo* info) {
+  DCHECK(AllowCompilation::IsAllowed(info->isolate()));
+  if (!Compiler::Analyze(info->parse_info()) ||
+      !GenerateUnoptimizedCode(info)) {
+    Isolate* isolate = info->isolate();
+    if (!isolate->has_pending_exception()) isolate->StackOverflow();
+    return false;
+  }
+  return true;
 }
 
 MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCode(CompilationInfo* info) {
@@ -436,28 +453,19 @@
   PostponeInterruptsScope postpone(info->isolate());
 
   // Parse and update CompilationInfo with the results.
-  if (!Parse(info->parse_info())) return MaybeHandle<Code>();
+  if (!Parser::ParseStatic(info->parse_info())) return MaybeHandle<Code>();
   DCHECK_EQ(info->shared_info()->language_mode(),
             info->literal()->language_mode());
 
   // Compile either unoptimized code or bytecode for the interpreter.
   if (!CompileUnoptimizedCode(info)) return MaybeHandle<Code>();
 
-  InstallUnoptimizedCode(info);
+  // Record the function compilation event.
+  RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, info);
 
   return info->code();
 }
 
-CompilationJob::Status FinalizeUnoptimizedCompilationJob(CompilationJob* job) {
-  CompilationJob::Status status = job->FinalizeJob();
-  if (status == CompilationJob::SUCCEEDED) {
-    DCHECK(!job->info()->shared_info()->is_compiled());
-    InstallUnoptimizedCode(job->info());
-    job->RecordUnoptimizedCompilationStats();
-  }
-  return status;
-}
-
 MUST_USE_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeMap(
     Handle<JSFunction> function, BailoutId osr_ast_id) {
   Handle<SharedFunctionInfo> shared(function->shared());
@@ -495,29 +503,9 @@
   Handle<Context> native_context(function->context()->native_context());
   SharedFunctionInfo::AddToOptimizedCodeMap(shared, native_context, code,
                                             literals, info->osr_ast_id());
-
-  // Do not cache (native) context-independent code compiled for OSR.
-  if (code->is_turbofanned() && info->is_osr()) return;
-
-  // Cache optimized (native) context-independent code.
-  if (FLAG_turbo_cache_shared_code && code->is_turbofanned() &&
-      !info->is_native_context_specializing()) {
-    DCHECK(!info->is_function_context_specializing());
-    DCHECK(info->osr_ast_id().IsNone());
-    Handle<SharedFunctionInfo> shared(function->shared());
-    SharedFunctionInfo::AddSharedCodeToOptimizedCodeMap(shared, code);
-  }
 }
 
 bool Renumber(ParseInfo* parse_info) {
-  // Create a canonical handle scope if compiling ignition bytecode. This is
-  // required by the constant array builder to de-duplicate objects without
-  // dereferencing handles.
-  std::unique_ptr<CanonicalHandleScope> canonical;
-  if (FLAG_ignition) {
-    canonical.reset(new CanonicalHandleScope(parse_info->isolate()));
-  }
-
   if (!AstNumbering::Renumber(parse_info->isolate(), parse_info->zone(),
                               parse_info->literal())) {
     return false;
@@ -536,27 +524,6 @@
   return true;
 }
 
-bool UseTurboFan(Handle<SharedFunctionInfo> shared) {
-  bool optimization_disabled = shared->optimization_disabled();
-  bool dont_crankshaft = shared->dont_crankshaft();
-
-  // Check the enabling conditions for Turbofan.
-  // 1. "use asm" code.
-  bool is_turbofanable_asm =
-      FLAG_turbo_asm && shared->asm_function() && !optimization_disabled;
-
-  // 2. Fallback for features unsupported by Crankshaft.
-  bool is_unsupported_by_crankshaft_but_turbofanable =
-      dont_crankshaft && strcmp(FLAG_turbo_filter, "~~") == 0 &&
-      !optimization_disabled;
-
-  // 3. Explicitly enabled by the command-line filter.
-  bool passes_turbo_filter = shared->PassesFilter(FLAG_turbo_filter);
-
-  return is_turbofanable_asm || is_unsupported_by_crankshaft_but_turbofanable ||
-         passes_turbo_filter;
-}
-
 bool GetOptimizedCodeNow(CompilationJob* job) {
   CompilationInfo* info = job->info();
   Isolate* isolate = info->isolate();
@@ -652,8 +619,8 @@
   DCHECK_IMPLIES(ignition_osr, !osr_ast_id.IsNone());
   DCHECK_IMPLIES(ignition_osr, FLAG_ignition_osr);
 
-  // Flag combination --ignition-osr --no-turbo-from-bytecode is unsupported.
-  if (ignition_osr && !FLAG_turbo_from_bytecode) return MaybeHandle<Code>();
+  // Shared function no longer needs to be tiered up.
+  shared->set_marked_for_tier_up(false);
 
   Handle<Code> cached_code;
   // TODO(4764): When compiling for OSR from bytecode, BailoutId might derive
@@ -673,8 +640,10 @@
   }
 
   // Reset profiler ticks, function is no longer considered hot.
-  if (shared->is_compiled()) {
+  if (shared->HasBaselineCode()) {
     shared->code()->set_profiler_ticks(0);
+  } else if (shared->HasBytecodeArray()) {
+    shared->set_profiler_ticks(0);
   }
 
   VMState<COMPILER> state(isolate);
@@ -708,7 +677,7 @@
   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.OptimizeCode");
 
   // TurboFan can optimize directly from existing bytecode.
-  if (FLAG_turbo_from_bytecode && use_turbofan && ShouldUseIgnition(info)) {
+  if (use_turbofan && ShouldUseIgnition(info)) {
     if (info->is_osr() && !ignition_osr) return MaybeHandle<Code>();
     if (!Compiler::EnsureBytecode(info)) {
       if (isolate->has_pending_exception()) isolate->clear_pending_exception();
@@ -717,14 +686,6 @@
     info->MarkAsOptimizeFromBytecode();
   }
 
-  if (IsEvalToplevel(shared)) {
-    parse_info->set_eval();
-    if (function->context()->IsNativeContext()) parse_info->set_global();
-    parse_info->set_toplevel();
-    parse_info->set_allow_lazy_parsing(false);
-    parse_info->set_lazy(false);
-  }
-
   // Verify that OSR compilations are delegated to the correct graph builder.
   // Depending on the underlying frame the semantics of the {BailoutId} differ
   // and the various graph builders hard-code a certain semantic:
@@ -775,7 +736,13 @@
                "V8.RecompileSynchronous");
 
   Handle<SharedFunctionInfo> shared = info->shared_info();
-  shared->code()->set_profiler_ticks(0);
+
+  // Reset profiler ticks, function is no longer considered hot.
+  if (shared->HasBaselineCode()) {
+    shared->code()->set_profiler_ticks(0);
+  } else if (shared->HasBytecodeArray()) {
+    shared->set_profiler_ticks(0);
+  }
 
   DCHECK(!shared->HasDebugInfo());
 
@@ -818,84 +785,17 @@
   return CompilationJob::FAILED;
 }
 
-class InterpreterActivationsFinder : public ThreadVisitor,
-                                     public OptimizedFunctionVisitor {
- public:
-  explicit InterpreterActivationsFinder(SharedFunctionInfo* shared)
-      : shared_(shared), has_activations_(false) {}
-
-  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
-    Address* activation_pc_address = nullptr;
-    JavaScriptFrameIterator it(isolate, top);
-    for (; !it.done(); it.Advance()) {
-      JavaScriptFrame* frame = it.frame();
-      if (FLAG_turbo_from_bytecode && FLAG_ignition_osr &&
-          frame->is_optimized() && frame->function()->shared() == shared_) {
-        // If we are able to optimize functions directly from bytecode, then
-        // there might be optimized OSR code active on the stack that is not
-        // reachable through a function. We count this as an activation.
-        has_activations_ = true;
-      }
-      if (frame->is_interpreted() && frame->function()->shared() == shared_) {
-        has_activations_ = true;
-        activation_pc_address = frame->pc_address();
-      }
-    }
-
-    if (activation_pc_address) {
-      activation_pc_addresses_.push_back(activation_pc_address);
-    }
-  }
-
-  void VisitFunction(JSFunction* function) {
-    if (function->Inlines(shared_)) has_activations_ = true;
-  }
-
-  void EnterContext(Context* context) {}
-  void LeaveContext(Context* context) {}
-
-  bool MarkActivationsForBaselineOnReturn(Isolate* isolate) {
-    if (activation_pc_addresses_.empty()) return false;
-
-    for (Address* activation_pc_address : activation_pc_addresses_) {
-      DCHECK(isolate->inner_pointer_to_code_cache()
-                 ->GetCacheEntry(*activation_pc_address)
-                 ->code->is_interpreter_trampoline_builtin());
-      *activation_pc_address =
-          isolate->builtins()->InterpreterMarkBaselineOnReturn()->entry();
-    }
-    return true;
-  }
-
-  bool has_activations() { return has_activations_; }
-
- private:
-  SharedFunctionInfo* shared_;
-  bool has_activations_;
-  std::vector<Address*> activation_pc_addresses_;
-};
-
-bool HasInterpreterActivations(
-    Isolate* isolate, InterpreterActivationsFinder* activations_finder) {
-  activations_finder->VisitThread(isolate, isolate->thread_local_top());
-  isolate->thread_manager()->IterateArchivedThreads(activations_finder);
-  if (FLAG_turbo_from_bytecode) {
-    // If we are able to optimize functions directly from bytecode, then there
-    // might be optimized functions that rely on bytecode being around. We need
-    // to prevent switching the given function to baseline code in those cases.
-    Deoptimizer::VisitAllOptimizedFunctions(isolate, activations_finder);
-  }
-  return activations_finder->has_activations();
-}
-
 MaybeHandle<Code> GetBaselineCode(Handle<JSFunction> function) {
   Isolate* isolate = function->GetIsolate();
   VMState<COMPILER> state(isolate);
   PostponeInterruptsScope postpone(isolate);
-  Zone zone(isolate->allocator());
-  ParseInfo parse_info(&zone, function);
+  Zone zone(isolate->allocator(), ZONE_NAME);
+  ParseInfo parse_info(&zone, handle(function->shared()));
   CompilationInfo info(&parse_info, function);
 
+  // Function no longer needs to be tiered up.
+  function->shared()->set_marked_for_tier_up(false);
+
   // Reset profiler ticks, function is no longer considered hot.
   if (function->shared()->HasBytecodeArray()) {
     function->shared()->set_profiler_ticks(0);
@@ -920,31 +820,6 @@
     return MaybeHandle<Code>();
   }
 
-  // TODO(4280): For now we disable switching to baseline code in the presence
-  // of interpreter activations of the given function. The reasons is that the
-  // underlying bytecode is cleared below. Note that this only applies in case
-  // the --ignition-preserve-bytecode flag is not passed.
-  if (!FLAG_ignition_preserve_bytecode) {
-    InterpreterActivationsFinder activations_finder(function->shared());
-    if (HasInterpreterActivations(isolate, &activations_finder)) {
-      if (FLAG_trace_opt) {
-        OFStream os(stdout);
-        os << "[unable to switch " << Brief(*function) << " due to activations]"
-           << std::endl;
-      }
-
-      if (activations_finder.MarkActivationsForBaselineOnReturn(isolate)) {
-        if (FLAG_trace_opt) {
-          OFStream os(stdout);
-          os << "[marking " << Brief(function->shared())
-             << " for baseline recompilation on return]" << std::endl;
-        }
-      }
-
-      return MaybeHandle<Code>();
-    }
-  }
-
   if (FLAG_trace_opt) {
     OFStream os(stdout);
     os << "[switching method " << Brief(*function) << " to baseline code]"
@@ -952,7 +827,7 @@
   }
 
   // Parse and update CompilationInfo with the results.
-  if (!Parse(info.parse_info())) return MaybeHandle<Code>();
+  if (!Parser::ParseStatic(info.parse_info())) return MaybeHandle<Code>();
   Handle<SharedFunctionInfo> shared = info.shared_info();
   DCHECK_EQ(shared->language_mode(), info.literal()->language_mode());
 
@@ -963,12 +838,6 @@
     return MaybeHandle<Code>();
   }
 
-  // TODO(4280): For now we play it safe and remove the bytecode array when we
-  // switch to baseline code. We might consider keeping around the bytecode so
-  // that it can be used as the "source of truth" eventually. Note that this
-  // only applies in case the --ignition-preserve-bytecode flag is not passed.
-  if (!FLAG_ignition_preserve_bytecode) shared->ClearBytecodeArray();
-
   // Update the shared function info with the scope info.
   InstallSharedScopeInfo(&info, shared);
 
@@ -1003,6 +872,46 @@
     return cached_code;
   }
 
+  if (function->shared()->marked_for_tier_up()) {
+    DCHECK(FLAG_mark_shared_functions_for_tier_up);
+
+    function->shared()->set_marked_for_tier_up(false);
+
+    switch (Compiler::NextCompilationTier(*function)) {
+      case Compiler::BASELINE: {
+        if (FLAG_trace_opt) {
+          PrintF("[recompiling function ");
+          function->ShortPrint();
+          PrintF(
+              " to baseline eagerly (shared function marked for tier up)]\n");
+        }
+
+        Handle<Code> code;
+        if (GetBaselineCode(function).ToHandle(&code)) {
+          return code;
+        }
+        break;
+      }
+      case Compiler::OPTIMIZED: {
+        if (FLAG_trace_opt) {
+          PrintF("[optimizing method ");
+          function->ShortPrint();
+          PrintF(" eagerly (shared function marked for tier up)]\n");
+        }
+
+        Handle<Code> code;
+        // TODO(leszeks): Look into performing this compilation concurrently.
+        if (GetOptimizedCode(function, Compiler::NOT_CONCURRENT)
+                .ToHandle(&code)) {
+          return code;
+        }
+        break;
+      }
+      default:
+        UNREACHABLE();
+    }
+  }
+
   if (function->shared()->is_compiled()) {
     return Handle<Code>(function->shared()->code());
   }
@@ -1013,8 +922,8 @@
     return entry;
   }
 
-  Zone zone(isolate->allocator());
-  ParseInfo parse_info(&zone, function);
+  Zone zone(isolate->allocator(), ZONE_NAME);
+  ParseInfo parse_info(&zone, handle(function->shared()));
   CompilationInfo info(&parse_info, function);
   Handle<Code> result;
   ASSIGN_RETURN_ON_EXCEPTION(isolate, result, GetUnoptimizedCode(&info), Code);
@@ -1059,50 +968,13 @@
 
   isolate->debug()->OnBeforeCompile(script);
 
-  DCHECK(parse_info->is_eval() || parse_info->is_global() ||
-         parse_info->is_module());
-
-  parse_info->set_toplevel();
-
   Handle<SharedFunctionInfo> result;
 
   { VMState<COMPILER> state(info->isolate());
-    if (parse_info->literal() == NULL) {
-      // Parse the script if needed (if it's already parsed, literal() is
-      // non-NULL). If compiling for debugging, we may eagerly compile inner
-      // functions, so do not parse lazily in that case.
-      ScriptCompiler::CompileOptions options = parse_info->compile_options();
-      bool parse_allow_lazy = (options == ScriptCompiler::kConsumeParserCache ||
-                               String::cast(script->source())->length() >
-                                   FLAG_min_preparse_length) &&
-                              !info->is_debug();
-
-      // Consider parsing eagerly when targeting the code cache.
-      parse_allow_lazy &= !(FLAG_serialize_eager && info->will_serialize());
-
-      // Consider parsing eagerly when targeting Ignition.
-      parse_allow_lazy &= !(FLAG_ignition && FLAG_ignition_eager &&
-                            !isolate->serializer_enabled());
-
-      parse_info->set_allow_lazy_parsing(parse_allow_lazy);
-      if (!parse_allow_lazy &&
-          (options == ScriptCompiler::kProduceParserCache ||
-           options == ScriptCompiler::kConsumeParserCache)) {
-        // We are going to parse eagerly, but we either 1) have cached data
-        // produced by lazy parsing or 2) are asked to generate cached data.
-        // Eager parsing cannot benefit from cached data, and producing cached
-        // data while parsing eagerly is not implemented.
-        parse_info->set_cached_data(nullptr);
-        parse_info->set_compile_options(ScriptCompiler::kNoCompileOptions);
-      }
-
-      if (!Parse(parse_info)) {
-        return Handle<SharedFunctionInfo>::null();
-      }
+    if (parse_info->literal() == nullptr && !Parser::ParseStatic(parse_info)) {
+      return Handle<SharedFunctionInfo>::null();
     }
 
-    DCHECK(!info->is_debug() || !parse_info->allow_lazy_parsing());
-
     FunctionLiteral* lit = parse_info->literal();
 
     // Measure how long it takes to do the compilation; only take the
@@ -1122,10 +994,6 @@
     DCHECK_EQ(kNoSourcePosition, lit->function_token_position());
     result = NewSharedFunctionInfoForLiteral(isolate, lit, script);
     result->set_is_toplevel(true);
-    if (parse_info->is_eval()) {
-      // Eval scripts cannot be (re-)compiled without context.
-      result->set_allows_lazy_compilation_without_context(false);
-    }
     parse_info->set_shared_info(result);
 
     // Compile the code.
@@ -1133,12 +1001,6 @@
       return Handle<SharedFunctionInfo>::null();
     }
 
-    // Update the shared function info with the scope info.
-    InstallSharedScopeInfo(info, result);
-
-    // Install compilation result on the shared function info
-    InstallSharedCompilationResult(info, result);
-
     Handle<String> script_name =
         script->name()->IsString()
             ? Handle<String>(String::cast(script->name()))
@@ -1173,7 +1035,7 @@
 }
 
 bool Compiler::ParseAndAnalyze(ParseInfo* info) {
-  if (!Parse(info)) return false;
+  if (!Parser::ParseStatic(info)) return false;
   if (!Compiler::Analyze(info)) return false;
   DCHECK_NOT_NULL(info->literal());
   DCHECK_NOT_NULL(info->scope());
@@ -1246,8 +1108,8 @@
       code = isolate->builtins()->InterpreterEntryTrampoline();
       function->shared()->ReplaceCode(*code);
     } else {
-      Zone zone(isolate->allocator());
-      ParseInfo parse_info(&zone, function);
+      Zone zone(isolate->allocator(), ZONE_NAME);
+      ParseInfo parse_info(&zone, handle(function->shared()));
       CompilationInfo info(&parse_info, function);
       if (!GetUnoptimizedCode(&info).ToHandle(&code)) {
         return false;
@@ -1266,44 +1128,14 @@
   return true;
 }
 
-bool Compiler::CompileDebugCode(Handle<JSFunction> function) {
-  Isolate* isolate = function->GetIsolate();
-  DCHECK(AllowCompilation::IsAllowed(isolate));
-
-  // Start a compilation.
-  Zone zone(isolate->allocator());
-  ParseInfo parse_info(&zone, function);
-  CompilationInfo info(&parse_info, Handle<JSFunction>::null());
-  if (IsEvalToplevel(handle(function->shared()))) {
-    parse_info.set_eval();
-    if (function->context()->IsNativeContext()) parse_info.set_global();
-    parse_info.set_toplevel();
-    parse_info.set_allow_lazy_parsing(false);
-    parse_info.set_lazy(false);
-  }
-  info.MarkAsDebug();
-  if (GetUnoptimizedCode(&info).is_null()) {
-    isolate->clear_pending_exception();
-    return false;
-  }
-
-  // Check postconditions on success.
-  DCHECK(!isolate->has_pending_exception());
-  DCHECK(function->shared()->is_compiled());
-  DCHECK(function->shared()->HasDebugCode());
-  return true;
-}
-
 bool Compiler::CompileDebugCode(Handle<SharedFunctionInfo> shared) {
   Isolate* isolate = shared->GetIsolate();
   DCHECK(AllowCompilation::IsAllowed(isolate));
 
   // Start a compilation.
-  Zone zone(isolate->allocator());
+  Zone zone(isolate->allocator(), ZONE_NAME);
   ParseInfo parse_info(&zone, shared);
   CompilationInfo info(&parse_info, Handle<JSFunction>::null());
-  DCHECK(shared->allows_lazy_compilation_without_context());
-  DCHECK(!IsEvalToplevel(shared));
   info.MarkAsDebug();
   if (GetUnoptimizedCode(&info).is_null()) {
     isolate->clear_pending_exception();
@@ -1325,13 +1157,12 @@
   // generated shared function infos, clear the script's list temporarily
   // and restore it at the end of this method.
   Handle<Object> old_function_infos(script->shared_function_infos(), isolate);
-  script->set_shared_function_infos(Smi::FromInt(0));
+  script->set_shared_function_infos(Smi::kZero);
 
   // Start a compilation.
-  Zone zone(isolate->allocator());
+  Zone zone(isolate->allocator(), ZONE_NAME);
   ParseInfo parse_info(&zone, script);
   CompilationInfo info(&parse_info, Handle<JSFunction>::null());
-  parse_info.set_global();
   info.MarkAsDebug();
 
   // TODO(635): support extensions.
@@ -1377,7 +1208,7 @@
   DCHECK_NOT_NULL(info->scope());
   Handle<SharedFunctionInfo> shared = info->shared_info();
   if (!shared->has_deoptimization_support()) {
-    Zone zone(info->isolate()->allocator());
+    Zone zone(info->isolate()->allocator(), ZONE_NAME);
     CompilationInfo unoptimized(info->parse_info(), info->closure());
     unoptimized.EnableDeoptimizationSupport();
 
@@ -1387,18 +1218,9 @@
     // TurboFan in this case.
     if (IsResumableFunction(shared->kind())) return false;
 
-    // TODO(4280): For now we disable switching to baseline code in the presence
-    // of interpreter activations of the given function. The reasons is that the
-    // underlying bytecode is cleared below. The expensive check for activations
-    // only needs to be done when the given function has bytecode, otherwise we
-    // can be sure there are no activations. Note that this only applies in case
-    // the --ignition-preserve-bytecode flag is not passed.
-    if (!FLAG_ignition_preserve_bytecode && shared->HasBytecodeArray()) {
-      InterpreterActivationsFinder activations_finder(*shared);
-      if (HasInterpreterActivations(info->isolate(), &activations_finder)) {
-        return false;
-      }
-    }
+    // When we call PrepareForSerializing below, we will change the shared
+    // ParseInfo. Make sure to reset it.
+    bool old_will_serialize_value = info->parse_info()->will_serialize();
 
     // If the current code has reloc info for serialization, also include
     // reloc info for serialization for the new code, so that deopt support
@@ -1410,13 +1232,7 @@
     EnsureFeedbackMetadata(&unoptimized);
     if (!FullCodeGenerator::MakeCode(&unoptimized)) return false;
 
-    // TODO(4280): For now we play it safe and remove the bytecode array when we
-    // switch to baseline code. We might consider keeping around the bytecode so
-    // that it can be used as the "source of truth" eventually. Note that this
-    // only applies in case the --ignition-preserve-bytecode flag is not passed.
-    if (!FLAG_ignition_preserve_bytecode && shared->HasBytecodeArray()) {
-      shared->ClearBytecodeArray();
-    }
+    info->parse_info()->set_will_serialize(old_will_serialize_value);
 
     // The scope info might not have been set if a lazily compiled
     // function is inlined before being called for the first time.
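
The save/restore pair introduced in the hunk above exists because PrepareForSerializing mutates a ParseInfo that the outer compilation shares. A minimal sketch of the pattern, using stand-in types rather than V8's real CompilationInfo/ParseInfo classes:

    #include <cassert>

    // Stand-in for ParseInfo: only the flag this hunk cares about.
    struct ParseInfoSketch {
      bool will_serialize = false;
    };

    void EnsureDeoptSupportSketch(ParseInfoSketch* parse_info,
                                  bool prepare_for_serializing) {
      // Capture the shared state before anything mutates it.
      bool old_will_serialize_value = parse_info->will_serialize;
      if (prepare_for_serializing) parse_info->will_serialize = true;
      // ... feedback metadata + full-codegen would run here ...
      // Restore, so the outer compilation sees its original flag again.
      parse_info->will_serialize = old_will_serialize_value;
    }

    int main() {
      ParseInfoSketch info;
      EnsureDeoptSupportSketch(&info, /*prepare_for_serializing=*/true);
      assert(!info.will_serialize);  // shared flag is back to its old value
    }
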
@@ -1437,8 +1253,8 @@
 // static
 Compiler::CompilationTier Compiler::NextCompilationTier(JSFunction* function) {
   Handle<SharedFunctionInfo> shared(function->shared(), function->GetIsolate());
-  if (shared->code()->is_interpreter_trampoline_builtin()) {
-    if (FLAG_turbo_from_bytecode && UseTurboFan(shared)) {
+  if (shared->IsInterpreted()) {
+    if (UseTurboFan(shared)) {
       return OPTIMIZED;
     } else {
       return BASELINE;
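
The NextCompilationTier hunk above replaces the trampoline check with SharedFunctionInfo::IsInterpreted() and drops the old --turbo-from-bytecode gate. A sketch of the resulting decision for an interpreted function, with use_turbofan standing in for the UseTurboFan(shared) predicate:

    enum Tier { BASELINE, OPTIMIZED };

    // Interpreted functions that qualify for TurboFan now skip the baseline
    // tier entirely; otherwise they go through full-codegen baseline first.
    Tier NextTierForInterpretedSketch(bool use_turbofan) {
      return use_turbofan ? OPTIMIZED : BASELINE;
    }
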
@@ -1468,6 +1284,7 @@
   Handle<Script> script;
   if (!maybe_shared_info.ToHandle(&shared_info)) {
     script = isolate->factory()->NewScript(source);
+    if (FLAG_trace_deopt) Script::InitLineEnds(script);
     if (!script_name.is_null()) {
       script->set_name(*script_name);
       script->set_line_offset(line_offset);
@@ -1477,11 +1294,10 @@
     script->set_compilation_type(Script::COMPILATION_TYPE_EVAL);
     Script::SetEvalOrigin(script, outer_info, eval_position);
 
-    Zone zone(isolate->allocator());
+    Zone zone(isolate->allocator(), ZONE_NAME);
     ParseInfo parse_info(&zone, script);
     CompilationInfo info(&parse_info, Handle<JSFunction>::null());
     parse_info.set_eval();
-    if (context->IsNativeContext()) parse_info.set_global();
     parse_info.set_language_mode(language_mode);
     parse_info.set_parse_restriction(restriction);
     if (!context->IsNativeContext()) {
@@ -1628,6 +1444,7 @@
 
     // Create a script object describing the script to be compiled.
     Handle<Script> script = isolate->factory()->NewScript(source);
+    if (FLAG_trace_deopt) Script::InitLineEnds(script);
     if (natives == NATIVES_CODE) {
       script->set_type(Script::TYPE_NATIVE);
       script->set_hide_source(true);
@@ -1646,14 +1463,10 @@
     }
 
     // Compile the function and add it to the cache.
-    Zone zone(isolate->allocator());
+    Zone zone(isolate->allocator(), ZONE_NAME);
     ParseInfo parse_info(&zone, script);
     CompilationInfo info(&parse_info, Handle<JSFunction>::null());
-    if (is_module) {
-      parse_info.set_module();
-    } else {
-      parse_info.set_global();
-    }
+    if (is_module) parse_info.set_module();
     if (compile_options != ScriptCompiler::kNoCompileOptions) {
       parse_info.set_cached_data(cached_data);
     }
@@ -1766,59 +1579,49 @@
     result->set_never_compiled(outer_info->shared_info()->never_compiled());
   }
 
-  Zone zone(isolate->allocator());
+  Zone zone(isolate->allocator(), ZONE_NAME);
   ParseInfo parse_info(&zone, script);
   CompilationInfo info(&parse_info, Handle<JSFunction>::null());
   parse_info.set_literal(literal);
   parse_info.set_shared_info(result);
   parse_info.set_language_mode(literal->scope()->language_mode());
+  parse_info.set_ast_value_factory(
+      outer_info->parse_info()->ast_value_factory());
+  parse_info.set_ast_value_factory_owned(false);
+
   if (outer_info->will_serialize()) info.PrepareForSerializing();
   if (outer_info->is_debug()) info.MarkAsDebug();
 
-  // Determine if the function can be lazily compiled. This is necessary to
-  // allow some of our builtin JS files to be lazily compiled. These
-  // builtins cannot be handled lazily by the parser, since we have to know
-  // if a function uses the special natives syntax, which is something the
-  // parser records.
-  // If the debugger requests compilation for break points, we cannot be
-  // aggressive about lazy compilation, because it might trigger compilation
-  // of functions without an outer context when setting a breakpoint through
-  // Debug::FindSharedFunctionInfoInScript.
-  bool allow_lazy = literal->AllowsLazyCompilation() && !info.is_debug();
-  bool lazy = FLAG_lazy && allow_lazy && !literal->should_eager_compile();
-
-  // Consider compiling eagerly when targeting the code cache.
-  lazy &= !(FLAG_serialize_eager && info.will_serialize());
-
-  // Consider compiling eagerly when compiling bytecode for Ignition.
-  lazy &=
-      !(FLAG_ignition && FLAG_ignition_eager && !isolate->serializer_enabled());
-
-  // Generate code
-  TimerEventScope<TimerEventCompileCode> timer(isolate);
-  RuntimeCallTimerScope runtimeTimer(isolate, &RuntimeCallStats::CompileCode);
-  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
-
-  if (lazy) {
-    info.SetCode(isolate->builtins()->CompileLazy());
-    Scope* outer_scope = literal->scope()->GetOuterScopeWithContext();
-    if (outer_scope) {
-      result->set_outer_scope_info(*outer_scope->scope_info());
+  // If this inner function is already compiled, we don't need to compile
+  // again. When compiling for debug, we are not interested in having debug
+  // break slots in inner functions, either for setting break points or
+  // for revealing inner functions.
+  // This is especially important for generators. We must not replace the
+  // code for generators, as there may be suspended generator objects.
+  if (!result->is_compiled()) {
+    if (!literal->ShouldEagerCompile()) {
+      info.SetCode(isolate->builtins()->CompileLazy());
+      Scope* outer_scope = literal->scope()->GetOuterScopeWithContext();
+      if (outer_scope) {
+        result->set_outer_scope_info(*outer_scope->scope_info());
+      }
+    } else {
+      // Generate code
+      TimerEventScope<TimerEventCompileCode> timer(isolate);
+      RuntimeCallTimerScope runtimeTimer(isolate,
+                                         &RuntimeCallStats::CompileCode);
+      TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
+      if (Renumber(info.parse_info()) && GenerateUnoptimizedCode(&info)) {
+        // Code generation will ensure that the feedback vector is present and
+        // appropriately sized.
+        DCHECK(!info.code().is_null());
+        if (literal->should_be_used_once_hint()) {
+          info.code()->MarkToBeExecutedOnce(isolate);
+        }
+      } else {
+        return Handle<SharedFunctionInfo>::null();
+      }
     }
-  } else if (Renumber(info.parse_info()) && GenerateUnoptimizedCode(&info)) {
-    // Code generation will ensure that the feedback vector is present and
-    // appropriately sized.
-    DCHECK(!info.code().is_null());
-    if (literal->should_eager_compile() &&
-        literal->should_be_used_once_hint()) {
-      info.code()->MarkToBeExecutedOnce(isolate);
-    }
-    // Update the shared function info with the scope info.
-    InstallSharedScopeInfo(&info, result);
-    // Install compilation result on the shared function info.
-    InstallSharedCompilationResult(&info, result);
-  } else {
-    return Handle<SharedFunctionInfo>::null();
   }
 
   if (maybe_existing.is_null()) {
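
The restructured block above makes the compile decision for inner functions explicit: already-compiled SharedFunctionInfos are left untouched, and otherwise the literal's own eagerness hint decides between installing the lazy-compile builtin and generating code immediately. A sketch under stand-in types (not the real SharedFunctionInfo/FunctionLiteral):

    #include <cstdio>

    struct SharedSketch  { bool is_compiled; };
    struct LiteralSketch { bool should_eager_compile; };

    void CompileInnerFunctionSketch(const SharedSketch& result,
                                    const LiteralSketch& literal) {
      // Never replace existing code: a suspended generator object may still
      // be running on it.
      if (result.is_compiled) return;
      if (!literal.should_eager_compile) {
        std::printf("install CompileLazy builtin\n");
      } else {
        std::printf("renumber + generate unoptimized code now\n");
      }
    }
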
@@ -1887,8 +1690,13 @@
     return FinalizeOptimizedCompilationJob(job.get()) ==
            CompilationJob::SUCCEEDED;
   } else {
-    return FinalizeUnoptimizedCompilationJob(job.get()) ==
-           CompilationJob::SUCCEEDED;
+    if (FinalizeUnoptimizedCompilationJob(job.get()) ==
+        CompilationJob::SUCCEEDED) {
+      RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG,
+                                job->info());
+      return true;
+    }
+    return false;
   }
 }
 
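Every Zone constructed in this file now passes ZONE_NAME as a second argument. A sketch of why, assuming ZONE_NAME expands to __func__ as in V8's src/zone/zone.h at this revision (Zone and the allocator below are stand-ins, not the real classes):

    #include <cstdio>

    #define ZONE_NAME __func__

    struct AllocatorSketch {};

    struct ZoneSketch {
      ZoneSketch(AllocatorSketch* /*allocator*/, const char* name) {
        // The name records the creation site, so zone memory statistics can
        // be attributed per call site.
        std::printf("zone created in %s\n", name);
      }
    };

    void GetUnoptimizedCodeSketch(AllocatorSketch* alloc) {
      ZoneSketch zone(alloc, ZONE_NAME);  // prints "GetUnoptimizedCodeSketch"
    }
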
diff --git a/src/compiler.h b/src/compiler.h
index bfeaa8e..03c6f81 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -48,7 +48,6 @@
   static bool Compile(Handle<JSFunction> function, ClearExceptionFlag flag);
   static bool CompileBaseline(Handle<JSFunction> function);
   static bool CompileOptimized(Handle<JSFunction> function, ConcurrencyMode);
-  static bool CompileDebugCode(Handle<JSFunction> function);
   static bool CompileDebugCode(Handle<SharedFunctionInfo> shared);
   static MaybeHandle<JSArray> CompileForLiveEdit(Handle<Script> script);
 
diff --git a/src/compiler/access-builder.cc b/src/compiler/access-builder.cc
index 5301434..540eb37 100644
--- a/src/compiler/access-builder.cc
+++ b/src/compiler/access-builder.cc
@@ -146,11 +146,10 @@
 
 // static
 FieldAccess AccessBuilder::ForJSGeneratorObjectContinuation() {
-  TypeCache const& type_cache = TypeCache::Get();
   FieldAccess access = {kTaggedBase,
                         JSGeneratorObject::kContinuationOffset,
                         Handle<Name>(),
-                        type_cache.kSmi,
+                        Type::SignedSmall(),
                         MachineType::TaggedSigned(),
                         kNoWriteBarrier};
   return access;
@@ -180,10 +179,12 @@
 
 // static
 FieldAccess AccessBuilder::ForJSGeneratorObjectResumeMode() {
-  TypeCache const& type_cache = TypeCache::Get();
-  FieldAccess access = {
-      kTaggedBase,     JSGeneratorObject::kResumeModeOffset, Handle<Name>(),
-      type_cache.kSmi, MachineType::TaggedSigned(),          kNoWriteBarrier};
+  FieldAccess access = {kTaggedBase,
+                        JSGeneratorObject::kResumeModeOffset,
+                        Handle<Name>(),
+                        Type::SignedSmall(),
+                        MachineType::TaggedSigned(),
+                        kNoWriteBarrier};
   return access;
 }
 
@@ -356,7 +357,7 @@
   FieldAccess access = {kTaggedBase,
                         FixedTypedArrayBase::kExternalPointerOffset,
                         MaybeHandle<Name>(),
-                        Type::OtherInternal(),
+                        Type::ExternalPointer(),
                         MachineType::Pointer(),
                         kNoWriteBarrier};
   return access;
@@ -433,6 +434,27 @@
   return access;
 }
 
+// static
+FieldAccess AccessBuilder::ForModuleRegularExports() {
+  FieldAccess access = {kTaggedBase,
+                        Module::kRegularExportsOffset,
+                        Handle<Name>(),
+                        Type::OtherInternal(),
+                        MachineType::TaggedPointer(),
+                        kPointerWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForModuleRegularImports() {
+  FieldAccess access = {kTaggedBase,
+                        Module::kRegularImportsOffset,
+                        Handle<Name>(),
+                        Type::OtherInternal(),
+                        MachineType::TaggedPointer(),
+                        kPointerWriteBarrier};
+  return access;
+}
 
 // static
 FieldAccess AccessBuilder::ForNameHashField() {
@@ -490,7 +512,7 @@
   FieldAccess access = {kTaggedBase,
                         ExternalString::kResourceDataOffset,
                         Handle<Name>(),
-                        Type::OtherInternal(),
+                        Type::ExternalPointer(),
                         MachineType::Pointer(),
                         kNoWriteBarrier};
   return access;
@@ -549,6 +571,59 @@
 }
 
 // static
+FieldAccess AccessBuilder::ForJSArrayIteratorObject() {
+  FieldAccess access = {kTaggedBase,
+                        JSArrayIterator::kIteratedObjectOffset,
+                        Handle<Name>(),
+                        Type::ReceiverOrUndefined(),
+                        MachineType::TaggedPointer(),
+                        kPointerWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSArrayIteratorIndex(InstanceType instance_type,
+                                                   ElementsKind elements_kind) {
+  // In the generic case, cap the index to 2^53-1 (per ToLength() in the
+  // spec) via kPositiveSafeInteger.
+  FieldAccess access = {kTaggedBase,
+                        JSArrayIterator::kNextIndexOffset,
+                        Handle<Name>(),
+                        TypeCache::Get().kPositiveSafeInteger,
+                        MachineType::AnyTagged(),
+                        kFullWriteBarrier};
+  if (instance_type == JS_ARRAY_TYPE) {
+    if (IsFastDoubleElementsKind(elements_kind)) {
+      access.type = TypeCache::Get().kFixedDoubleArrayLengthType;
+      access.machine_type = MachineType::TaggedSigned();
+      access.write_barrier_kind = kNoWriteBarrier;
+    } else if (IsFastElementsKind(elements_kind)) {
+      access.type = TypeCache::Get().kFixedArrayLengthType;
+      access.machine_type = MachineType::TaggedSigned();
+      access.write_barrier_kind = kNoWriteBarrier;
+    } else {
+      access.type = TypeCache::Get().kJSArrayLengthType;
+    }
+  } else if (instance_type == JS_TYPED_ARRAY_TYPE) {
+    access.type = TypeCache::Get().kJSTypedArrayLengthType;
+    access.machine_type = MachineType::TaggedSigned();
+    access.write_barrier_kind = kNoWriteBarrier;
+  }
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSArrayIteratorObjectMap() {
+  FieldAccess access = {kTaggedBase,
+                        JSArrayIterator::kIteratedObjectMapOffset,
+                        Handle<Name>(),
+                        Type::OtherInternal(),
+                        MachineType::TaggedPointer(),
+                        kPointerWriteBarrier};
+  return access;
+}
+
+// static
 FieldAccess AccessBuilder::ForJSStringIteratorString() {
   FieldAccess access = {
       kTaggedBase,    JSStringIterator::kStringOffset, Handle<Name>(),
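
ForJSArrayIteratorIndex above starts from the widest bound ToLength() permits and narrows it when the iterated object's layout is known. A sketch with stand-in types and illustrative numeric bounds (the real limits come from the TypeCache):

    enum MachineRepSketch { kAnyTaggedS, kTaggedSignedS };

    struct IndexAccessSketch {
      double type_upper_bound;  // upper bound of the field's value range
      MachineRepSketch rep;
    };

    IndexAccessSketch ForIteratorIndexSketch(bool is_js_array,
                                             bool is_fast_elements,
                                             bool is_typed_array) {
      // Generic case: anything ToLength() can produce, up to 2^53 - 1,
      // stored as an arbitrary tagged number.
      IndexAccessSketch access = {9007199254740991.0, kAnyTaggedS};
      if ((is_js_array && is_fast_elements) || is_typed_array) {
        // Known-fast layouts bound the index by a backing-store length that
        // fits a Smi, so the field can use a tagged-signed representation
        // and skip the write barrier (mirroring the hunk above).
        access.type_upper_bound = 2147483647.0;  // illustrative Smi-ish bound
        access.rep = kTaggedSignedS;
      }
      return access;
    }
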
@@ -611,6 +686,14 @@
 
 
 // static
+FieldAccess AccessBuilder::ForCellValue() {
+  FieldAccess access = {
+      kTaggedBase, Cell::kValueOffset,       Handle<Name>(),
+      Type::Any(), MachineType::AnyTagged(), kFullWriteBarrier};
+  return access;
+}
+
+// static
 FieldAccess AccessBuilder::ForContextSlot(size_t index) {
   int offset = Context::kHeaderSize + static_cast<int>(index) * kPointerSize;
   DCHECK_EQ(offset,
@@ -656,7 +739,7 @@
                           MachineType::AnyTagged(), kFullWriteBarrier};
   switch (kind) {
     case FAST_SMI_ELEMENTS:
-      access.type = TypeCache::Get().kSmi;
+      access.type = Type::SignedSmall();
       access.machine_type = MachineType::TaggedSigned();
       access.write_barrier_kind = kNoWriteBarrier;
       break;
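
All of the builders in this file fill in the same FieldAccess aggregate from src/compiler/simplified-operator.h. A simplified mock showing what each brace-initializer position means; the member types are stand-ins, and the offset below is a placeholder, not the real Cell::kValueOffset:

    enum BaseTaggednessSketch { kUntaggedBaseS, kTaggedBaseS };
    enum WriteBarrierKindSketch {
      kNoWriteBarrierS,
      kPointerWriteBarrierS,
      kFullWriteBarrierS
    };

    struct FieldAccessSketch {
      BaseTaggednessSketch base_is_tagged;  // tagged object vs. raw pointer
      int offset;                           // byte offset of the field
      const char* name;                     // stand-in for MaybeHandle<Name>
      const char* type;                     // stand-in for the value Type
      const char* machine_type;             // stand-in for MachineType
      WriteBarrierKindSketch write_barrier_kind;
    };

    // Mirrors the new ForCellValue(): a fully generic tagged slot, hence the
    // full write barrier.
    FieldAccessSketch ForCellValueSketch() {
      FieldAccessSketch access = {kTaggedBaseS, 8,           "",
                                  "Any",        "AnyTagged", kFullWriteBarrierS};
      return access;
    }
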
diff --git a/src/compiler/access-builder.h b/src/compiler/access-builder.h
index 96f3200..eb8e78f 100644
--- a/src/compiler/access-builder.h
+++ b/src/compiler/access-builder.h
@@ -5,8 +5,10 @@
 #ifndef V8_COMPILER_ACCESS_BUILDER_H_
 #define V8_COMPILER_ACCESS_BUILDER_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/compiler/simplified-operator.h"
 #include "src/elements-kind.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -15,7 +17,8 @@
 // This access builder provides a set of static methods constructing commonly
 // used FieldAccess and ElementAccess descriptors. These descriptors serve as
 // parameters to simplified load/store operators.
-class AccessBuilder final : public AllStatic {
+class V8_EXPORT_PRIVATE AccessBuilder final
+    : public NON_EXPORTED_BASE(AllStatic) {
  public:
   // ===========================================================================
   // Access to external values (based on external references).
@@ -143,6 +146,12 @@
   // Provides access to Map::prototype() field.
   static FieldAccess ForMapPrototype();
 
+  // Provides access to Module::regular_exports() field.
+  static FieldAccess ForModuleRegularExports();
+
+  // Provides access to Module::regular_imports() field.
+  static FieldAccess ForModuleRegularImports();
+
   // Provides access to Name::hash_field() field.
   static FieldAccess ForNameHashField();
 
@@ -182,6 +191,16 @@
   // Provides access to JSGlobalObject::native_context() field.
   static FieldAccess ForJSGlobalObjectNativeContext();
 
+  // Provides access to JSArrayIterator::object() field.
+  static FieldAccess ForJSArrayIteratorObject();
+
+  // Provides access to JSArrayIterator::index() field.
+  static FieldAccess ForJSArrayIteratorIndex(InstanceType type = JS_OBJECT_TYPE,
+                                             ElementsKind kind = NO_ELEMENTS);
+
+  // Provides access to JSArrayIterator::object_map() field.
+  static FieldAccess ForJSArrayIteratorObjectMap();
+
   // Provides access to JSStringIterator::string() field.
   static FieldAccess ForJSStringIteratorString();
 
@@ -191,6 +210,9 @@
   // Provides access to JSValue::value() field.
   static FieldAccess ForValue();
 
+  // Provides access to Cell::value() field.
+  static FieldAccess ForCellValue();
+
   // Provides access to arguments object fields.
   static FieldAccess ForArgumentsLength();
   static FieldAccess ForArgumentsCallee();
diff --git a/src/compiler/access-info.cc b/src/compiler/access-info.cc
index 329cb93..866b060 100644
--- a/src/compiler/access-info.cc
+++ b/src/compiler/access-info.cc
@@ -10,6 +10,7 @@
 #include "src/compiler/type-cache.h"
 #include "src/field-index-inl.h"
 #include "src/field-type.h"
+#include "src/ic/call-optimization.h"
 #include "src/objects-inl.h"
 
 namespace v8 {
@@ -24,8 +25,6 @@
   if (map->has_indexed_interceptor()) return false;
   ElementsKind const elements_kind = map->elements_kind();
   if (IsFastElementsKind(elements_kind)) return true;
-  // TODO(bmeurer): Add support for other elements kind.
-  if (elements_kind == UINT8_CLAMPED_ELEMENTS) return false;
   if (IsFixedTypedArrayElementsKind(elements_kind)) return true;
   return false;
 }
@@ -95,6 +94,12 @@
   return PropertyAccessInfo(kAccessorConstant, holder, constant, receiver_maps);
 }
 
+// static
+PropertyAccessInfo PropertyAccessInfo::Generic(MapList const& receiver_maps) {
+  return PropertyAccessInfo(kGeneric, MaybeHandle<JSObject>(), Handle<Object>(),
+                            receiver_maps);
+}
+
 PropertyAccessInfo::PropertyAccessInfo()
     : kind_(kInvalid),
       field_representation_(MachineRepresentation::kNone),
@@ -168,6 +173,12 @@
       }
       return false;
     }
+    case kGeneric: {
+      this->receiver_maps_.insert(this->receiver_maps_.end(),
+                                  that->receiver_maps_.begin(),
+                                  that->receiver_maps_.end());
+      return true;
+    }
   }
 
   UNREACHABLE();
@@ -301,7 +312,7 @@
               MachineRepresentation::kTagged;
           MaybeHandle<Map> field_map;
           if (details_representation.IsSmi()) {
-            field_type = type_cache_.kSmi;
+            field_type = Type::SignedSmall();
             field_representation = MachineRepresentation::kTaggedSigned;
           } else if (details_representation.IsDouble()) {
             field_type = type_cache_.kFloat64;
@@ -322,7 +333,7 @@
               // Add proper code dependencies in case of stable field map(s).
               Handle<Map> field_owner_map(map->FindFieldOwner(number),
                                           isolate());
-              dependencies()->AssumeFieldType(field_owner_map);
+              dependencies()->AssumeFieldOwner(field_owner_map);
 
               // Remember the field map, and try to infer a useful type.
               field_type = Type::For(descriptors_field_type->AsClass());
@@ -343,8 +354,13 @@
                   : Handle<AccessorPair>::cast(accessors)->setter(),
               isolate());
           if (!accessor->IsJSFunction()) {
-            // TODO(turbofan): Add support for API accessors.
-            return false;
+            CallOptimization optimization(accessor);
+            if (!optimization.is_simple_api_call()) {
+              return false;
+            }
+            if (optimization.api_call_info()->fast_handler()->IsCode()) {
+              return false;
+            }
           }
           *access_info = PropertyAccessInfo::AccessorConstant(
               MapList{receiver_map}, accessor, holder);
@@ -474,7 +490,10 @@
                                          MaybeHandle<JSObject> holder,
                                          PropertyAccessInfo* access_info) {
   // Check if the {map} has a data transition with the given {name}.
-  if (map->unused_property_fields() == 0) return false;
+  if (map->unused_property_fields() == 0) {
+    *access_info = PropertyAccessInfo::Generic(MapList{map});
+    return true;
+  }
   Handle<Map> transition_map;
   if (TransitionArray::SearchTransition(map, kData, name, NONE)
           .ToHandle(&transition_map)) {
@@ -493,7 +512,7 @@
     MaybeHandle<Map> field_map;
     MachineRepresentation field_representation = MachineRepresentation::kTagged;
     if (details_representation.IsSmi()) {
-      field_type = type_cache_.kSmi;
+      field_type = Type::SignedSmall();
       field_representation = MachineRepresentation::kTaggedSigned;
     } else if (details_representation.IsDouble()) {
       field_type = type_cache_.kFloat64;
@@ -512,7 +531,7 @@
         // Add proper code dependencies in case of stable field map(s).
         Handle<Map> field_owner_map(transition_map->FindFieldOwner(number),
                                     isolate());
-        dependencies()->AssumeFieldType(field_owner_map);
+        dependencies()->AssumeFieldOwner(field_owner_map);
 
         // Remember the field map, and try to infer a useful type.
         field_type = Type::For(descriptors_field_type->AsClass());
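
The new kGeneric kind merges by unioning receiver-map lists, since a generic access carries no field layout to reconcile. A sketch with an int standing in for Handle<Map>:

    #include <vector>

    using MapSketch = int;
    using MapListSketch = std::vector<MapSketch>;

    struct GenericAccessInfoSketch {
      MapListSketch receiver_maps;

      bool Merge(const GenericAccessInfoSketch* that) {
        // No field type, representation, or holder to check: just widen the
        // set of receiver maps this info covers.
        receiver_maps.insert(receiver_maps.end(), that->receiver_maps.begin(),
                             that->receiver_maps.end());
        return true;  // generic infos always merge successfully
      }
    };
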
diff --git a/src/compiler/access-info.h b/src/compiler/access-info.h
index ac186fb..1d485dd 100644
--- a/src/compiler/access-info.h
+++ b/src/compiler/access-info.h
@@ -61,7 +61,8 @@
     kNotFound,
     kDataConstant,
     kDataField,
-    kAccessorConstant
+    kAccessorConstant,
+    kGeneric
   };
 
   static PropertyAccessInfo NotFound(MapList const& receiver_maps,
@@ -78,6 +79,7 @@
   static PropertyAccessInfo AccessorConstant(MapList const& receiver_maps,
                                              Handle<Object> constant,
                                              MaybeHandle<JSObject> holder);
+  static PropertyAccessInfo Generic(MapList const& receiver_maps);
 
   PropertyAccessInfo();
 
@@ -87,6 +89,7 @@
   bool IsDataConstant() const { return kind() == kDataConstant; }
   bool IsDataField() const { return kind() == kDataField; }
   bool IsAccessorConstant() const { return kind() == kAccessorConstant; }
+  bool IsGeneric() const { return kind() == kGeneric; }
 
   bool HasTransitionMap() const { return !transition_map().is_null(); }
 
diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc
index dbe1828..c473b9b 100644
--- a/src/compiler/arm/code-generator-arm.cc
+++ b/src/compiler/arm/code-generator-arm.cc
@@ -136,25 +136,13 @@
     FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
     return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
   }
-
-  FloatRegister InputFloat32Register(size_t index) {
-    return ToFloat32Register(instr_->InputAt(index));
-  }
-
-  FloatRegister OutputFloat32Register() {
-    return ToFloat32Register(instr_->Output());
-  }
-
-  FloatRegister ToFloat32Register(InstructionOperand* op) {
-    return LowDwVfpRegister::from_code(ToDoubleRegister(op).code()).low();
-  }
 };
 
 namespace {
 
-class OutOfLineLoadFloat32 final : public OutOfLineCode {
+class OutOfLineLoadFloat final : public OutOfLineCode {
  public:
-  OutOfLineLoadFloat32(CodeGenerator* gen, SwVfpRegister result)
+  OutOfLineLoadFloat(CodeGenerator* gen, SwVfpRegister result)
       : OutOfLineCode(gen), result_(result) {}
 
   void Generate() final {
@@ -679,8 +667,7 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
-    case kArchTailCallJSFunctionFromJSFunction:
-    case kArchTailCallJSFunction: {
+    case kArchTailCallJSFunctionFromJSFunction: {
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
         // Check the function's context matches the context argument.
@@ -688,11 +675,9 @@
         __ cmp(cp, kScratchReg);
         __ Assert(eq, kWrongFunctionContext);
       }
-      if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
-        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
-                                         i.TempRegister(0), i.TempRegister(1),
-                                         i.TempRegister(2));
-      }
+      AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                       i.TempRegister(0), i.TempRegister(1),
+                                       i.TempRegister(2));
       __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
       __ Jump(ip);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -759,7 +744,7 @@
       break;
     }
     case kArchRet:
-      AssembleReturn();
+      AssembleReturn(instr->InputAt(0));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArchStackPointer:
@@ -1084,83 +1069,92 @@
       __ mla(i.OutputRegister(1), i.InputRegister(2), i.InputRegister(1),
              i.OutputRegister(1));
       break;
-    case kArmLslPair:
+    case kArmLslPair: {
+      Register second_output =
+          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
       if (instr->InputAt(2)->IsImmediate()) {
-        __ LslPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+        __ LslPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                    i.InputRegister(1), i.InputInt32(2));
       } else {
-        __ LslPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+        __ LslPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                    i.InputRegister(1), kScratchReg, i.InputRegister(2));
       }
       break;
-    case kArmLsrPair:
+    }
+    case kArmLsrPair: {
+      Register second_output =
+          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
       if (instr->InputAt(2)->IsImmediate()) {
-        __ LsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+        __ LsrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                    i.InputRegister(1), i.InputInt32(2));
       } else {
-        __ LsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+        __ LsrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                    i.InputRegister(1), kScratchReg, i.InputRegister(2));
       }
       break;
-    case kArmAsrPair:
+    }
+    case kArmAsrPair: {
+      Register second_output =
+          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
       if (instr->InputAt(2)->IsImmediate()) {
-        __ AsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+        __ AsrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                    i.InputRegister(1), i.InputInt32(2));
       } else {
-        __ AsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+        __ AsrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                    i.InputRegister(1), kScratchReg, i.InputRegister(2));
       }
       break;
+    }
     case kArmVcmpF32:
       if (instr->InputAt(1)->IsFPRegister()) {
-        __ VFPCompareAndSetFlags(i.InputFloat32Register(0),
-                                 i.InputFloat32Register(1));
+        __ VFPCompareAndSetFlags(i.InputFloatRegister(0),
+                                 i.InputFloatRegister(1));
       } else {
         DCHECK(instr->InputAt(1)->IsImmediate());
         // 0.0 is the only immediate supported by vcmp instructions.
         DCHECK(i.InputFloat32(1) == 0.0f);
-        __ VFPCompareAndSetFlags(i.InputFloat32Register(0), i.InputFloat32(1));
+        __ VFPCompareAndSetFlags(i.InputFloatRegister(0), i.InputFloat32(1));
       }
       DCHECK_EQ(SetCC, i.OutputSBit());
       break;
     case kArmVaddF32:
-      __ vadd(i.OutputFloat32Register(), i.InputFloat32Register(0),
-              i.InputFloat32Register(1));
+      __ vadd(i.OutputFloatRegister(), i.InputFloatRegister(0),
+              i.InputFloatRegister(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVsubF32:
-      __ vsub(i.OutputFloat32Register(), i.InputFloat32Register(0),
-              i.InputFloat32Register(1));
+      __ vsub(i.OutputFloatRegister(), i.InputFloatRegister(0),
+              i.InputFloatRegister(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVmulF32:
-      __ vmul(i.OutputFloat32Register(), i.InputFloat32Register(0),
-              i.InputFloat32Register(1));
+      __ vmul(i.OutputFloatRegister(), i.InputFloatRegister(0),
+              i.InputFloatRegister(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVmlaF32:
-      __ vmla(i.OutputFloat32Register(), i.InputFloat32Register(1),
-              i.InputFloat32Register(2));
+      __ vmla(i.OutputFloatRegister(), i.InputFloatRegister(1),
+              i.InputFloatRegister(2));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVmlsF32:
-      __ vmls(i.OutputFloat32Register(), i.InputFloat32Register(1),
-              i.InputFloat32Register(2));
+      __ vmls(i.OutputFloatRegister(), i.InputFloatRegister(1),
+              i.InputFloatRegister(2));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVdivF32:
-      __ vdiv(i.OutputFloat32Register(), i.InputFloat32Register(0),
-              i.InputFloat32Register(1));
+      __ vdiv(i.OutputFloatRegister(), i.InputFloatRegister(0),
+              i.InputFloatRegister(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVsqrtF32:
-      __ vsqrt(i.OutputFloat32Register(), i.InputFloat32Register(0));
+      __ vsqrt(i.OutputFloatRegister(), i.InputFloatRegister(0));
       break;
     case kArmVabsF32:
-      __ vabs(i.OutputFloat32Register(), i.InputFloat32Register(0));
+      __ vabs(i.OutputFloatRegister(), i.InputFloatRegister(0));
       break;
     case kArmVnegF32:
-      __ vneg(i.OutputFloat32Register(), i.InputFloat32Register(0));
+      __ vneg(i.OutputFloatRegister(), i.InputFloatRegister(0));
       break;
     case kArmVcmpF64:
       if (instr->InputAt(1)->IsFPRegister()) {
@@ -1229,7 +1223,7 @@
       break;
     case kArmVrintmF32: {
       CpuFeatureScope scope(masm(), ARMv8);
-      __ vrintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
+      __ vrintm(i.OutputFloatRegister(), i.InputFloatRegister(0));
       break;
     }
     case kArmVrintmF64: {
@@ -1239,7 +1233,7 @@
     }
     case kArmVrintpF32: {
       CpuFeatureScope scope(masm(), ARMv8);
-      __ vrintp(i.OutputFloat32Register(), i.InputFloat32Register(0));
+      __ vrintp(i.OutputFloatRegister(), i.InputFloatRegister(0));
       break;
     }
     case kArmVrintpF64: {
@@ -1249,7 +1243,7 @@
     }
     case kArmVrintzF32: {
       CpuFeatureScope scope(masm(), ARMv8);
-      __ vrintz(i.OutputFloat32Register(), i.InputFloat32Register(0));
+      __ vrintz(i.OutputFloatRegister(), i.InputFloatRegister(0));
       break;
     }
     case kArmVrintzF64: {
@@ -1264,7 +1258,7 @@
     }
     case kArmVrintnF32: {
       CpuFeatureScope scope(masm(), ARMv8);
-      __ vrintn(i.OutputFloat32Register(), i.InputFloat32Register(0));
+      __ vrintn(i.OutputFloatRegister(), i.InputFloatRegister(0));
       break;
     }
     case kArmVrintnF64: {
@@ -1273,26 +1267,26 @@
       break;
     }
     case kArmVcvtF32F64: {
-      __ vcvt_f32_f64(i.OutputFloat32Register(), i.InputDoubleRegister(0));
+      __ vcvt_f32_f64(i.OutputFloatRegister(), i.InputDoubleRegister(0));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmVcvtF64F32: {
-      __ vcvt_f64_f32(i.OutputDoubleRegister(), i.InputFloat32Register(0));
+      __ vcvt_f64_f32(i.OutputDoubleRegister(), i.InputFloatRegister(0));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmVcvtF32S32: {
       SwVfpRegister scratch = kScratchDoubleReg.low();
       __ vmov(scratch, i.InputRegister(0));
-      __ vcvt_f32_s32(i.OutputFloat32Register(), scratch);
+      __ vcvt_f32_s32(i.OutputFloatRegister(), scratch);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmVcvtF32U32: {
       SwVfpRegister scratch = kScratchDoubleReg.low();
       __ vmov(scratch, i.InputRegister(0));
-      __ vcvt_f32_u32(i.OutputFloat32Register(), scratch);
+      __ vcvt_f32_u32(i.OutputFloatRegister(), scratch);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
@@ -1312,7 +1306,7 @@
     }
     case kArmVcvtS32F32: {
       SwVfpRegister scratch = kScratchDoubleReg.low();
-      __ vcvt_s32_f32(scratch, i.InputFloat32Register(0));
+      __ vcvt_s32_f32(scratch, i.InputFloatRegister(0));
       __ vmov(i.OutputRegister(), scratch);
       // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
       // because INT32_MIN allows easier out-of-bounds detection.
@@ -1323,7 +1317,7 @@
     }
     case kArmVcvtU32F32: {
       SwVfpRegister scratch = kScratchDoubleReg.low();
-      __ vcvt_u32_f32(scratch, i.InputFloat32Register(0));
+      __ vcvt_u32_f32(scratch, i.InputFloatRegister(0));
       __ vmov(i.OutputRegister(), scratch);
       // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
       // because 0 allows easier out-of-bounds detection.
@@ -1347,11 +1341,11 @@
       break;
     }
     case kArmVmovU32F32:
-      __ vmov(i.OutputRegister(), i.InputFloat32Register(0));
+      __ vmov(i.OutputRegister(), i.InputFloatRegister(0));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVmovF32U32:
-      __ vmov(i.OutputFloat32Register(), i.InputRegister(0));
+      __ vmov(i.OutputFloatRegister(), i.InputRegister(0));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVmovLowU32F64:
@@ -1409,12 +1403,12 @@
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVldrF32: {
-      __ vldr(i.OutputFloat32Register(), i.InputOffset());
+      __ vldr(i.OutputFloatRegister(), i.InputOffset());
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmVstrF32:
-      __ vstr(i.InputFloat32Register(0), i.InputOffset(1));
+      __ vstr(i.InputFloatRegister(0), i.InputOffset(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVldrF64:
@@ -1426,9 +1420,9 @@
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmFloat32Max: {
-      SwVfpRegister result = i.OutputFloat32Register();
-      SwVfpRegister left = i.InputFloat32Register(0);
-      SwVfpRegister right = i.InputFloat32Register(1);
+      SwVfpRegister result = i.OutputFloatRegister();
+      SwVfpRegister left = i.InputFloatRegister(0);
+      SwVfpRegister right = i.InputFloatRegister(1);
       if (left.is(right)) {
         __ Move(result, left);
       } else {
@@ -1454,9 +1448,9 @@
       break;
     }
     case kArmFloat32Min: {
-      SwVfpRegister result = i.OutputFloat32Register();
-      SwVfpRegister left = i.InputFloat32Register(0);
-      SwVfpRegister right = i.InputFloat32Register(1);
+      SwVfpRegister result = i.OutputFloatRegister();
+      SwVfpRegister left = i.InputFloatRegister(0);
+      SwVfpRegister right = i.InputFloatRegister(1);
       if (left.is(right)) {
         __ Move(result, left);
       } else {
@@ -1495,7 +1489,7 @@
           frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
         } else {
           DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
-          __ vpush(i.InputFloat32Register(0));
+          __ vpush(i.InputFloatRegister(0));
           frame_access_state()->IncreaseSPDelta(1);
         }
       } else {
@@ -1526,7 +1520,7 @@
       ASSEMBLE_CHECKED_LOAD_INTEGER(ldr);
       break;
     case kCheckedLoadFloat32:
-      ASSEMBLE_CHECKED_LOAD_FP(Float32);
+      ASSEMBLE_CHECKED_LOAD_FP(Float);
       break;
     case kCheckedLoadFloat64:
       ASSEMBLE_CHECKED_LOAD_FP(Double);
@@ -1541,7 +1535,7 @@
       ASSEMBLE_CHECKED_STORE_INTEGER(str);
       break;
     case kCheckedStoreFloat32:
-      ASSEMBLE_CHECKED_STORE_FP(Float32);
+      ASSEMBLE_CHECKED_STORE_FP(Float);
       break;
     case kCheckedStoreFloat64:
       ASSEMBLE_CHECKED_STORE_FP(Double);
@@ -1649,7 +1643,7 @@
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   DeoptimizeReason deoptimization_reason =
       GetDeoptimizationReason(deoptimization_id);
-  __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
+  __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   __ CheckConstPool(false, false);
   return kSuccess;
@@ -1696,6 +1690,9 @@
       }
     } else if (descriptor->IsJSFunctionCall()) {
       __ Prologue(this->info()->GeneratePreagedPrologue());
+      if (descriptor->PushArgumentCount()) {
+        __ Push(kJavaScriptCallArgCountRegister);
+      }
     } else {
       __ StubPrologue(info()->GetOutputStackFrameType());
     }
@@ -1705,7 +1702,8 @@
     }
   }
 
-  int shrink_slots = frame()->GetSpillSlotCount();
+  int shrink_slots =
+      frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
 
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
@@ -1743,8 +1741,7 @@
   }
 }
 
-
-void CodeGenerator::AssembleReturn() {
+void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   int pop_count = static_cast<int>(descriptor->StackParameterCount());
 
@@ -1768,21 +1765,34 @@
 
   unwinding_info_writer_.MarkBlockWillExit();
 
+  ArmOperandConverter g(this, nullptr);
   if (descriptor->IsCFunctionCall()) {
     AssembleDeconstructFrame();
   } else if (frame_access_state()->has_frame()) {
-    // Canonicalize JSFunction return sites for now.
-    if (return_label_.is_bound()) {
-      __ b(&return_label_);
-      return;
+    // Canonicalize JSFunction return sites for now unless they have a
+    // variable number of stack slot pops.
+    if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+      if (return_label_.is_bound()) {
+        __ b(&return_label_);
+        return;
+      } else {
+        __ bind(&return_label_);
+        AssembleDeconstructFrame();
+      }
     } else {
-      __ bind(&return_label_);
       AssembleDeconstructFrame();
     }
   }
-  __ Ret(pop_count);
-}
 
+  if (pop->IsImmediate()) {
+    DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
+    pop_count += g.ToConstant(pop).ToInt32();
+  } else {
+    __ Drop(g.ToRegister(pop));
+  }
+  __ Drop(pop_count);
+  __ Ret();
+}
 
 void CodeGenerator::AssembleMove(InstructionOperand* source,
                                  InstructionOperand* destination) {
@@ -1852,12 +1862,12 @@
       }
       if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
     } else if (src.type() == Constant::kFloat32) {
-      if (destination->IsFPStackSlot()) {
+      if (destination->IsFloatStackSlot()) {
         MemOperand dst = g.ToMemOperand(destination);
         __ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
         __ str(ip, dst);
       } else {
-        SwVfpRegister dst = g.ToFloat32Register(destination);
+        SwVfpRegister dst = g.ToFloatRegister(destination);
         __ vmov(dst, src.ToFloat32());
       }
     } else {
@@ -1866,28 +1876,60 @@
                               ? g.ToDoubleRegister(destination)
                               : kScratchDoubleReg;
       __ vmov(dst, src.ToFloat64(), kScratchReg);
-      if (destination->IsFPStackSlot()) {
+      if (destination->IsDoubleStackSlot()) {
         __ vstr(dst, g.ToMemOperand(destination));
       }
     }
   } else if (source->IsFPRegister()) {
-    DwVfpRegister src = g.ToDoubleRegister(source);
-    if (destination->IsFPRegister()) {
-      DwVfpRegister dst = g.ToDoubleRegister(destination);
-      __ Move(dst, src);
+    MachineRepresentation rep = LocationOperand::cast(source)->representation();
+    if (rep == MachineRepresentation::kFloat64) {
+      DwVfpRegister src = g.ToDoubleRegister(source);
+      if (destination->IsDoubleRegister()) {
+        DwVfpRegister dst = g.ToDoubleRegister(destination);
+        __ Move(dst, src);
+      } else {
+        DCHECK(destination->IsDoubleStackSlot());
+        __ vstr(src, g.ToMemOperand(destination));
+      }
     } else {
-      DCHECK(destination->IsFPStackSlot());
-      __ vstr(src, g.ToMemOperand(destination));
+      DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+      // GapResolver may give us reg codes that don't map to actual s-registers.
+      // Generate code to work around those cases.
+      int src_code = LocationOperand::cast(source)->register_code();
+      if (destination->IsFloatRegister()) {
+        int dst_code = LocationOperand::cast(destination)->register_code();
+        __ VmovExtended(dst_code, src_code, kScratchReg);
+      } else {
+        DCHECK(destination->IsFloatStackSlot());
+        __ VmovExtended(g.ToMemOperand(destination), src_code, kScratchReg);
+      }
     }
   } else if (source->IsFPStackSlot()) {
     MemOperand src = g.ToMemOperand(source);
+    MachineRepresentation rep =
+        LocationOperand::cast(destination)->representation();
     if (destination->IsFPRegister()) {
+      if (rep == MachineRepresentation::kFloat64) {
         __ vldr(g.ToDoubleRegister(destination), src);
+      } else {
+        DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+        // GapResolver may give us reg codes that don't map to actual
+        // s-registers. Generate code to work around those cases.
+        int dst_code = LocationOperand::cast(destination)->register_code();
+        __ VmovExtended(dst_code, src, kScratchReg);
+      }
     } else {
       DCHECK(destination->IsFPStackSlot());
+      if (rep == MachineRepresentation::kFloat64) {
         DwVfpRegister temp = kScratchDoubleReg;
         __ vldr(temp, src);
         __ vstr(temp, g.ToMemOperand(destination));
+      } else {
+        DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+        SwVfpRegister temp = kScratchDoubleReg.low();
+        __ vldr(temp, src);
+        __ vstr(temp, g.ToMemOperand(destination));
+      }
     }
   } else {
     UNREACHABLE();
@@ -1927,17 +1969,35 @@
     __ str(temp_0, dst);
     __ vstr(temp_1, src);
   } else if (source->IsFPRegister()) {
+    MachineRepresentation rep = LocationOperand::cast(source)->representation();
     LowDwVfpRegister temp = kScratchDoubleReg;
-    DwVfpRegister src = g.ToDoubleRegister(source);
-    if (destination->IsFPRegister()) {
-      DwVfpRegister dst = g.ToDoubleRegister(destination);
-      __ vswp(src, dst);
+    if (rep == MachineRepresentation::kFloat64) {
+      DwVfpRegister src = g.ToDoubleRegister(source);
+      if (destination->IsFPRegister()) {
+        DwVfpRegister dst = g.ToDoubleRegister(destination);
+        __ vswp(src, dst);
+      } else {
+        DCHECK(destination->IsFPStackSlot());
+        MemOperand dst = g.ToMemOperand(destination);
+        __ Move(temp, src);
+        __ vldr(src, dst);
+        __ vstr(temp, dst);
+      }
     } else {
-      DCHECK(destination->IsFPStackSlot());
-      MemOperand dst = g.ToMemOperand(destination);
-      __ Move(temp, src);
-      __ vldr(src, dst);
-      __ vstr(temp, dst);
+      DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+      int src_code = LocationOperand::cast(source)->register_code();
+      if (destination->IsFPRegister()) {
+        int dst_code = LocationOperand::cast(destination)->register_code();
+        __ VmovExtended(temp.low().code(), src_code, kScratchReg);
+        __ VmovExtended(src_code, dst_code, kScratchReg);
+        __ VmovExtended(dst_code, temp.low().code(), kScratchReg);
+      } else {
+        DCHECK(destination->IsFPStackSlot());
+        MemOperand dst = g.ToMemOperand(destination);
+        __ VmovExtended(temp.low().code(), src_code, kScratchReg);
+        __ VmovExtended(src_code, dst, kScratchReg);
+        __ vstr(temp.low(), dst);
+      }
     }
   } else if (source->IsFPStackSlot()) {
     DCHECK(destination->IsFPStackSlot());
@@ -1945,21 +2005,29 @@
     LowDwVfpRegister temp_1 = kScratchDoubleReg;
     MemOperand src0 = g.ToMemOperand(source);
     MemOperand dst0 = g.ToMemOperand(destination);
-    MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
-    MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
-    __ vldr(temp_1, dst0);  // Save destination in temp_1.
-    __ ldr(temp_0, src0);   // Then use temp_0 to copy source to destination.
-    __ str(temp_0, dst0);
-    __ ldr(temp_0, src1);
-    __ str(temp_0, dst1);
-    __ vstr(temp_1, src0);
+    MachineRepresentation rep = LocationOperand::cast(source)->representation();
+    if (rep == MachineRepresentation::kFloat64) {
+      MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
+      MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
+      __ vldr(temp_1, dst0);  // Save destination in temp_1.
+      __ ldr(temp_0, src0);   // Then use temp_0 to copy source to destination.
+      __ str(temp_0, dst0);
+      __ ldr(temp_0, src1);
+      __ str(temp_0, dst1);
+      __ vstr(temp_1, src0);
+    } else {
+      DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+      __ vldr(temp_1.low(), dst0);  // Save destination in temp_1.
+      __ ldr(temp_0, src0);  // Then use temp_0 to copy source to destination.
+      __ str(temp_0, dst0);
+      __ vstr(temp_1.low(), src0);
+    }
   } else {
     // No other combinations are possible.
     UNREACHABLE();
   }
 }
 
-
 void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
   // On 32-bit ARM we emit the jump tables inline.
   UNREACHABLE();
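
AssembleReturn now takes a pop operand: an immediate when the extra slot count is known statically, or a register when it is only known at run time. A sketch of just the pop arithmetic; the printf calls stand in for emitted instructions:

    #include <cstdint>
    #include <cstdio>

    // Stand-in for the two shapes the new `pop` operand can take; the real
    // code inspects an InstructionOperand via the operand converter.
    struct PopOperandSketch {
      bool is_immediate;
      int32_t immediate;  // extra slots known at compile time
      const char* reg;    // register holding the count otherwise
    };

    void AssembleReturnSketch(int stack_parameter_count,
                              const PopOperandSketch& pop) {
      if (pop.is_immediate) {
        // Static case: fold the extra slots into the fixed pop count. Only
        // returns with zero extra pops can share the canonical return label.
        std::printf("Drop %d; Ret\n", stack_parameter_count + pop.immediate);
      } else {
        // Dynamic case: drop the register-specified count first, then the
        // fixed parameters.
        std::printf("Drop %s; Drop %d; Ret\n", pop.reg, stack_parameter_count);
      }
    }
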
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
index ceb5b25..5279d1e 100644
--- a/src/compiler/arm/instruction-selector-arm.cc
+++ b/src/compiler/arm/instruction-selector-arm.cc
@@ -957,53 +957,83 @@
 void InstructionSelector::VisitInt32PairAdd(Node* node) {
   ArmOperandGenerator g(this);
 
-  // We use UseUniqueRegister here to avoid register sharing with the output
-  // registers.
-  InstructionOperand inputs[] = {
-      g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
-      g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+  Node* projection1 = NodeProperties::FindProjection(node, 1);
+  if (projection1) {
+    // We use UseUniqueRegister here to avoid register sharing with the output
+    // registers.
+    InstructionOperand inputs[] = {
+        g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+        g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
 
-  InstructionOperand outputs[] = {
-      g.DefineAsRegister(node),
-      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+    InstructionOperand outputs[] = {
+        g.DefineAsRegister(node),
+        g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
 
-  Emit(kArmAddPair, 2, outputs, 4, inputs);
+    Emit(kArmAddPair, 2, outputs, 4, inputs);
+  } else {
+    // The high word of the result is not used, so we emit the standard
+    // 32-bit instruction.
+    Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R),
+         g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+         g.UseRegister(node->InputAt(2)));
+  }
 }
 
 void InstructionSelector::VisitInt32PairSub(Node* node) {
   ArmOperandGenerator g(this);
 
-  // We use UseUniqueRegister here to avoid register sharing with the output
-  // register.
-  InstructionOperand inputs[] = {
-      g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
-      g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+  Node* projection1 = NodeProperties::FindProjection(node, 1);
+  if (projection1) {
+    // We use UseUniqueRegister here to avoid register sharing with the output
+    // register.
+    InstructionOperand inputs[] = {
+        g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+        g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
 
-  InstructionOperand outputs[] = {
-      g.DefineAsRegister(node),
-      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+    InstructionOperand outputs[] = {
+        g.DefineAsRegister(node),
+        g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
 
-  Emit(kArmSubPair, 2, outputs, 4, inputs);
+    Emit(kArmSubPair, 2, outputs, 4, inputs);
+  } else {
+    // The high word of the result is not used, so we emit the standard
+    // 32-bit instruction.
+    Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_R),
+         g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+         g.UseRegister(node->InputAt(2)));
+  }
 }
 
 void InstructionSelector::VisitInt32PairMul(Node* node) {
   ArmOperandGenerator g(this);
-  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
-                                 g.UseUniqueRegister(node->InputAt(1)),
-                                 g.UseUniqueRegister(node->InputAt(2)),
-                                 g.UseUniqueRegister(node->InputAt(3))};
+  Node* projection1 = NodeProperties::FindProjection(node, 1);
+  if (projection1) {
+    InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+                                   g.UseUniqueRegister(node->InputAt(1)),
+                                   g.UseUniqueRegister(node->InputAt(2)),
+                                   g.UseUniqueRegister(node->InputAt(3))};
 
-  InstructionOperand outputs[] = {
-      g.DefineAsRegister(node),
-      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+    InstructionOperand outputs[] = {
+        g.DefineAsRegister(node),
+        g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
 
-  Emit(kArmMulPair, 2, outputs, 4, inputs);
+    Emit(kArmMulPair, 2, outputs, 4, inputs);
+  } else {
+    // The high word of the result is not used, so we emit the standard
+    // 32-bit instruction.
+    Emit(kArmMul | AddressingModeField::encode(kMode_Operand2_R),
+         g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+         g.UseRegister(node->InputAt(2)));
+  }
 }
 
-void InstructionSelector::VisitWord32PairShl(Node* node) {
-  ArmOperandGenerator g(this);
-  // We use g.UseUniqueRegister here for InputAt(0) to guarantee that there is
-  // no register aliasing with output registers.
+namespace {
+// Shared routine for multiple shift operations.
+void VisitWord32PairShift(InstructionSelector* selector, InstructionCode opcode,
+                          Node* node) {
+  ArmOperandGenerator g(selector);
+  // We use g.UseUniqueRegister here to guarantee that there is
+  // no register aliasing of input registers with output registers.
   Int32Matcher m(node->InputAt(2));
   InstructionOperand shift_operand;
   if (m.HasValue()) {
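
Each pair selector above now gates on whether Projection(1) has a consumer before committing to the wide pair instruction. A sketch of that gate; the printf strings stand in for the Emit calls:

    #include <cstdio>

    struct NodeSketch {
      bool high_word_projection_used;  // FindProjection(node, 1) != nullptr
    };

    void VisitInt32PairOpSketch(const NodeSketch& node) {
      if (node.high_word_projection_used) {
        // Both words live: emit the pair op with 2 outputs and 4 inputs,
        // using unique registers to avoid input/output aliasing.
        std::printf("emit pair instruction (e.g. kArmAddPair)\n");
      } else {
        // High word dead: a plain 32-bit op on the low-word inputs suffices.
        std::printf("emit kArmAdd on inputs 0 and 2 only\n");
      }
    }
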
@@ -1013,60 +1043,36 @@
   }
 
   InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
-                                 g.UseRegister(node->InputAt(1)),
+                                 g.UseUniqueRegister(node->InputAt(1)),
                                  shift_operand};
 
-  InstructionOperand outputs[] = {
-      g.DefineAsRegister(node),
-      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+  Node* projection1 = NodeProperties::FindProjection(node, 1);
 
-  Emit(kArmLslPair, 2, outputs, 3, inputs);
+  InstructionOperand outputs[2];
+  InstructionOperand temps[1];
+  int32_t output_count = 0;
+  int32_t temp_count = 0;
+
+  outputs[output_count++] = g.DefineAsRegister(node);
+  if (projection1) {
+    outputs[output_count++] = g.DefineAsRegister(projection1);
+  } else {
+    temps[temp_count++] = g.TempRegister();
+  }
+
+  selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
+}
+}  // namespace
+
+void InstructionSelector::VisitWord32PairShl(Node* node) {
+  VisitWord32PairShift(this, kArmLslPair, node);
 }
 
 void InstructionSelector::VisitWord32PairShr(Node* node) {
-  ArmOperandGenerator g(this);
-  // We use g.UseUniqueRegister here for InputAt(1) and InputAt(2) to to
-  // guarantee that there is no register aliasing with output register.
-  Int32Matcher m(node->InputAt(2));
-  InstructionOperand shift_operand;
-  if (m.HasValue()) {
-    shift_operand = g.UseImmediate(m.node());
-  } else {
-    shift_operand = g.UseUniqueRegister(m.node());
-  }
-
-  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
-                                 g.UseUniqueRegister(node->InputAt(1)),
-                                 shift_operand};
-
-  InstructionOperand outputs[] = {
-      g.DefineAsRegister(node),
-      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
-
-  Emit(kArmLsrPair, 2, outputs, 3, inputs);
+  VisitWord32PairShift(this, kArmLsrPair, node);
 }
 
 void InstructionSelector::VisitWord32PairSar(Node* node) {
-  ArmOperandGenerator g(this);
-  // We use g.UseUniqueRegister here for InputAt(1) and InputAt(2) to to
-  // guarantee that there is no register aliasing with output register.
-  Int32Matcher m(node->InputAt(2));
-  InstructionOperand shift_operand;
-  if (m.HasValue()) {
-    shift_operand = g.UseImmediate(m.node());
-  } else {
-    shift_operand = g.UseUniqueRegister(m.node());
-  }
-
-  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
-                                 g.UseUniqueRegister(node->InputAt(1)),
-                                 shift_operand};
-
-  InstructionOperand outputs[] = {
-      g.DefineAsRegister(node),
-      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
-
-  Emit(kArmAsrPair, 2, outputs, 3, inputs);
+  VisitWord32PairShift(this, kArmAsrPair, node);
 }
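The shared VisitWord32PairShift keeps the two-output instruction shape even when the high word is dead, by routing the unused half into a scratch register. A standalone sketch of the output/temp bookkeeping (not V8 code; SelectPairShift and Operand are hypothetical):

#include <cassert>
#include <cstdio>

struct Operand { int reg; };

void SelectPairShift(bool projection1_used) {
  Operand outputs[2];
  Operand temps[1];
  int output_count = 0;
  int temp_count = 0;

  outputs[output_count++] = Operand{0};  // Low word: always a real output.
  if (projection1_used) {
    outputs[output_count++] = Operand{1};  // High word is consumed.
  } else {
    temps[temp_count++] = Operand{2};  // High word goes to a scratch register.
  }

  assert(output_count + temp_count == 2);  // The instruction writes two regs.
  std::printf("outputs=%d temps=%d\n", output_count, temp_count);
}

int main() {
  SelectPairShift(true);   // outputs=2 temps=0
  SelectPairShift(false);  // outputs=1 temps=1
}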
 
 void InstructionSelector::VisitWord32Ror(Node* node) {
@@ -1870,21 +1876,22 @@
 // Shared routine for word comparisons against zero.
 void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                           Node* value, FlagsContinuation* cont) {
-  while (selector->CanCover(user, value)) {
+  // Try to combine with comparisons against 0 by simply inverting the branch.
+  while (value->opcode() == IrOpcode::kWord32Equal &&
+         selector->CanCover(user, value)) {
+    Int32BinopMatcher m(value);
+    if (!m.right().Is(0)) break;
+
+    user = value;
+    value = m.left().node();
+    cont->Negate();
+  }
+
+  if (selector->CanCover(user, value)) {
     switch (value->opcode()) {
-      case IrOpcode::kWord32Equal: {
-        // Combine with comparisons against 0 by simply inverting the
-        // continuation.
-        Int32BinopMatcher m(value);
-        if (m.right().Is(0)) {
-          user = value;
-          value = m.left().node();
-          cont->Negate();
-          continue;
-        }
+      case IrOpcode::kWord32Equal:
         cont->OverwriteAndNegateIfEqual(kEqual);
         return VisitWordCompare(selector, value, cont);
-      }
       case IrOpcode::kInt32LessThan:
         cont->OverwriteAndNegateIfEqual(kSignedLessThan);
         return VisitWordCompare(selector, value, cont);
@@ -1968,7 +1975,6 @@
       default:
         break;
     }
-    break;
   }
 
   if (user->opcode() == IrOpcode::kWord32Equal) {
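The hoisted loop at the top of VisitWordCompareZero peels off any chain of Word32Equal(x, 0) wrappers by negating the continuation once per wrapper, before the single combining attempt that follows. A standalone sketch of the peeling logic (not V8 code; Node and EvaluateBranch are hypothetical):

#include <iostream>

struct Node {
  bool is_equal_zero = false;  // Models IrOpcode::kWord32Equal against zero.
  Node* input = nullptr;       // Models m.left().node().
};

// Peels Word32Equal(x, 0) wrappers, toggling the continuation instead of
// emitting a comparison for each one.
bool EvaluateBranch(Node* value, bool condition_of_leaf) {
  bool negated = false;  // Models FlagsContinuation::Negate().
  while (value->is_equal_zero) {
    value = value->input;
    negated = !negated;  // (x == 0) takes the branch iff x would not.
  }
  return negated ? !condition_of_leaf : condition_of_leaf;
}

int main() {
  Node leaf;              // Some comparison result.
  Node eq1{true, &leaf};  // leaf == 0
  Node eq2{true, &eq1};   // (leaf == 0) == 0
  std::cout << EvaluateBranch(&eq2, true) << "\n";  // Double negation: 1.
}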
diff --git a/src/compiler/arm/unwinding-info-writer-arm.cc b/src/compiler/arm/unwinding-info-writer-arm.cc
index a950612..579e5c7 100644
--- a/src/compiler/arm/unwinding-info-writer-arm.cc
+++ b/src/compiler/arm/unwinding-info-writer-arm.cc
@@ -15,7 +15,8 @@
 
   block_will_exit_ = false;
 
-  DCHECK_LT(block->rpo_number().ToInt(), block_initial_states_.size());
+  DCHECK_LT(block->rpo_number().ToInt(),
+            static_cast<int>(block_initial_states_.size()));
   const BlockInitialState* initial_state =
       block_initial_states_[block->rpo_number().ToInt()];
   if (initial_state) {
@@ -42,7 +43,7 @@
 
   for (const RpoNumber& successor : block->successors()) {
     int successor_index = successor.ToInt();
-    DCHECK_LT(successor_index, block_initial_states_.size());
+    DCHECK_LT(successor_index, static_cast<int>(block_initial_states_.size()));
     const BlockInitialState* existing_state =
         block_initial_states_[successor_index];
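The static_cast<int> added to these DCHECK_LT calls keeps both operands signed: RPO numbers are ints, while vector::size() is unsigned, and mixing the two triggers -Wsign-compare plus a surprising unsigned conversion. A standalone sketch of the difference (not V8 code):

#include <cstdio>
#include <vector>

int main() {
  std::vector<int> states(4);
  int index = -1;
  // Unsigned comparison: index converts to a huge std::size_t, so this is
  // false even though -1 is numerically less than 4.
  bool unsigned_cmp = static_cast<std::size_t>(index) < states.size();
  // Signed comparison, as in the patched DCHECK_LT: narrow size() instead.
  bool signed_cmp = index < static_cast<int>(states.size());
  std::printf("unsigned=%d signed=%d\n", unsigned_cmp, signed_cmp);  // 0 1
}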
 
diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
index f543b18..8b1cb57 100644
--- a/src/compiler/arm64/code-generator-arm64.cc
+++ b/src/compiler/arm64/code-generator-arm64.cc
@@ -709,8 +709,7 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
-    case kArchTailCallJSFunctionFromJSFunction:
-    case kArchTailCallJSFunction: {
+    case kArchTailCallJSFunctionFromJSFunction: {
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
         // Check the function's context matches the context argument.
@@ -720,11 +719,9 @@
         __ cmp(cp, temp);
         __ Assert(eq, kWrongFunctionContext);
       }
-      if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
-        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
-                                         i.TempRegister(0), i.TempRegister(1),
-                                         i.TempRegister(2));
-      }
+      AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                       i.TempRegister(0), i.TempRegister(1),
+                                       i.TempRegister(2));
       __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
       __ Jump(x10);
       frame_access_state()->ClearSPDelta();
@@ -786,7 +783,7 @@
       break;
     }
     case kArchRet:
-      AssembleReturn();
+      AssembleReturn(instr->InputAt(0));
       break;
     case kArchStackPointer:
       __ mov(i.OutputRegister(), masm()->StackPointer());
@@ -1759,7 +1756,7 @@
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   DeoptimizeReason deoptimization_reason =
       GetDeoptimizationReason(deoptimization_id);
-  __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
+  __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
 }
@@ -1798,43 +1795,57 @@
     __ AssertCspAligned();
   }
 
+  int fixed_frame_size = descriptor->CalculateFixedFrameSize();
+  int shrink_slots = frame()->GetTotalFrameSlotCount() - fixed_frame_size;
+
   if (frame_access_state()->has_frame()) {
+    // Link the frame.
     if (descriptor->IsJSFunctionCall()) {
       DCHECK(!descriptor->UseNativeStack());
       __ Prologue(this->info()->GeneratePreagedPrologue());
     } else {
-      if (descriptor->IsCFunctionCall()) {
-        __ Push(lr, fp);
-        __ Mov(fp, masm_.StackPointer());
-        __ Claim(frame()->GetSpillSlotCount());
-      } else {
-        __ StubPrologue(info()->GetOutputStackFrameType(),
-                        frame()->GetTotalFrameSlotCount());
-      }
+      __ Push(lr, fp);
+      __ Mov(fp, masm_.StackPointer());
     }
-
     if (!info()->GeneratePreagedPrologue()) {
       unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
     }
-  }
 
-  int shrink_slots = frame()->GetSpillSlotCount();
+    // Create OSR entry if applicable.
+    if (info()->is_osr()) {
+      // TurboFan OSR-compiled functions cannot be entered directly.
+      __ Abort(kShouldNotDirectlyEnterOsrFunction);
 
-  if (info()->is_osr()) {
-    // TurboFan OSR-compiled functions cannot be entered directly.
-    __ Abort(kShouldNotDirectlyEnterOsrFunction);
+      // Unoptimized code jumps directly to this entrypoint while the
+      // unoptimized frame is still on the stack. Optimized code uses OSR
+      // values directly from the unoptimized frame. Thus, all that needs to
+      // be done is to allocate the remaining stack slots.
+      if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+      osr_pc_offset_ = __ pc_offset();
+      shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+    }
 
-    // Unoptimized code jumps directly to this entrypoint while the unoptimized
-    // frame is still on the stack. Optimized code uses OSR values directly from
-    // the unoptimized frame. Thus, all that needs to be done is to allocate the
-    // remaining stack slots.
-    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
-    osr_pc_offset_ = __ pc_offset();
-    shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
-  }
-
-  if (descriptor->IsJSFunctionCall()) {
-    __ Claim(shrink_slots);
+    // Build the remainder of the frame, including accounting for and
+    // filling in frame-specific header information, e.g. claiming the extra
+    // slot that other platforms explicitly push for STUB frames and frames
+    // recording their argument count.
+    __ Claim(shrink_slots + (fixed_frame_size & 1));
+    if (descriptor->PushArgumentCount()) {
+      __ Str(kJavaScriptCallArgCountRegister,
+             MemOperand(fp, OptimizedBuiltinFrameConstants::kArgCOffset));
+    }
+    bool is_stub_frame =
+        !descriptor->IsJSFunctionCall() && !descriptor->IsCFunctionCall();
+    if (is_stub_frame) {
+      UseScratchRegisterScope temps(masm());
+      Register temp = temps.AcquireX();
+      __ Mov(temp, Smi::FromInt(info()->GetOutputStackFrameType()));
+      __ Str(temp, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
+    }
   }
 
   // Save FP registers.
@@ -1857,8 +1868,7 @@
   }
 }
 
-
-void CodeGenerator::AssembleReturn() {
+void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
 
   // Restore registers.
@@ -1877,16 +1887,25 @@
 
   unwinding_info_writer_.MarkBlockWillExit();
 
+  Arm64OperandConverter g(this, nullptr);
   int pop_count = static_cast<int>(descriptor->StackParameterCount());
   if (descriptor->IsCFunctionCall()) {
     AssembleDeconstructFrame();
   } else if (frame_access_state()->has_frame()) {
-    // Canonicalize JSFunction return sites for now.
-    if (return_label_.is_bound()) {
-      __ B(&return_label_);
-      return;
+    // Canonicalize JSFunction return sites for now unless they have a
+    // variable number of stack slot pops.
+    if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+      if (return_label_.is_bound()) {
+        __ B(&return_label_);
+        return;
+      } else {
+        __ Bind(&return_label_);
+        AssembleDeconstructFrame();
+        if (descriptor->UseNativeStack()) {
+          pop_count += (pop_count & 1);  // align
+        }
+      }
     } else {
-      __ Bind(&return_label_);
       AssembleDeconstructFrame();
       if (descriptor->UseNativeStack()) {
         pop_count += (pop_count & 1);  // align
@@ -1895,7 +1914,16 @@
   } else if (descriptor->UseNativeStack()) {
     pop_count += (pop_count & 1);  // align
   }
-  __ Drop(pop_count);
+
+  if (pop->IsImmediate()) {
+    DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
+    pop_count += g.ToConstant(pop).ToInt32();
+    __ Drop(pop_count);
+  } else {
+    Register pop_reg = g.ToRegister(pop);
+    __ Add(pop_reg, pop_reg, pop_count);
+    __ Drop(pop_reg);
+  }
 
   if (descriptor->UseNativeStack()) {
     __ AssertCspAligned();
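AssembleReturn now receives the extra pop count as an instruction operand: a constant count is folded into a single Drop, while a register-held count is first biased by the fixed count and then dropped dynamically. A standalone sketch of the two paths (not V8 code; Masm and AssembleReturnSketch are hypothetical):

#include <cstdio>

// Toy "assembler" that just tracks the stack-pointer adjustment.
struct Masm {
  int sp = 100;
  void Drop(int slots) { sp += slots; }
};

void AssembleReturnSketch(Masm* masm, int fixed_pop_count, bool pop_is_imm,
                          int pop_value) {
  if (pop_is_imm) {
    // Models: pop_count += g.ToConstant(pop).ToInt32(); __ Drop(pop_count);
    masm->Drop(fixed_pop_count + pop_value);
  } else {
    // Models: __ Add(pop_reg, pop_reg, pop_count); __ Drop(pop_reg);
    int pop_reg = pop_value + fixed_pop_count;
    masm->Drop(pop_reg);
  }
}

int main() {
  Masm m;
  AssembleReturnSketch(&m, 2, true, 3);
  std::printf("sp=%d\n", m.sp);  // 105: five slots dropped in one go.
}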
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
index da27be8..0eef53c 100644
--- a/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -2152,6 +2152,20 @@
   }
 }
 
+void EmitBranchOrDeoptimize(InstructionSelector* selector,
+                            InstructionCode opcode, InstructionOperand value,
+                            FlagsContinuation* cont) {
+  Arm64OperandGenerator g(selector);
+  if (cont->IsBranch()) {
+    selector->Emit(cont->Encode(opcode), g.NoOutput(), value,
+                   g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else {
+    DCHECK(cont->IsDeoptimize());
+    selector->EmitDeoptimize(cont->Encode(opcode), g.NoOutput(), value,
+                             cont->reason(), cont->frame_state());
+  }
+}
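EmitBranchOrDeoptimize factors out the choice between a two-target branch and a deoptimization exit so TryEmitCbzOrTbz can serve both continuation kinds. A standalone sketch of the dispatch (not V8 code; ContinuationKind and the print statements are hypothetical stand-ins for Emit/EmitDeoptimize):

#include <cstdio>

enum class ContinuationKind { kBranch, kDeoptimize };

// Routes one compare-and-branch opcode to either a two-target branch or a
// deoptimization exit, mirroring the shape of EmitBranchOrDeoptimize.
void EmitSketch(ContinuationKind kind, int value) {
  if (kind == ContinuationKind::kBranch) {
    // Stands in for selector->Emit(...) with true/false block labels.
    std::printf("branch on %d to true/false blocks\n", value);
  } else {
    // Stands in for selector->EmitDeoptimize(...) with reason + frame state.
    std::printf("deoptimize when %d is taken\n", value);
  }
}

int main() {
  EmitSketch(ContinuationKind::kBranch, 7);
  EmitSketch(ContinuationKind::kDeoptimize, 7);
}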
+
 // Try to emit TBZ, TBNZ, CBZ or CBNZ for certain comparisons of {node}
 // against zero, depending on the condition.
 bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, Node* user,
@@ -2160,12 +2174,16 @@
   USE(m_user);
   DCHECK(m_user.right().Is(0) || m_user.left().Is(0));
 
-  // Only handle branches.
-  if (!cont->IsBranch()) return false;
+  // Only handle branches and deoptimisations.
+  if (!cont->IsBranch() && !cont->IsDeoptimize()) return false;
 
   switch (cond) {
     case kSignedLessThan:
     case kSignedGreaterThanOrEqual: {
+      // We don't generate TBZ/TBNZ for deoptimisations, as they have a
+      // shorter range than conditional branches and generating them for
+      // deoptimisations results in more veneers.
+      if (cont->IsDeoptimize()) return false;
       Arm64OperandGenerator g(selector);
       cont->Overwrite(MapForTbz(cond));
       Int32Matcher m(node);
@@ -2192,9 +2210,8 @@
     case kUnsignedGreaterThan: {
       Arm64OperandGenerator g(selector);
       cont->Overwrite(MapForCbz(cond));
-      selector->Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
-                     g.UseRegister(node), g.Label(cont->true_block()),
-                     g.Label(cont->false_block()));
+      EmitBranchOrDeoptimize(selector, kArm64CompareAndBranch32,
+                             g.UseRegister(node), cont);
       return true;
     }
     default:
@@ -2336,21 +2353,22 @@
 void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                           Node* value, FlagsContinuation* cont) {
   Arm64OperandGenerator g(selector);
-  while (selector->CanCover(user, value)) {
+  // Try to combine with comparisons against 0 by simply inverting the branch.
+  while (value->opcode() == IrOpcode::kWord32Equal &&
+         selector->CanCover(user, value)) {
+    Int32BinopMatcher m(value);
+    if (!m.right().Is(0)) break;
+
+    user = value;
+    value = m.left().node();
+    cont->Negate();
+  }
+
+  if (selector->CanCover(user, value)) {
     switch (value->opcode()) {
-      case IrOpcode::kWord32Equal: {
-        // Combine with comparisons against 0 by simply inverting the
-        // continuation.
-        Int32BinopMatcher m(value);
-        if (m.right().Is(0)) {
-          user = value;
-          value = m.left().node();
-          cont->Negate();
-          continue;
-        }
+      case IrOpcode::kWord32Equal:
         cont->OverwriteAndNegateIfEqual(kEqual);
         return VisitWord32Compare(selector, value, cont);
-      }
       case IrOpcode::kInt32LessThan:
         cont->OverwriteAndNegateIfEqual(kSignedLessThan);
         return VisitWord32Compare(selector, value, cont);
@@ -2380,10 +2398,10 @@
                                     kLogical64Imm);
           }
           // Merge the Word64Equal(x, 0) comparison into a cbz instruction.
-          if (cont->IsBranch()) {
-            selector->Emit(cont->Encode(kArm64CompareAndBranch), g.NoOutput(),
-                           g.UseRegister(left), g.Label(cont->true_block()),
-                           g.Label(cont->false_block()));
+          if (cont->IsBranch() || cont->IsDeoptimize()) {
+            EmitBranchOrDeoptimize(selector,
+                                   cont->Encode(kArm64CompareAndBranch),
+                                   g.UseRegister(left), cont);
             return;
           }
         }
@@ -2488,7 +2506,6 @@
       default:
         break;
     }
-    break;
   }
 
   // Branch could not be combined with a compare, compare against 0 and branch.
diff --git a/src/compiler/arm64/unwinding-info-writer-arm64.cc b/src/compiler/arm64/unwinding-info-writer-arm64.cc
index f4b732b..3095423 100644
--- a/src/compiler/arm64/unwinding-info-writer-arm64.cc
+++ b/src/compiler/arm64/unwinding-info-writer-arm64.cc
@@ -15,7 +15,8 @@
 
   block_will_exit_ = false;
 
-  DCHECK_LT(block->rpo_number().ToInt(), block_initial_states_.size());
+  DCHECK_LT(block->rpo_number().ToInt(),
+            static_cast<int>(block_initial_states_.size()));
   const BlockInitialState* initial_state =
       block_initial_states_[block->rpo_number().ToInt()];
   if (initial_state) {
@@ -42,7 +43,7 @@
 
   for (const RpoNumber& successor : block->successors()) {
     int successor_index = successor.ToInt();
-    DCHECK_LT(successor_index, block_initial_states_.size());
+    DCHECK_LT(successor_index, static_cast<int>(block_initial_states_.size()));
     const BlockInitialState* existing_state =
         block_initial_states_[successor_index];
 
diff --git a/src/compiler/ast-graph-builder.cc b/src/compiler/ast-graph-builder.cc
index b292a2e..1b7d116 100644
--- a/src/compiler/ast-graph-builder.cc
+++ b/src/compiler/ast-graph-builder.cc
@@ -433,7 +433,7 @@
       type_hint_analysis_(type_hint_analysis),
       state_values_cache_(jsgraph),
       liveness_analyzer_(static_cast<size_t>(info->scope()->num_stack_slots()),
-                         local_zone),
+                         false, local_zone),
       frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
           FrameStateType::kJavaScriptFunction, info->num_parameters() + 1,
           info->scope()->num_stack_slots(), info->shared_info())) {
@@ -613,7 +613,7 @@
 
   NonLiveFrameStateSlotReplacer replacer(
       &state_values_cache_, jsgraph()->OptimizedOutConstant(),
-      liveness_analyzer()->local_count(), local_zone());
+      liveness_analyzer()->local_count(), false, local_zone());
   Variable* arguments = info()->scope()->arguments();
   if (arguments != nullptr && arguments->IsStackAllocated()) {
     replacer.MarkPermanentlyLive(arguments->index());
@@ -788,8 +788,10 @@
 }
 
 AstGraphBuilder::Environment* AstGraphBuilder::Environment::CopyForOsrEntry() {
-  return new (zone())
-      Environment(this, builder_->liveness_analyzer()->NewBlock());
+  LivenessAnalyzerBlock* copy_block =
+      liveness_block() == nullptr ? nullptr
+                                  : builder_->liveness_analyzer()->NewBlock();
+  return new (zone()) Environment(this, copy_block);
 }
 
 AstGraphBuilder::Environment*
@@ -839,13 +841,6 @@
 }
 
 
-void AstGraphBuilder::Environment::UpdateStateValuesWithCache(
-    Node** state_values, int offset, int count) {
-  Node** env_values = (count == 0) ? nullptr : &values()->at(offset);
-  *state_values = builder_->state_values_cache_.GetNodeForValues(
-      env_values, static_cast<size_t>(count));
-}
-
 Node* AstGraphBuilder::Environment::Checkpoint(BailoutId ast_id,
                                                OutputFrameStateCombine combine,
                                                bool owner_has_exception) {
@@ -854,7 +849,7 @@
   }
 
   UpdateStateValues(&parameters_node_, 0, parameters_count());
-  UpdateStateValuesWithCache(&locals_node_, parameters_count(), locals_count());
+  UpdateStateValues(&locals_node_, parameters_count(), locals_count());
   UpdateStateValues(&stack_node_, parameters_count() + locals_count(),
                     stack_height());
 
@@ -2283,21 +2278,9 @@
   ConvertReceiverMode receiver_hint = ConvertReceiverMode::kAny;
   Node* receiver_value = nullptr;
   Node* callee_value = nullptr;
-  bool possibly_eval = false;
-  switch (call_type) {
-    case Call::GLOBAL_CALL: {
-      VariableProxy* proxy = callee->AsVariableProxy();
-      VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
-      PrepareEagerCheckpoint(BeforeId(proxy));
-      callee_value = BuildVariableLoad(proxy->var(), expr->expression()->id(),
-                                       pair, OutputFrameStateCombine::Push());
-      receiver_hint = ConvertReceiverMode::kNullOrUndefined;
-      receiver_value = jsgraph()->UndefinedConstant();
-      break;
-    }
-    case Call::LOOKUP_SLOT_CALL: {
+  if (expr->is_possibly_eval()) {
+    if (callee->AsVariableProxy()->var()->IsLookupSlot()) {
       Variable* variable = callee->AsVariableProxy()->var();
-      DCHECK(variable->location() == VariableLocation::LOOKUP);
       Node* name = jsgraph()->Constant(variable->name());
       const Operator* op =
           javascript()->CallRuntime(Runtime::kLoadLookupSlotForCall);
@@ -2306,89 +2289,26 @@
       receiver_value = NewNode(common()->Projection(1), pair);
       PrepareFrameState(pair, expr->LookupId(),
                         OutputFrameStateCombine::Push(2));
-      break;
+    } else {
+      VisitForValue(callee);
+      callee_value = environment()->Pop();
+      receiver_hint = ConvertReceiverMode::kNullOrUndefined;
+      receiver_value = jsgraph()->UndefinedConstant();
     }
-    case Call::NAMED_PROPERTY_CALL: {
-      Property* property = callee->AsProperty();
-      VectorSlotPair feedback =
-          CreateVectorSlotPair(property->PropertyFeedbackSlot());
-      VisitForValue(property->obj());
-      Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
-      Node* object = environment()->Top();
-      callee_value = BuildNamedLoad(object, name, feedback);
-      PrepareFrameState(callee_value, property->LoadId(),
-                        OutputFrameStateCombine::Push());
-      // Note that a property call requires the receiver to be wrapped into
-      // an object for sloppy callees. However the receiver is guaranteed
-      // not to be null or undefined at this point.
-      receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
-      receiver_value = environment()->Pop();
-      break;
-    }
-    case Call::KEYED_PROPERTY_CALL: {
-      Property* property = callee->AsProperty();
-      VectorSlotPair feedback =
-          CreateVectorSlotPair(property->PropertyFeedbackSlot());
-      VisitForValue(property->obj());
-      VisitForValue(property->key());
-      Node* key = environment()->Pop();
-      Node* object = environment()->Top();
-      callee_value = BuildKeyedLoad(object, key, feedback);
-      PrepareFrameState(callee_value, property->LoadId(),
-                        OutputFrameStateCombine::Push());
-      // Note that a property call requires the receiver to be wrapped into
-      // an object for sloppy callees. However the receiver is guaranteed
-      // not to be null or undefined at this point.
-      receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
-      receiver_value = environment()->Pop();
-      break;
-    }
-    case Call::NAMED_SUPER_PROPERTY_CALL: {
-      Property* property = callee->AsProperty();
-      SuperPropertyReference* super_ref =
-          property->obj()->AsSuperPropertyReference();
-      VisitForValue(super_ref->home_object());
-      VisitForValue(super_ref->this_var());
-      Node* home = environment()->Peek(1);
-      Node* object = environment()->Top();
-      Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
-      callee_value = BuildNamedSuperLoad(object, home, name, VectorSlotPair());
-      PrepareFrameState(callee_value, property->LoadId(),
-                        OutputFrameStateCombine::Push());
-      // Note that a property call requires the receiver to be wrapped into
-      // an object for sloppy callees. Since the receiver is not the target of
-      // the load, it could very well be null or undefined at this point.
-      receiver_value = environment()->Pop();
-      environment()->Drop(1);
-      break;
-    }
-    case Call::KEYED_SUPER_PROPERTY_CALL: {
-      Property* property = callee->AsProperty();
-      SuperPropertyReference* super_ref =
-          property->obj()->AsSuperPropertyReference();
-      VisitForValue(super_ref->home_object());
-      VisitForValue(super_ref->this_var());
-      environment()->Push(environment()->Top());    // Duplicate this_var.
-      environment()->Push(environment()->Peek(2));  // Duplicate home_obj.
-      VisitForValue(property->key());
-      Node* key = environment()->Pop();
-      Node* home = environment()->Pop();
-      Node* object = environment()->Pop();
-      callee_value = BuildKeyedSuperLoad(object, home, key, VectorSlotPair());
-      PrepareFrameState(callee_value, property->LoadId(),
-                        OutputFrameStateCombine::Push());
-      // Note that a property call requires the receiver to be wrapped into
-      // an object for sloppy callees. Since the receiver is not the target of
-      // the load, it could very well be null or undefined at this point.
-      receiver_value = environment()->Pop();
-      environment()->Drop(1);
-      break;
-    }
-    case Call::SUPER_CALL:
-      return VisitCallSuper(expr);
-    case Call::POSSIBLY_EVAL_CALL:
-      possibly_eval = true;
-      if (callee->AsVariableProxy()->var()->IsLookupSlot()) {
+  } else {
+    switch (call_type) {
+      case Call::GLOBAL_CALL: {
+        VariableProxy* proxy = callee->AsVariableProxy();
+        VectorSlotPair pair =
+            CreateVectorSlotPair(proxy->VariableFeedbackSlot());
+        PrepareEagerCheckpoint(BeforeId(proxy));
+        callee_value = BuildVariableLoad(proxy->var(), expr->expression()->id(),
+                                         pair, OutputFrameStateCombine::Push());
+        receiver_hint = ConvertReceiverMode::kNullOrUndefined;
+        receiver_value = jsgraph()->UndefinedConstant();
+        break;
+      }
+      case Call::WITH_CALL: {
         Variable* variable = callee->AsVariableProxy()->var();
         Node* name = jsgraph()->Constant(variable->name());
         const Operator* op =
@@ -2400,13 +2320,92 @@
                           OutputFrameStateCombine::Push(2));
         break;
       }
-    // Fall through.
-    case Call::OTHER_CALL:
-      VisitForValue(callee);
-      callee_value = environment()->Pop();
-      receiver_hint = ConvertReceiverMode::kNullOrUndefined;
-      receiver_value = jsgraph()->UndefinedConstant();
-      break;
+      case Call::NAMED_PROPERTY_CALL: {
+        Property* property = callee->AsProperty();
+        VectorSlotPair feedback =
+            CreateVectorSlotPair(property->PropertyFeedbackSlot());
+        VisitForValue(property->obj());
+        Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+        Node* object = environment()->Top();
+        callee_value = BuildNamedLoad(object, name, feedback);
+        PrepareFrameState(callee_value, property->LoadId(),
+                          OutputFrameStateCombine::Push());
+        // Note that a property call requires the receiver to be wrapped into
+        // an object for sloppy callees. However the receiver is guaranteed
+        // not to be null or undefined at this point.
+        receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
+        receiver_value = environment()->Pop();
+        break;
+      }
+      case Call::KEYED_PROPERTY_CALL: {
+        Property* property = callee->AsProperty();
+        VectorSlotPair feedback =
+            CreateVectorSlotPair(property->PropertyFeedbackSlot());
+        VisitForValue(property->obj());
+        VisitForValue(property->key());
+        Node* key = environment()->Pop();
+        Node* object = environment()->Top();
+        callee_value = BuildKeyedLoad(object, key, feedback);
+        PrepareFrameState(callee_value, property->LoadId(),
+                          OutputFrameStateCombine::Push());
+        // Note that a property call requires the receiver to be wrapped into
+        // an object for sloppy callees. However the receiver is guaranteed
+        // not to be null or undefined at this point.
+        receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
+        receiver_value = environment()->Pop();
+        break;
+      }
+      case Call::NAMED_SUPER_PROPERTY_CALL: {
+        Property* property = callee->AsProperty();
+        SuperPropertyReference* super_ref =
+            property->obj()->AsSuperPropertyReference();
+        VisitForValue(super_ref->home_object());
+        VisitForValue(super_ref->this_var());
+        Node* home = environment()->Peek(1);
+        Node* object = environment()->Top();
+        Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+        callee_value =
+            BuildNamedSuperLoad(object, home, name, VectorSlotPair());
+        PrepareFrameState(callee_value, property->LoadId(),
+                          OutputFrameStateCombine::Push());
+        // Note that a property call requires the receiver to be wrapped into
+        // an object for sloppy callees. Since the receiver is not the target of
+        // the load, it could very well be null or undefined at this point.
+        receiver_value = environment()->Pop();
+        environment()->Drop(1);
+        break;
+      }
+      case Call::KEYED_SUPER_PROPERTY_CALL: {
+        Property* property = callee->AsProperty();
+        SuperPropertyReference* super_ref =
+            property->obj()->AsSuperPropertyReference();
+        VisitForValue(super_ref->home_object());
+        VisitForValue(super_ref->this_var());
+        environment()->Push(environment()->Top());    // Duplicate this_var.
+        environment()->Push(environment()->Peek(2));  // Duplicate home_obj.
+        VisitForValue(property->key());
+        Node* key = environment()->Pop();
+        Node* home = environment()->Pop();
+        Node* object = environment()->Pop();
+        callee_value = BuildKeyedSuperLoad(object, home, key, VectorSlotPair());
+        PrepareFrameState(callee_value, property->LoadId(),
+                          OutputFrameStateCombine::Push());
+        // Note that a property call requires the receiver to be wrapped into
+        // an object for sloppy callees. Since the receiver is not the target of
+        // the load, it could very well be null or undefined at this point.
+        receiver_value = environment()->Pop();
+        environment()->Drop(1);
+        break;
+      }
+      case Call::SUPER_CALL:
+        return VisitCallSuper(expr);
+      case Call::OTHER_CALL:
+        VisitForValue(callee);
+        callee_value = environment()->Pop();
+        receiver_hint = ConvertReceiverMode::kNullOrUndefined;
+        receiver_value = jsgraph()->UndefinedConstant();
+        break;
+    }
   }
 
   // The callee and the receiver both have to be pushed onto the operand stack
@@ -2420,7 +2419,7 @@
 
   // Resolve callee for a potential direct eval call. This block will mutate the
   // callee value pushed onto the environment.
-  if (possibly_eval && args->length() > 0) {
+  if (expr->is_possibly_eval() && args->length() > 0) {
     int arg_count = args->length();
 
     // Extract callee and source string from the environment.
@@ -2451,7 +2450,8 @@
   const Operator* call =
       javascript()->CallFunction(args->length() + 2, frequency, feedback,
                                  receiver_hint, expr->tail_call_mode());
-  PrepareEagerCheckpoint(possibly_eval ? expr->EvalId() : expr->CallId());
+  PrepareEagerCheckpoint(expr->is_possibly_eval() ? expr->EvalId()
+                                                  : expr->CallId());
   Node* value = ProcessArguments(call, args->length() + 2);
   // The callee passed to the call, we just need to push something here to
   // satisfy the bailout location contract. The fullcodegen code will not
@@ -2893,8 +2893,7 @@
   UNREACHABLE();
 }
 
-
-void AstGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
+void AstGraphBuilder::VisitDeclarations(Declaration::List* declarations) {
   DCHECK(globals()->empty());
   AstVisitor<AstGraphBuilder>::VisitDeclarations(declarations);
   if (globals()->empty()) return;
@@ -3357,7 +3356,11 @@
     case VariableLocation::CONTEXT: {
       // Context variable (potentially up the context chain).
       int depth = current_scope()->ContextChainLength(variable->scope());
-      bool immutable = variable->maybe_assigned() == kNotAssigned;
+      // TODO(mstarzinger): The {maybe_assigned} flag computed during variable
+      // resolution is highly inaccurate and cannot be trusted. We are only
+      // taking this information into account when asm.js compilation is used.
+      bool immutable = variable->maybe_assigned() == kNotAssigned &&
+                       info()->is_function_context_specializing();
       const Operator* op =
           javascript()->LoadContext(depth, variable->index(), immutable);
       Node* value = NewNode(op, current_context());
@@ -3775,7 +3778,8 @@
     return_value =
         NewNode(javascript()->CallRuntime(Runtime::kTraceExit), return_value);
   }
-  Node* control = NewNode(common()->Return(), return_value);
+  Node* pop_node = jsgraph()->ZeroConstant();
+  Node* control = NewNode(common()->Return(), pop_node, return_value);
   UpdateControlDependencyToLeaveFunction(control);
   return control;
 }
@@ -4202,27 +4206,49 @@
                                         graph->start(), graph->start());
   UpdateControlDependency(osr_loop_entry);
   UpdateEffectDependency(osr_loop_entry);
+
   // Set OSR values.
   for (int i = 0; i < size; ++i) {
     values()->at(i) =
         graph->NewNode(builder_->common()->OsrValue(i), osr_loop_entry);
   }
 
-  // Set the contexts.
-  // The innermost context is the OSR value, and the outer contexts are
-  // reconstructed by dynamically walking up the context chain.
-  Node* osr_context = nullptr;
-  const Operator* op =
-      builder_->javascript()->LoadContext(0, Context::PREVIOUS_INDEX, true);
+  // Set the innermost context.
   const Operator* op_inner =
       builder_->common()->OsrValue(Linkage::kOsrContextSpillSlotIndex);
+  contexts()->back() = graph->NewNode(op_inner, osr_loop_entry);
+
+  // Create a checkpoint.
+  Node* frame_state = Checkpoint(builder_->info()->osr_ast_id());
+  Node* checkpoint = graph->NewNode(common()->Checkpoint(), frame_state,
+                                    osr_loop_entry, osr_loop_entry);
+  UpdateEffectDependency(checkpoint);
+
+  // Create the OSR guard nodes.
+  const Operator* guard_op =
+      builder_->info()->is_deoptimization_enabled()
+          ? builder_->common()->OsrGuard(OsrGuardType::kUninitialized)
+          : builder_->common()->OsrGuard(OsrGuardType::kAny);
+  Node* effect = checkpoint;
+  for (int i = 0; i < size; ++i) {
+    values()->at(i) = effect =
+        graph->NewNode(guard_op, values()->at(i), effect, osr_loop_entry);
+  }
+  contexts()->back() = effect =
+      graph->NewNode(guard_op, contexts()->back(), effect, osr_loop_entry);
+
+  // The innermost context is the OSR value, and the outer contexts are
+  // reconstructed by dynamically walking up the context chain.
+  const Operator* load_op =
+      builder_->javascript()->LoadContext(0, Context::PREVIOUS_INDEX, true);
+  Node* osr_context = effect = contexts()->back();
   int last = static_cast<int>(contexts()->size() - 1);
-  for (int i = last; i >= 0; i--) {
-    osr_context = (i == last) ? graph->NewNode(op_inner, osr_loop_entry)
-                              : graph->NewNode(op, osr_context, osr_context,
-                                               osr_loop_entry);
+  for (int i = last - 1; i >= 0; i--) {
+    osr_context = effect =
+        graph->NewNode(load_op, osr_context, osr_context, effect);
     contexts()->at(i) = osr_context;
   }
+  UpdateEffectDependency(effect);
 }
 
 void AstGraphBuilder::Environment::PrepareForLoop(BitVector* assigned) {
@@ -4335,6 +4361,16 @@
   return value;
 }
 
+AstGraphBuilderWithPositions::AstGraphBuilderWithPositions(
+    Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
+    float invocation_frequency, LoopAssignmentAnalysis* loop_assignment,
+    TypeHintAnalysis* type_hint_analysis, SourcePositionTable* source_positions,
+    int inlining_id)
+    : AstGraphBuilder(local_zone, info, jsgraph, invocation_frequency,
+                      loop_assignment, type_hint_analysis),
+      source_positions_(source_positions),
+      start_position_(info->shared_info()->start_position(), inlining_id) {}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/ast-graph-builder.h b/src/compiler/ast-graph-builder.h
index 27f2c9b..2013f50 100644
--- a/src/compiler/ast-graph-builder.h
+++ b/src/compiler/ast-graph-builder.h
@@ -6,6 +6,7 @@
 #define V8_COMPILER_AST_GRAPH_BUILDER_H_
 
 #include "src/ast/ast.h"
+#include "src/compiler/compiler-source-position-table.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/liveness-analyzer.h"
 #include "src/compiler/state-values-utils.h"
@@ -61,7 +62,7 @@
 #undef DECLARE_VISIT
 
   // Visiting function for declarations list is overridden.
-  void VisitDeclarations(ZoneList<Declaration*>* declarations);
+  void VisitDeclarations(Declaration::List* declarations);
 
  private:
   class AstContext;
@@ -601,7 +602,6 @@
                        LivenessAnalyzerBlock* liveness_block);
   Environment* CopyAndShareLiveness();
   void UpdateStateValues(Node** state_values, int offset, int count);
-  void UpdateStateValuesWithCache(Node** state_values, int offset, int count);
   Zone* zone() const { return builder_->local_zone(); }
   Graph* graph() const { return builder_->graph(); }
   AstGraphBuilder* builder() const { return builder_; }
@@ -617,6 +617,35 @@
   void PrepareForOsrEntry();
 };
 
+class AstGraphBuilderWithPositions final : public AstGraphBuilder {
+ public:
+  AstGraphBuilderWithPositions(Zone* local_zone, CompilationInfo* info,
+                               JSGraph* jsgraph, float invocation_frequency,
+                               LoopAssignmentAnalysis* loop_assignment,
+                               TypeHintAnalysis* type_hint_analysis,
+                               SourcePositionTable* source_positions,
+                               int inlining_id = SourcePosition::kNotInlined);
+
+  bool CreateGraph(bool stack_check = true) {
+    SourcePositionTable::Scope pos_scope(source_positions_, start_position_);
+    return AstGraphBuilder::CreateGraph(stack_check);
+  }
+
+#define DEF_VISIT(type)                                                  \
+  void Visit##type(type* node) override {                                \
+    SourcePositionTable::Scope pos(                                      \
+        source_positions_,                                               \
+        SourcePosition(node->position(), start_position_.InliningId())); \
+    AstGraphBuilder::Visit##type(node);                                  \
+  }
+  AST_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+ private:
+  SourcePositionTable* const source_positions_;
+  SourcePosition const start_position_;
+};
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
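AstGraphBuilderWithPositions relies on an RAII scope: each overridden Visit##type installs the node's source position for the duration of the call and restores the previous one on exit, so every graph node created inside inherits the right position. A standalone sketch of the scope pattern (not V8 code; PositionTable is hypothetical):

#include <iostream>

struct PositionTable {
  int current = -1;  // Models an unknown source position.
  struct Scope {
    PositionTable* table;
    int previous;
    Scope(PositionTable* t, int pos) : table(t), previous(t->current) {
      table->current = pos;  // Install the node's position...
    }
    ~Scope() { table->current = previous; }  // ...and restore it on exit.
  };
};

void VisitLiteral(PositionTable* positions) {
  PositionTable::Scope pos(positions, 42);  // Models node->position().
  std::cout << "emitting node at position " << positions->current << "\n";
}

int main() {
  PositionTable table;
  VisitLiteral(&table);                                   // position 42
  std::cout << "restored to " << table.current << "\n";   // -1
}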
diff --git a/src/compiler/branch-elimination.h b/src/compiler/branch-elimination.h
index 7abeeca..3c2cdb2 100644
--- a/src/compiler/branch-elimination.h
+++ b/src/compiler/branch-elimination.h
@@ -5,7 +5,9 @@
 #ifndef V8_COMPILER_BRANCH_CONDITION_ELIMINATION_H_
 #define V8_COMPILER_BRANCH_CONDITION_ELIMINATION_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -15,8 +17,8 @@
 class CommonOperatorBuilder;
 class JSGraph;
 
-
-class BranchElimination final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE BranchElimination final
+    : public NON_EXPORTED_BASE(AdvancedReducer) {
  public:
   BranchElimination(Editor* editor, JSGraph* js_graph, Zone* zone);
   ~BranchElimination() final;
diff --git a/src/compiler/bytecode-graph-builder.cc b/src/compiler/bytecode-graph-builder.cc
index d26ff93..34b50df 100644
--- a/src/compiler/bytecode-graph-builder.cc
+++ b/src/compiler/bytecode-graph-builder.cc
@@ -8,6 +8,7 @@
 #include "src/ast/scopes.h"
 #include "src/compilation-info.h"
 #include "src/compiler/bytecode-branch-analysis.h"
+#include "src/compiler/compiler-source-position-table.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/operator-properties.h"
 #include "src/interpreter/bytecodes.h"
@@ -25,6 +26,11 @@
   Environment(BytecodeGraphBuilder* builder, int register_count,
               int parameter_count, Node* control_dependency, Node* context);
 
+  // Specifies whether environment binding methods should attach frame state
+  // inputs to nodes representing the value being bound. This is done because
+  // the {OutputFrameStateCombine} is closely related to the binding method.
+  enum FrameStateAttachmentMode { kAttachFrameState, kDontAttachFrameState };
+
   int parameter_count() const { return parameter_count_; }
   int register_count() const { return register_count_; }
 
@@ -32,12 +38,15 @@
   Node* LookupRegister(interpreter::Register the_register) const;
   void MarkAllRegistersLive();
 
-  void BindAccumulator(Node* node, FrameStateBeforeAndAfter* states = nullptr);
+  void BindAccumulator(Node* node,
+                       FrameStateAttachmentMode mode = kDontAttachFrameState);
   void BindRegister(interpreter::Register the_register, Node* node,
-                    FrameStateBeforeAndAfter* states = nullptr);
-  void BindRegistersToProjections(interpreter::Register first_reg, Node* node,
-                                  FrameStateBeforeAndAfter* states = nullptr);
-  void RecordAfterState(Node* node, FrameStateBeforeAndAfter* states);
+                    FrameStateAttachmentMode mode = kDontAttachFrameState);
+  void BindRegistersToProjections(
+      interpreter::Register first_reg, Node* node,
+      FrameStateAttachmentMode mode = kDontAttachFrameState);
+  void RecordAfterState(Node* node,
+                        FrameStateAttachmentMode mode = kDontAttachFrameState);
 
   // Effect dependency tracked by this environment.
   Node* GetEffectDependency() { return effect_dependency_; }
@@ -50,10 +59,6 @@
   Node* Checkpoint(BailoutId bytecode_offset, OutputFrameStateCombine combine,
                    bool owner_has_exception);
 
-  // Returns true if the state values are up to date with the current
-  // environment.
-  bool StateValuesAreUpToDate(int output_poke_offset, int output_poke_count);
-
   // Control dependency tracked by this environment.
   Node* GetControlDependency() const { return control_dependency_; }
   void UpdateControlDependency(Node* dependency) {
@@ -75,14 +80,8 @@
   Environment(const Environment* copy, LivenessAnalyzerBlock* liveness_block);
   void PrepareForLoop();
 
-  enum { kNotCached, kCached };
-
-  bool StateValuesAreUpToDate(Node** state_values, int offset, int count,
-                              int output_poke_start, int output_poke_end,
-                              int cached = kNotCached);
   bool StateValuesRequireUpdate(Node** state_values, int offset, int count);
   void UpdateStateValues(Node** state_values, int offset, int count);
-  void UpdateStateValuesWithCache(Node** state_values, int offset, int count);
 
   int RegisterToValuesIndex(interpreter::Register the_register) const;
 
@@ -113,69 +112,6 @@
   int accumulator_base_;
 };
 
-// Helper for generating frame states for before and after a bytecode.
-class BytecodeGraphBuilder::FrameStateBeforeAndAfter {
- public:
-  explicit FrameStateBeforeAndAfter(BytecodeGraphBuilder* builder)
-      : builder_(builder),
-        id_after_(BailoutId::None()),
-        added_to_node_(false),
-        frame_states_unused_(false),
-        output_poke_offset_(0),
-        output_poke_count_(0) {
-    BailoutId id_before(builder->bytecode_iterator().current_offset());
-    frame_state_before_ = builder_->environment()->Checkpoint(
-        id_before, OutputFrameStateCombine::Ignore(), false);
-    id_after_ = BailoutId(id_before.ToInt() +
-                          builder->bytecode_iterator().current_bytecode_size());
-    // Create an explicit checkpoint node for before the operation.
-    Node* node = builder_->NewNode(builder_->common()->Checkpoint());
-    DCHECK_EQ(IrOpcode::kDead,
-              NodeProperties::GetFrameStateInput(node)->opcode());
-    NodeProperties::ReplaceFrameStateInput(node, frame_state_before_);
-  }
-
-  ~FrameStateBeforeAndAfter() {
-    DCHECK(added_to_node_);
-    DCHECK(frame_states_unused_ ||
-           builder_->environment()->StateValuesAreUpToDate(output_poke_offset_,
-                                                           output_poke_count_));
-  }
-
- private:
-  friend class Environment;
-
-  void AddToNode(Node* node, OutputFrameStateCombine combine) {
-    DCHECK(!added_to_node_);
-    bool has_frame_state = OperatorProperties::HasFrameStateInput(node->op());
-    if (has_frame_state) {
-      // Add the frame state for after the operation.
-      DCHECK_EQ(IrOpcode::kDead,
-                NodeProperties::GetFrameStateInput(node)->opcode());
-      bool has_exception = NodeProperties::IsExceptionalCall(node);
-      Node* frame_state_after = builder_->environment()->Checkpoint(
-          id_after_, combine, has_exception);
-      NodeProperties::ReplaceFrameStateInput(node, frame_state_after);
-    }
-
-    if (!combine.IsOutputIgnored()) {
-      output_poke_offset_ = static_cast<int>(combine.GetOffsetToPokeAt());
-      output_poke_count_ = node->op()->ValueOutputCount();
-    }
-    frame_states_unused_ = !has_frame_state;
-    added_to_node_ = true;
-  }
-
-  BytecodeGraphBuilder* builder_;
-  Node* frame_state_before_;
-  BailoutId id_after_;
-
-  bool added_to_node_;
-  bool frame_states_unused_;
-  int output_poke_offset_;
-  int output_poke_count_;
-};
-
 
 // Issues:
 // - Scopes - intimately tied to AST. Need to eval what is needed.
@@ -259,6 +195,10 @@
 }
 
 Node* BytecodeGraphBuilder::Environment::LookupAccumulator() const {
+  DCHECK(IsLivenessBlockConsistent());
+  if (liveness_block() != nullptr) {
+    liveness_block()->LookupAccumulator();
+  }
   return values()->at(accumulator_base_);
 }
 
@@ -291,21 +231,24 @@
 }
 
 void BytecodeGraphBuilder::Environment::BindAccumulator(
-    Node* node, FrameStateBeforeAndAfter* states) {
-  if (states) {
-    states->AddToNode(node, OutputFrameStateCombine::PokeAt(0));
+    Node* node, FrameStateAttachmentMode mode) {
+  if (mode == FrameStateAttachmentMode::kAttachFrameState) {
+    builder()->PrepareFrameState(node, OutputFrameStateCombine::PokeAt(0));
+  }
+  DCHECK(IsLivenessBlockConsistent());
+  if (liveness_block() != nullptr) {
+    liveness_block()->BindAccumulator();
   }
   values()->at(accumulator_base_) = node;
 }
 
-
 void BytecodeGraphBuilder::Environment::BindRegister(
     interpreter::Register the_register, Node* node,
-    FrameStateBeforeAndAfter* states) {
+    FrameStateAttachmentMode mode) {
   int values_index = RegisterToValuesIndex(the_register);
-  if (states) {
-    states->AddToNode(node, OutputFrameStateCombine::PokeAt(accumulator_base_ -
-                                                            values_index));
+  if (mode == FrameStateAttachmentMode::kAttachFrameState) {
+    builder()->PrepareFrameState(node, OutputFrameStateCombine::PokeAt(
+                                           accumulator_base_ - values_index));
   }
   values()->at(values_index) = node;
   if (liveness_block() != nullptr && !the_register.is_parameter()) {
@@ -314,14 +257,13 @@
   }
 }
 
-
 void BytecodeGraphBuilder::Environment::BindRegistersToProjections(
     interpreter::Register first_reg, Node* node,
-    FrameStateBeforeAndAfter* states) {
+    FrameStateAttachmentMode mode) {
   int values_index = RegisterToValuesIndex(first_reg);
-  if (states) {
-    states->AddToNode(node, OutputFrameStateCombine::PokeAt(accumulator_base_ -
-                                                            values_index));
+  if (mode == FrameStateAttachmentMode::kAttachFrameState) {
+    builder()->PrepareFrameState(node, OutputFrameStateCombine::PokeAt(
+                                           accumulator_base_ - values_index));
   }
   for (int i = 0; i < node->op()->ValueOutputCount(); i++) {
     values()->at(values_index + i) =
@@ -329,10 +271,11 @@
   }
 }
 
-
 void BytecodeGraphBuilder::Environment::RecordAfterState(
-    Node* node, FrameStateBeforeAndAfter* states) {
-  states->AddToNode(node, OutputFrameStateCombine::Ignore());
+    Node* node, FrameStateAttachmentMode mode) {
+  if (mode == FrameStateAttachmentMode::kAttachFrameState) {
+    builder()->PrepareFrameState(node, OutputFrameStateCombine::Ignore());
+  }
 }
 
 
@@ -438,6 +381,24 @@
     if (i >= accumulator_base()) idx = Linkage::kOsrAccumulatorRegisterIndex;
     values()->at(i) = graph()->NewNode(common()->OsrValue(idx), entry);
   }
+
+  BailoutId loop_id(builder_->bytecode_iterator().current_offset());
+  Node* frame_state =
+      Checkpoint(loop_id, OutputFrameStateCombine::Ignore(), false);
+  Node* checkpoint =
+      graph()->NewNode(common()->Checkpoint(), frame_state, entry, entry);
+  UpdateEffectDependency(checkpoint);
+
+  // Create the OSR guard nodes.
+  const Operator* guard_op = common()->OsrGuard(OsrGuardType::kUninitialized);
+  Node* effect = checkpoint;
+  for (int i = 0; i < size; i++) {
+    values()->at(i) = effect =
+        graph()->NewNode(guard_op, values()->at(i), effect, entry);
+  }
+  Node* context = effect = graph()->NewNode(guard_op, Context(), effect, entry);
+  SetContext(context);
+  UpdateEffectDependency(effect);
 }
 
 bool BytecodeGraphBuilder::Environment::StateValuesRequireUpdate(
@@ -491,19 +452,12 @@
   }
 }
 
-void BytecodeGraphBuilder::Environment::UpdateStateValuesWithCache(
-    Node** state_values, int offset, int count) {
-  Node** env_values = (count == 0) ? nullptr : &values()->at(offset);
-  *state_values = builder_->state_values_cache_.GetNodeForValues(
-      env_values, static_cast<size_t>(count));
-}
-
 Node* BytecodeGraphBuilder::Environment::Checkpoint(
     BailoutId bailout_id, OutputFrameStateCombine combine,
     bool owner_has_exception) {
   UpdateStateValues(&parameters_state_values_, 0, parameter_count());
-  UpdateStateValuesWithCache(&registers_state_values_, register_base(),
-                             register_count());
+  UpdateStateValues(&registers_state_values_, register_base(),
+                    register_count());
   UpdateStateValues(&accumulator_state_values_, accumulator_base(), 1);
 
   const Operator* op = common()->FrameState(
@@ -528,51 +482,10 @@
   return result;
 }
 
-bool BytecodeGraphBuilder::Environment::StateValuesAreUpToDate(
-    Node** state_values, int offset, int count, int output_poke_start,
-    int output_poke_end, int cached) {
-  DCHECK_LE(static_cast<size_t>(offset + count), values()->size());
-  if (cached == kNotCached) {
-    for (int i = 0; i < count; i++, offset++) {
-      if (offset < output_poke_start || offset >= output_poke_end) {
-        if ((*state_values)->InputAt(i) != values()->at(offset)) {
-          return false;
-        }
-      }
-    }
-  } else {
-    for (StateValuesAccess::TypedNode state_value :
-         StateValuesAccess(*state_values)) {
-      if (offset < output_poke_start || offset >= output_poke_end) {
-        if (state_value.node != values()->at(offset)) {
-          return false;
-        }
-      }
-      ++offset;
-    }
-  }
-  return true;
-}
-
-
-bool BytecodeGraphBuilder::Environment::StateValuesAreUpToDate(
-    int output_poke_offset, int output_poke_count) {
-  // Poke offset is relative to the top of the stack (i.e., the accumulator).
-  int output_poke_start = accumulator_base() - output_poke_offset;
-  int output_poke_end = output_poke_start + output_poke_count;
-  return StateValuesAreUpToDate(&parameters_state_values_, 0, parameter_count(),
-                                output_poke_start, output_poke_end) &&
-         StateValuesAreUpToDate(&registers_state_values_, register_base(),
-                                register_count(), output_poke_start,
-                                output_poke_end, kCached) &&
-         StateValuesAreUpToDate(&accumulator_state_values_, accumulator_base(),
-                                1, output_poke_start, output_poke_end);
-}
-
-BytecodeGraphBuilder::BytecodeGraphBuilder(Zone* local_zone,
-                                           CompilationInfo* info,
-                                           JSGraph* jsgraph,
-                                           float invocation_frequency)
+BytecodeGraphBuilder::BytecodeGraphBuilder(
+    Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
+    float invocation_frequency, SourcePositionTable* source_positions,
+    int inlining_id)
     : local_zone_(local_zone),
       jsgraph_(jsgraph),
       invocation_frequency_(invocation_frequency),
@@ -595,8 +508,10 @@
                                     info->is_deoptimization_enabled()),
       state_values_cache_(jsgraph),
       liveness_analyzer_(
-          static_cast<size_t>(bytecode_array()->register_count()), local_zone) {
-}
+          static_cast<size_t>(bytecode_array()->register_count()), true,
+          local_zone),
+      source_positions_(source_positions),
+      start_position_(info->shared_info()->start_position(), inlining_id) {}
 
 Node* BytecodeGraphBuilder::GetNewTarget() {
   if (!new_target_.is_set()) {
@@ -649,7 +564,9 @@
   return VectorSlotPair(feedback_vector(), slot);
 }
 
-bool BytecodeGraphBuilder::CreateGraph() {
+bool BytecodeGraphBuilder::CreateGraph(bool stack_check) {
+  SourcePositionTable::Scope pos_scope(source_positions_, start_position_);
+
   // Set up the basic structure of the graph. Outputs for {Start} are the formal
   // parameters (including the receiver) plus new target, number of arguments,
   // context and closure.
@@ -661,7 +578,7 @@
                   GetFunctionContext());
   set_environment(&env);
 
-  VisitBytecodes();
+  VisitBytecodes(stack_check);
 
   // Finish the basic structure of the graph.
   DCHECK_NE(0u, exit_controls_.size());
@@ -675,13 +592,44 @@
   return true;
 }
 
+void BytecodeGraphBuilder::PrepareEagerCheckpoint() {
+  if (environment()->GetEffectDependency()->opcode() != IrOpcode::kCheckpoint) {
+    // Create an explicit checkpoint node for before the operation. This only
+    // needs to happen if we aren't effect-dominated by a {Checkpoint} already.
+    Node* node = NewNode(common()->Checkpoint());
+    DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
+    DCHECK_EQ(IrOpcode::kDead,
+              NodeProperties::GetFrameStateInput(node)->opcode());
+    BailoutId bailout_id(bytecode_iterator().current_offset());
+    Node* frame_state_before = environment()->Checkpoint(
+        bailout_id, OutputFrameStateCombine::Ignore(), false);
+    NodeProperties::ReplaceFrameStateInput(node, frame_state_before);
+  }
+}
+
+void BytecodeGraphBuilder::PrepareFrameState(Node* node,
+                                             OutputFrameStateCombine combine) {
+  if (OperatorProperties::HasFrameStateInput(node->op())) {
+    // Add the frame state for after the operation. The node in question has
+    // already been created and had a {Dead} frame state input up until now.
+    DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
+    DCHECK_EQ(IrOpcode::kDead,
+              NodeProperties::GetFrameStateInput(node)->opcode());
+    BailoutId bailout_id(bytecode_iterator().current_offset());
+    bool has_exception = NodeProperties::IsExceptionalCall(node);
+    Node* frame_state_after =
+        environment()->Checkpoint(bailout_id, combine, has_exception);
+    NodeProperties::ReplaceFrameStateInput(node, frame_state_after);
+  }
+}
+
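PrepareEagerCheckpoint replaces the old FrameStateBeforeAndAfter helper with a simple dominance check: a fresh Checkpoint node is created only when the current effect dependency is not already one, so back-to-back eager checkpoints collapse. A standalone sketch of that guard (not V8 code; Env and the counter are hypothetical):

#include <iostream>

enum class Opcode { kCheckpoint, kOther };

struct Env {
  Opcode effect_dependency = Opcode::kOther;
  int checkpoints_created = 0;
  void PrepareEagerCheckpoint() {
    // Only create a Checkpoint when not already effect-dominated by one.
    if (effect_dependency != Opcode::kCheckpoint) {
      ++checkpoints_created;                    // NewNode(common()->Checkpoint())
      effect_dependency = Opcode::kCheckpoint;  // Now dominated by it.
    }
  }
};

int main() {
  Env env;
  env.PrepareEagerCheckpoint();
  env.PrepareEagerCheckpoint();  // No-op: the first checkpoint still covers us.
  std::cout << env.checkpoints_created << "\n";  // 1
}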
 void BytecodeGraphBuilder::ClearNonLiveSlotsInFrameStates() {
   if (!IsLivenessAnalysisEnabled()) {
     return;
   }
   NonLiveFrameStateSlotReplacer replacer(
       &state_values_cache_, jsgraph()->OptimizedOutConstant(),
-      liveness_analyzer()->local_count(), local_zone());
+      liveness_analyzer()->local_count(), true, local_zone());
   liveness_analyzer()->Run(&replacer);
   if (FLAG_trace_environment_liveness) {
     OFStream os(stdout);
@@ -689,24 +637,36 @@
   }
 }
 
-void BytecodeGraphBuilder::VisitBytecodes() {
+void BytecodeGraphBuilder::VisitBytecodes(bool stack_check) {
   BytecodeBranchAnalysis analysis(bytecode_array(), local_zone());
   BytecodeLoopAnalysis loop_analysis(bytecode_array(), &analysis, local_zone());
   analysis.Analyze();
   loop_analysis.Analyze();
   set_branch_analysis(&analysis);
   set_loop_analysis(&loop_analysis);
+
   interpreter::BytecodeArrayIterator iterator(bytecode_array());
   set_bytecode_iterator(&iterator);
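+  // Walk the source position table in lockstep with the bytecode iterator.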
+  SourcePositionTableIterator source_position_iterator(
+      bytecode_array()->source_position_table());
+
   BuildOSRNormalEntryPoint();
-  while (!iterator.done()) {
+  for (; !iterator.done(); iterator.Advance()) {
     int current_offset = iterator.current_offset();
+    UpdateCurrentSourcePosition(&source_position_iterator, current_offset);
     EnterAndExitExceptionHandlers(current_offset);
     SwitchToMergeEnvironment(current_offset);
     if (environment() != nullptr) {
       BuildLoopHeaderEnvironment(current_offset);
       BuildOSRLoopEntryPoint(current_offset);
 
+      // Skip the first stack check if {stack_check} is false.
+      if (!stack_check &&
+          iterator.current_bytecode() == interpreter::Bytecode::kStackCheck) {
+        stack_check = true;
+        continue;
+      }
+
       switch (iterator.current_bytecode()) {
 #define BYTECODE_CASE(name, ...)       \
   case interpreter::Bytecode::k##name: \
@@ -716,8 +676,8 @@
 #undef BYTECODE_CASE
       }
     }
-    iterator.Advance();
   }
+
   set_branch_analysis(nullptr);
   set_bytecode_iterator(nullptr);
   DCHECK(exception_handlers_.empty());
@@ -744,11 +704,6 @@
   environment()->BindAccumulator(node);
 }
 
-void BytecodeGraphBuilder::VisitLdrUndefined() {
-  Node* node = jsgraph()->UndefinedConstant();
-  environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0), node);
-}
-
 void BytecodeGraphBuilder::VisitLdaNull() {
   Node* node = jsgraph()->NullConstant();
   environment()->BindAccumulator(node);
@@ -797,29 +752,21 @@
 }
 
 void BytecodeGraphBuilder::VisitLdaGlobal() {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
   Node* node = BuildLoadGlobal(bytecode_iterator().GetIndexOperand(0),
                                TypeofMode::NOT_INSIDE_TYPEOF);
-  environment()->BindAccumulator(node, &states);
-}
-
-void BytecodeGraphBuilder::VisitLdrGlobal() {
-  FrameStateBeforeAndAfter states(this);
-  Node* node = BuildLoadGlobal(bytecode_iterator().GetIndexOperand(0),
-                               TypeofMode::NOT_INSIDE_TYPEOF);
-  environment()->BindRegister(bytecode_iterator().GetRegisterOperand(1), node,
-                              &states);
+  environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
   Node* node = BuildLoadGlobal(bytecode_iterator().GetIndexOperand(0),
                                TypeofMode::INSIDE_TYPEOF);
-  environment()->BindAccumulator(node, &states);
+  environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::BuildStoreGlobal(LanguageMode language_mode) {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
   Handle<Name> name =
       Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
   VectorSlotPair feedback =
@@ -828,7 +775,7 @@
 
   const Operator* op = javascript()->StoreGlobal(language_mode, name, feedback);
   Node* node = NewNode(op, value, GetFunctionClosure());
-  environment()->RecordAfterState(node, &states);
+  environment()->RecordAfterState(node, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitStaGlobalSloppy() {
@@ -839,7 +786,7 @@
   BuildStoreGlobal(LanguageMode::STRICT);
 }
 
-Node* BytecodeGraphBuilder::BuildLoadContextSlot() {
+void BytecodeGraphBuilder::VisitLdaContextSlot() {
   // TODO(mythria): The immutable flag is also set to false. This information
   // is not available in the bytecode array. Update this code when the
   // implementation changes.
@@ -848,17 +795,19 @@
       bytecode_iterator().GetIndexOperand(1), false);
   Node* context =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
-  return NewNode(op, context);
-}
-
-void BytecodeGraphBuilder::VisitLdaContextSlot() {
-  Node* node = BuildLoadContextSlot();
+  Node* node = NewNode(op, context);
   environment()->BindAccumulator(node);
 }
 
-void BytecodeGraphBuilder::VisitLdrContextSlot() {
-  Node* node = BuildLoadContextSlot();
-  environment()->BindRegister(bytecode_iterator().GetRegisterOperand(3), node);
+void BytecodeGraphBuilder::VisitLdaCurrentContextSlot() {
+  // TODO(mythria): The immutable flag is also set to false. This information
+  // is not available in the bytecode array. Update this code when the
+  // implementation changes.
+  const Operator* op = javascript()->LoadContext(
+      0, bytecode_iterator().GetIndexOperand(0), false);
+  Node* context = environment()->Context();
+  Node* node = NewNode(op, context);
+  environment()->BindAccumulator(node);
 }
 
 void BytecodeGraphBuilder::VisitStaContextSlot() {
@@ -871,8 +820,16 @@
   NewNode(op, context, value);
 }
 
+void BytecodeGraphBuilder::VisitStaCurrentContextSlot() {
+  const Operator* op =
+      javascript()->StoreContext(0, bytecode_iterator().GetIndexOperand(0));
+  Node* context = environment()->Context();
+  Node* value = environment()->LookupAccumulator();
+  NewNode(op, context, value);
+}
+
 void BytecodeGraphBuilder::BuildLdaLookupSlot(TypeofMode typeof_mode) {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
   Node* name =
       jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
   const Operator* op =
@@ -880,7 +837,7 @@
                                     ? Runtime::kLoadLookupSlot
                                     : Runtime::kLoadLookupSlotInsideTypeof);
   Node* value = NewNode(op, name);
-  environment()->BindAccumulator(value, &states);
+  environment()->BindAccumulator(value, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitLdaLookupSlot() {
@@ -960,8 +917,6 @@
     // Slow path, do a runtime load lookup.
     set_environment(slow_environment);
     {
-      FrameStateBeforeAndAfter states(this);
-
       Node* name = jsgraph()->Constant(
           bytecode_iterator().GetConstantForIndexOperand(0));
 
@@ -970,7 +925,7 @@
                                         ? Runtime::kLoadLookupSlot
                                         : Runtime::kLoadLookupSlotInsideTypeof);
       Node* value = NewNode(op, name);
-      environment()->BindAccumulator(value, &states);
+      environment()->BindAccumulator(value, Environment::kAttachFrameState);
     }
 
     fast_environment->Merge(environment());
@@ -994,10 +949,10 @@
 
   // Fast path, do a global load.
   {
-    FrameStateBeforeAndAfter states(this);
+    PrepareEagerCheckpoint();
     Node* node =
         BuildLoadGlobal(bytecode_iterator().GetIndexOperand(1), typeof_mode);
-    environment()->BindAccumulator(node, &states);
+    environment()->BindAccumulator(node, Environment::kAttachFrameState);
   }
 
   // Only build the slow path if there were any slow-path checks.
@@ -1009,8 +964,6 @@
     // Slow path, do a runtime load lookup.
     set_environment(slow_environment);
     {
-      FrameStateBeforeAndAfter states(this);
-
       Node* name = jsgraph()->Constant(
           bytecode_iterator().GetConstantForIndexOperand(0));
 
@@ -1019,7 +972,7 @@
                                         ? Runtime::kLoadLookupSlot
                                         : Runtime::kLoadLookupSlotInsideTypeof);
       Node* value = NewNode(op, name);
-      environment()->BindAccumulator(value, &states);
+      environment()->BindAccumulator(value, Environment::kAttachFrameState);
     }
 
     fast_environment->Merge(environment());
@@ -1036,7 +989,7 @@
 }
 
 void BytecodeGraphBuilder::BuildStaLookupSlot(LanguageMode language_mode) {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
   Node* value = environment()->LookupAccumulator();
   Node* name =
       jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
@@ -1044,7 +997,7 @@
       is_strict(language_mode) ? Runtime::kStoreLookupSlot_Strict
                                : Runtime::kStoreLookupSlot_Sloppy);
   Node* store = NewNode(op, name, value);
-  environment()->BindAccumulator(store, &states);
+  environment()->BindAccumulator(store, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitStaLookupSlotSloppy() {
@@ -1055,7 +1008,8 @@
   BuildStaLookupSlot(LanguageMode::STRICT);
 }
 
-Node* BytecodeGraphBuilder::BuildNamedLoad() {
+void BytecodeGraphBuilder::VisitLdaNamedProperty() {
+  PrepareEagerCheckpoint();
   Node* object =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Handle<Name> name =
@@ -1064,23 +1018,12 @@
       CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
 
   const Operator* op = javascript()->LoadNamed(name, feedback);
-  return NewNode(op, object, GetFunctionClosure());
+  Node* node = NewNode(op, object, GetFunctionClosure());
+  environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
 
-void BytecodeGraphBuilder::VisitLdaNamedProperty() {
-  FrameStateBeforeAndAfter states(this);
-  Node* node = BuildNamedLoad();
-  environment()->BindAccumulator(node, &states);
-}
-
-void BytecodeGraphBuilder::VisitLdrNamedProperty() {
-  FrameStateBeforeAndAfter states(this);
-  Node* node = BuildNamedLoad();
-  environment()->BindRegister(bytecode_iterator().GetRegisterOperand(3), node,
-                              &states);
-}
-
-Node* BytecodeGraphBuilder::BuildKeyedLoad() {
+void BytecodeGraphBuilder::VisitLdaKeyedProperty() {
+  PrepareEagerCheckpoint();
   Node* key = environment()->LookupAccumulator();
   Node* object =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
@@ -1088,24 +1031,12 @@
       CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
 
   const Operator* op = javascript()->LoadProperty(feedback);
-  return NewNode(op, object, key, GetFunctionClosure());
-}
-
-void BytecodeGraphBuilder::VisitLdaKeyedProperty() {
-  FrameStateBeforeAndAfter states(this);
-  Node* node = BuildKeyedLoad();
-  environment()->BindAccumulator(node, &states);
-}
-
-void BytecodeGraphBuilder::VisitLdrKeyedProperty() {
-  FrameStateBeforeAndAfter states(this);
-  Node* node = BuildKeyedLoad();
-  environment()->BindRegister(bytecode_iterator().GetRegisterOperand(2), node,
-                              &states);
+  Node* node = NewNode(op, object, key, GetFunctionClosure());
+  environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::BuildNamedStore(LanguageMode language_mode) {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
   Node* value = environment()->LookupAccumulator();
   Node* object =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
@@ -1116,7 +1047,7 @@
 
   const Operator* op = javascript()->StoreNamed(language_mode, name, feedback);
   Node* node = NewNode(op, object, value, GetFunctionClosure());
-  environment()->RecordAfterState(node, &states);
+  environment()->RecordAfterState(node, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitStaNamedPropertySloppy() {
@@ -1128,7 +1059,7 @@
 }
 
 void BytecodeGraphBuilder::BuildKeyedStore(LanguageMode language_mode) {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
   Node* value = environment()->LookupAccumulator();
   Node* object =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
@@ -1139,7 +1070,7 @@
 
   const Operator* op = javascript()->StoreProperty(language_mode, feedback);
   Node* node = NewNode(op, object, key, value, GetFunctionClosure());
-  environment()->RecordAfterState(node, &states);
+  environment()->RecordAfterState(node, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitStaKeyedPropertySloppy() {
@@ -1150,6 +1081,26 @@
   BuildKeyedStore(LanguageMode::STRICT);
 }
 
+void BytecodeGraphBuilder::VisitLdaModuleVariable() {
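+  // The module object lives in the extension slot of the module context,
+  // found {depth} context levels up; the variable itself is a cell reachable
+  // from the module via {cell_index}.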
+  int32_t cell_index = bytecode_iterator().GetImmediateOperand(0);
+  uint32_t depth = bytecode_iterator().GetUnsignedImmediateOperand(1);
+  Node* module =
+      NewNode(javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
+              environment()->Context());
+  Node* value = NewNode(javascript()->LoadModule(cell_index), module);
+  environment()->BindAccumulator(value);
+}
+
+void BytecodeGraphBuilder::VisitStaModuleVariable() {
+  int32_t cell_index = bytecode_iterator().GetImmediateOperand(0);
+  uint32_t depth = bytecode_iterator().GetUnsignedImmediateOperand(1);
+  Node* module =
+      NewNode(javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
+              environment()->Context());
+  Node* value = environment()->LookupAccumulator();
+  NewNode(javascript()->StoreModule(cell_index), module, value);
+}
+
 void BytecodeGraphBuilder::VisitPushContext() {
   Node* new_context = environment()->LookupAccumulator();
   environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0),
@@ -1218,10 +1169,9 @@
 }
 
 void BytecodeGraphBuilder::BuildCreateArguments(CreateArgumentsType type) {
-  FrameStateBeforeAndAfter states(this);
   const Operator* op = javascript()->CreateArguments(type);
   Node* object = NewNode(op, GetFunctionClosure());
-  environment()->BindAccumulator(object, &states);
+  environment()->BindAccumulator(object, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitCreateMappedArguments() {
@@ -1236,20 +1186,15 @@
   BuildCreateArguments(CreateArgumentsType::kRestParameter);
 }
 
-void BytecodeGraphBuilder::BuildCreateLiteral(const Operator* op) {
-  FrameStateBeforeAndAfter states(this);
-  Node* literal = NewNode(op, GetFunctionClosure());
-  environment()->BindAccumulator(literal, &states);
-}
-
 void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
   Handle<String> constant_pattern =
       Handle<String>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
   int literal_index = bytecode_iterator().GetIndexOperand(1);
   int literal_flags = bytecode_iterator().GetFlagOperand(2);
-  const Operator* op = javascript()->CreateLiteralRegExp(
-      constant_pattern, literal_flags, literal_index);
-  BuildCreateLiteral(op);
+  Node* literal = NewNode(javascript()->CreateLiteralRegExp(
+                              constant_pattern, literal_flags, literal_index),
+                          GetFunctionClosure());
+  environment()->BindAccumulator(literal, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
@@ -1263,13 +1208,15 @@
   // code. We can revisit this when we have data to the contrary.
   literal_flags |= ArrayLiteral::kDisableMementos;
   int number_of_elements = constant_elements->length();
-  const Operator* op = javascript()->CreateLiteralArray(
-      constant_elements, literal_flags, literal_index, number_of_elements);
-  BuildCreateLiteral(op);
+  Node* literal = NewNode(
+      javascript()->CreateLiteralArray(constant_elements, literal_flags,
+                                       literal_index, number_of_elements),
+      GetFunctionClosure());
+  environment()->BindAccumulator(literal, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
   Handle<FixedArray> constant_properties = Handle<FixedArray>::cast(
       bytecode_iterator().GetConstantForIndexOperand(0));
   int literal_index = bytecode_iterator().GetIndexOperand(1);
@@ -1283,7 +1230,7 @@
                                         literal_index, number_of_properties),
       GetFunctionClosure());
   environment()->BindRegister(bytecode_iterator().GetRegisterOperand(3),
-                              literal, &states);
+                              literal, Environment::kAttachFrameState);
 }
 
 Node* BytecodeGraphBuilder::ProcessCallArguments(const Operator* call_op,
@@ -1302,9 +1249,10 @@
   return value;
 }
 
-void BytecodeGraphBuilder::BuildCall(TailCallMode tail_call_mode) {
-  FrameStateBeforeAndAfter states(this);
-  ConvertReceiverMode receiver_hint = ConvertReceiverMode::kAny;
+void BytecodeGraphBuilder::BuildCall(TailCallMode tail_call_mode,
+                                     ConvertReceiverMode receiver_hint) {
+  PrepareEagerCheckpoint();
+
   Node* callee =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
@@ -1320,21 +1268,27 @@
   const Operator* call = javascript()->CallFunction(
       arg_count + 1, frequency, feedback, receiver_hint, tail_call_mode);
   Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 1);
-  environment()->BindAccumulator(value, &states);
+  environment()->BindAccumulator(value, Environment::kAttachFrameState);
 }
 
-void BytecodeGraphBuilder::VisitCall() { BuildCall(TailCallMode::kDisallow); }
+void BytecodeGraphBuilder::VisitCall() {
+  BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kAny);
+}
+
+void BytecodeGraphBuilder::VisitCallProperty() {
+  BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kNotNullOrUndefined);
+}
 
 void BytecodeGraphBuilder::VisitTailCall() {
   TailCallMode tail_call_mode =
       bytecode_array_->GetIsolate()->is_tail_call_elimination_enabled()
           ? TailCallMode::kAllow
           : TailCallMode::kDisallow;
-  BuildCall(tail_call_mode);
+  BuildCall(tail_call_mode, ConvertReceiverMode::kAny);
 }
 
 void BytecodeGraphBuilder::VisitCallJSRuntime() {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
   Node* callee =
       BuildLoadNativeContextField(bytecode_iterator().GetIndexOperand(0));
   interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
@@ -1343,7 +1297,7 @@
   // Create node to perform the JS runtime call.
   const Operator* call = javascript()->CallFunction(arg_count + 1);
   Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 1);
-  environment()->BindAccumulator(value, &states);
+  environment()->BindAccumulator(value, Environment::kAttachFrameState);
 }
 
 Node* BytecodeGraphBuilder::ProcessCallRuntimeArguments(
@@ -1360,7 +1314,7 @@
 }
 
 void BytecodeGraphBuilder::VisitCallRuntime() {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
   Runtime::FunctionId functionId = bytecode_iterator().GetRuntimeIdOperand(0);
   interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
   size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
@@ -1368,11 +1322,11 @@
   // Create node to perform the runtime call.
   const Operator* call = javascript()->CallRuntime(functionId, arg_count);
   Node* value = ProcessCallRuntimeArguments(call, first_arg, arg_count);
-  environment()->BindAccumulator(value, &states);
+  environment()->BindAccumulator(value, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitCallRuntimeForPair() {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
   Runtime::FunctionId functionId = bytecode_iterator().GetRuntimeIdOperand(0);
   interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
   size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
@@ -1382,11 +1336,12 @@
   // Create node to perform the runtime call.
   const Operator* call = javascript()->CallRuntime(functionId, arg_count);
   Node* return_pair = ProcessCallRuntimeArguments(call, first_arg, arg_count);
-  environment()->BindRegistersToProjections(first_return, return_pair, &states);
+  environment()->BindRegistersToProjections(first_return, return_pair,
+                                            Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitInvokeIntrinsic() {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
   Runtime::FunctionId functionId = bytecode_iterator().GetIntrinsicIdOperand(0);
   interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
   size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
@@ -1395,7 +1350,7 @@
   // lowering.
   const Operator* call = javascript()->CallRuntime(functionId, arg_count);
   Node* value = ProcessCallRuntimeArguments(call, first_arg, arg_count);
-  environment()->BindAccumulator(value, &states);
+  environment()->BindAccumulator(value, Environment::kAttachFrameState);
 }
 
 Node* BytecodeGraphBuilder::ProcessCallNewArguments(
@@ -1414,7 +1369,7 @@
 }
 
 void BytecodeGraphBuilder::VisitNew() {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
   interpreter::Register callee_reg = bytecode_iterator().GetRegisterOperand(0);
   interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
   size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
@@ -1432,14 +1387,14 @@
       static_cast<int>(arg_count) + 2, frequency, feedback);
   Node* value = ProcessCallNewArguments(call, callee, new_target, first_arg,
                                         arg_count + 2);
-  environment()->BindAccumulator(value, &states);
+  environment()->BindAccumulator(value, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::BuildThrow() {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
   Node* value = environment()->LookupAccumulator();
   Node* call = NewNode(javascript()->CallRuntime(Runtime::kThrow), value);
-  environment()->BindAccumulator(call, &states);
+  environment()->BindAccumulator(call, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitThrow() {
@@ -1459,12 +1414,12 @@
 }
 
 void BytecodeGraphBuilder::BuildBinaryOp(const Operator* js_op) {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
   Node* left =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Node* right = environment()->LookupAccumulator();
   Node* node = NewNode(js_op, left, right);
-  environment()->BindAccumulator(node, &states);
+  environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
 
 // Helper function to create binary operation hint from the recorded type
@@ -1495,11 +1450,8 @@
 }
 
 float BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const {
-  if (slot_id >= TypeFeedbackVector::kReservedIndexCount) {
-    CallICNexus nexus(feedback_vector(), feedback_vector()->ToSlot(slot_id));
-    return nexus.ComputeCallFrequency() * invocation_frequency_;
-  }
-  return 0.0f;
+  CallICNexus nexus(feedback_vector(), feedback_vector()->ToSlot(slot_id));
+  return nexus.ComputeCallFrequency() * invocation_frequency_;
 }
 
 void BytecodeGraphBuilder::VisitAdd() {
@@ -1558,12 +1510,12 @@
 }
 
 void BytecodeGraphBuilder::BuildBinaryOpWithImmediate(const Operator* js_op) {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
   Node* left =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
   Node* right = jsgraph()->Constant(bytecode_iterator().GetImmediateOperand(0));
   Node* node = NewNode(js_op, left, right);
-  environment()->BindAccumulator(node, &states);
+  environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitAddSmi() {
@@ -1597,23 +1549,23 @@
 }
 
 void BytecodeGraphBuilder::VisitInc() {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
   // Note: Use subtract -1 here instead of add 1 to ensure we always convert to
   // a number, not a string.
   const Operator* js_op =
       javascript()->Subtract(GetBinaryOperationHint(kCountOperationHintIndex));
   Node* node = NewNode(js_op, environment()->LookupAccumulator(),
                        jsgraph()->Constant(-1));
-  environment()->BindAccumulator(node, &states);
+  environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitDec() {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
   const Operator* js_op =
       javascript()->Subtract(GetBinaryOperationHint(kCountOperationHintIndex));
   Node* node = NewNode(js_op, environment()->LookupAccumulator(),
                        jsgraph()->OneConstant());
-  environment()->BindAccumulator(node, &states);
+  environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitLogicalNot() {
@@ -1638,13 +1590,13 @@
 }
 
 void BytecodeGraphBuilder::BuildDelete(LanguageMode language_mode) {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
   Node* key = environment()->LookupAccumulator();
   Node* object =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Node* node =
       NewNode(javascript()->DeleteProperty(language_mode), object, key);
-  environment()->BindAccumulator(node, &states);
+  environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitDeletePropertyStrict() {
@@ -1656,12 +1608,12 @@
 }
 
 void BytecodeGraphBuilder::BuildCompareOp(const Operator* js_op) {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
   Node* left =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Node* right = environment()->LookupAccumulator();
   Node* node = NewNode(js_op, left, right);
-  environment()->BindAccumulator(node, &states);
+  environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitTestEqual() {
@@ -1701,10 +1653,10 @@
 }
 
 void BytecodeGraphBuilder::BuildCastOperator(const Operator* js_op) {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
   Node* value = NewNode(js_op, environment()->LookupAccumulator());
   environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0), value,
-                              &states);
+                              Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitToName() {
@@ -1772,23 +1724,24 @@
 void BytecodeGraphBuilder::VisitJumpLoop() { BuildJump(); }
 
 void BytecodeGraphBuilder::VisitStackCheck() {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
   Node* node = NewNode(javascript()->StackCheck());
-  environment()->RecordAfterState(node, &states);
+  environment()->RecordAfterState(node, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitReturn() {
   BuildLoopExitsForFunctionExit();
+  Node* pop_node = jsgraph()->ZeroConstant();
   Node* control =
-      NewNode(common()->Return(), environment()->LookupAccumulator());
+      NewNode(common()->Return(), pop_node, environment()->LookupAccumulator());
   MergeControlToLeaveFunction(control);
 }
 
 void BytecodeGraphBuilder::VisitDebugger() {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
   Node* call =
       NewNode(javascript()->CallRuntime(Runtime::kHandleDebuggerStatement));
-  environment()->BindAccumulator(call, &states);
+  environment()->BindAccumulator(call, Environment::kAttachFrameState);
   environment()->MarkAllRegistersLive();
 }
 
@@ -1799,18 +1752,19 @@
 #undef DEBUG_BREAK
 
 void BytecodeGraphBuilder::BuildForInPrepare() {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
   Node* receiver =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Node* prepare = NewNode(javascript()->ForInPrepare(), receiver);
   environment()->BindRegistersToProjections(
-      bytecode_iterator().GetRegisterOperand(1), prepare, &states);
+      bytecode_iterator().GetRegisterOperand(1), prepare,
+      Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitForInPrepare() { BuildForInPrepare(); }
 
 void BytecodeGraphBuilder::VisitForInContinue() {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
   Node* index =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Node* cache_length =
@@ -1818,11 +1772,11 @@
   Node* exit_cond =
       NewNode(javascript()->LessThan(CompareOperationHint::kSignedSmall), index,
               cache_length);
-  environment()->BindAccumulator(exit_cond, &states);
+  environment()->BindAccumulator(exit_cond, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::BuildForInNext() {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
   Node* receiver =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Node* index =
@@ -1835,18 +1789,18 @@
 
   Node* value = NewNode(javascript()->ForInNext(), receiver, cache_array,
                         cache_type, index);
-  environment()->BindAccumulator(value, &states);
+  environment()->BindAccumulator(value, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitForInNext() { BuildForInNext(); }
 
 void BytecodeGraphBuilder::VisitForInStep() {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
   Node* index =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   index = NewNode(javascript()->Add(BinaryOperationHint::kSignedSmall), index,
                   jsgraph()->OneConstant());
-  environment()->BindAccumulator(index, &states);
+  environment()->BindAccumulator(index, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitSuspendGenerator() {
@@ -1876,7 +1830,7 @@
 }
 
 void BytecodeGraphBuilder::VisitResumeGenerator() {
-  FrameStateBeforeAndAfter states(this);
+  PrepareEagerCheckpoint();
 
   Node* generator = environment()->LookupRegister(
       bytecode_iterator().GetRegisterOperand(0));
@@ -1891,7 +1845,7 @@
   Node* state =
       NewNode(javascript()->GeneratorRestoreContinuation(), generator);
 
-  environment()->BindAccumulator(state, &states);
+  environment()->BindAccumulator(state, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitWide() {
@@ -2114,9 +2068,9 @@
       *current_input++ = environment()->Context();
     }
     if (has_frame_state) {
-      // The frame state will be inserted later. Here we misuse
-      // the {Dead} node as a sentinel to be later overwritten
-      // with the real frame state.
+      // The frame state will be inserted later. Here we misuse the {Dead} node
+      // as a sentinel to be later overwritten with the real frame state by the
+      // calls to {PrepareFrameState} within individual visitor methods.
       *current_input++ = jsgraph()->Dead();
     }
     if (has_effect) {
@@ -2238,6 +2192,19 @@
   return value;
 }
 
+void BytecodeGraphBuilder::UpdateCurrentSourcePosition(
+    SourcePositionTableIterator* it, int offset) {
+  if (it->done()) return;
+
+  if (it->code_offset() == offset) {
+    source_positions_->SetCurrentPosition(SourcePosition(
+        it->source_position().ScriptOffset(), start_position_.InliningId()));
+    it->Advance();
+  } else {
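+    // Entries in the source position table are sorted by code offset, so if
+    // the offsets don't match we cannot have passed an entry yet.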
+    DCHECK_GT(it->code_offset(), offset);
+  }
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/bytecode-graph-builder.h b/src/compiler/bytecode-graph-builder.h
index 53582f7..6994226 100644
--- a/src/compiler/bytecode-graph-builder.h
+++ b/src/compiler/bytecode-graph-builder.h
@@ -14,6 +14,7 @@
 #include "src/interpreter/bytecode-array-iterator.h"
 #include "src/interpreter/bytecode-flags.h"
 #include "src/interpreter/bytecodes.h"
+#include "src/source-position-table.h"
 
 namespace v8 {
 namespace internal {
@@ -22,21 +23,24 @@
 
 namespace compiler {
 
+class SourcePositionTable;
+
 // The BytecodeGraphBuilder produces a high-level IR graph based on
 // interpreter bytecodes.
 class BytecodeGraphBuilder {
  public:
   BytecodeGraphBuilder(Zone* local_zone, CompilationInfo* info,
-                       JSGraph* jsgraph, float invocation_frequency);
+                       JSGraph* jsgraph, float invocation_frequency,
+                       SourcePositionTable* source_positions,
+                       int inlining_id = SourcePosition::kNotInlined);
 
   // Creates a graph by visiting bytecodes.
-  bool CreateGraph();
+  bool CreateGraph(bool stack_check = true);
 
  private:
   class Environment;
-  class FrameStateBeforeAndAfter;
 
-  void VisitBytecodes();
+  void VisitBytecodes(bool stack_check);
 
   // Get or create the node that represents the outer function closure.
   Node* GetFunctionClosure();
@@ -117,24 +121,31 @@
                                     interpreter::Register first_arg,
                                     size_t arity);
 
+  // Prepare information for eager deoptimization. This information is carried
+  // by dedicated {Checkpoint} nodes that are wired into the effect chain.
+  // Conceptually this frame state is "before" a given operation.
+  void PrepareEagerCheckpoint();
+
+  // Prepare information for lazy deoptimization. This information is attached
+  // to the given node, and the node's output value is combined into the frame
+  // state as described by {combine}. Conceptually this frame state is "after"
+  // a given operation.
+  void PrepareFrameState(Node* node, OutputFrameStateCombine combine);
+
   // Computes register liveness and replaces dead ones in frame states with the
   // undefined values.
   void ClearNonLiveSlotsInFrameStates();
 
-  void BuildCreateLiteral(const Operator* op);
   void BuildCreateArguments(CreateArgumentsType type);
-  Node* BuildLoadContextSlot();
   Node* BuildLoadGlobal(uint32_t feedback_slot_index, TypeofMode typeof_mode);
   void BuildStoreGlobal(LanguageMode language_mode);
-  Node* BuildNamedLoad();
   void BuildNamedStore(LanguageMode language_mode);
-  Node* BuildKeyedLoad();
   void BuildKeyedStore(LanguageMode language_mode);
   void BuildLdaLookupSlot(TypeofMode typeof_mode);
   void BuildLdaLookupContextSlot(TypeofMode typeof_mode);
   void BuildLdaLookupGlobalSlot(TypeofMode typeof_mode);
   void BuildStaLookupSlot(LanguageMode language_mode);
-  void BuildCall(TailCallMode tail_call_mode);
+  void BuildCall(TailCallMode tail_call_mode,
+                 ConvertReceiverMode receiver_hint);
   void BuildThrow();
   void BuildBinaryOp(const Operator* op);
   void BuildBinaryOpWithImmediate(const Operator* op);
@@ -301,6 +312,15 @@
   // Analyzer of register liveness.
   LivenessAnalyzer liveness_analyzer_;
 
+  // The Turbofan source position table, to be populated.
+  SourcePositionTable* source_positions_;
+
+  SourcePosition const start_position_;
+
+  // Update the current position of {source_positions_} to that of the
+  // bytecode at {offset}, if any.
+  void UpdateCurrentSourcePosition(SourcePositionTableIterator* it, int offset);
+
   static int const kBinaryOperationHintIndex = 1;
   static int const kCountOperationHintIndex = 0;
   static int const kBinaryOperationSmiHintIndex = 2;
diff --git a/src/compiler/checkpoint-elimination.h b/src/compiler/checkpoint-elimination.h
index edaa0e7..f30eec0 100644
--- a/src/compiler/checkpoint-elimination.h
+++ b/src/compiler/checkpoint-elimination.h
@@ -5,14 +5,17 @@
 #ifndef V8_COMPILER_CHECKPOINT_ELIMINATION_H_
 #define V8_COMPILER_CHECKPOINT_ELIMINATION_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
 // Performs elimination of redundant checkpoints within the graph.
-class CheckpointElimination final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE CheckpointElimination final
+    : public NON_EXPORTED_BASE(AdvancedReducer) {
  public:
   explicit CheckpointElimination(Editor* editor);
   ~CheckpointElimination() final {}
diff --git a/src/compiler/code-assembler.cc b/src/compiler/code-assembler.cc
index ff7ef31..3431098 100644
--- a/src/compiler/code-assembler.cc
+++ b/src/compiler/code-assembler.cc
@@ -41,8 +41,11 @@
 CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone, int parameter_count,
                              Code::Flags flags, const char* name)
     : CodeAssembler(isolate, zone,
-                    Linkage::GetJSCallDescriptor(zone, false, parameter_count,
-                                                 CallDescriptor::kNoFlags),
+                    Linkage::GetJSCallDescriptor(
+                        zone, false, parameter_count,
+                        Code::ExtractKindFromFlags(flags) == Code::BUILTIN
+                            ? CallDescriptor::kPushArgumentCount
+                            : CallDescriptor::kNoFlags),
                     flags, name) {}
 
 CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone,
@@ -86,6 +89,10 @@
   return raw_assembler_->machine()->Float64RoundDown().IsSupported();
 }
 
+bool CodeAssembler::IsFloat64RoundTiesEvenSupported() const {
+  return raw_assembler_->machine()->Float64RoundTiesEven().IsSupported();
+}
+
 bool CodeAssembler::IsFloat64RoundTruncateSupported() const {
   return raw_assembler_->machine()->Float64RoundTruncate().IsSupported();
 }
@@ -107,7 +114,11 @@
 }
 
 Node* CodeAssembler::SmiConstant(Smi* value) {
-  return IntPtrConstant(bit_cast<intptr_t>(value));
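+  // The bitcast gives the raw word constant a tagged-signed representation.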
+  return BitcastWordToTaggedSigned(IntPtrConstant(bit_cast<intptr_t>(value)));
+}
+
+Node* CodeAssembler::SmiConstant(int value) {
+  return SmiConstant(Smi::FromInt(value));
 }
 
 Node* CodeAssembler::HeapConstant(Handle<HeapObject> object) {
@@ -148,6 +159,20 @@
   return m.HasValue();
 }
 
+bool CodeAssembler::ToSmiConstant(Node* node, Smi*& out_value) {
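+  // SmiConstant() above wraps its word constant in BitcastWordToTaggedSigned,
+  // so peel that off before matching the underlying IntPtr constant.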
+  if (node->opcode() == IrOpcode::kBitcastWordToTaggedSigned) {
+    node = node->InputAt(0);
+  } else {
+    return false;
+  }
+  IntPtrMatcher m(node);
+  if (m.HasValue()) {
+    out_value = Smi::cast(bit_cast<Object*>(m.Value()));
+    return true;
+  }
+  return false;
+}
+
 bool CodeAssembler::ToIntPtrConstant(Node* node, intptr_t& out_value) {
   IntPtrMatcher m(node);
   if (m.HasValue()) out_value = m.Value();
@@ -162,6 +187,10 @@
   return raw_assembler_->Return(value);
 }
 
+void CodeAssembler::PopAndReturn(Node* pop, Node* value) {
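+  // Pops {pop} additional stack slots before returning {value}; useful for
+  // builtins whose argument count is only known at run time.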
+  return raw_assembler_->PopAndReturn(pop, value);
+}
+
 void CodeAssembler::DebugBreak() { raw_assembler_->DebugBreak(); }
 
 void CodeAssembler::Comment(const char* format, ...) {
@@ -216,7 +245,7 @@
 }
 
 Node* CodeAssembler::Word32Shr(Node* value, int shift) {
-  return (shift != 0) ? raw_assembler_->Word32Shr(value, IntPtrConstant(shift))
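+  // The shift amount of a 32-bit shift must itself be a 32-bit operand.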
+  return (shift != 0) ? raw_assembler_->Word32Shr(value, Int32Constant(shift))
                       : value;
 }
 
@@ -314,15 +343,6 @@
   return raw_assembler_->Projection(index, value);
 }
 
-void CodeAssembler::BranchIf(Node* condition, Label* if_true, Label* if_false) {
-  Label if_condition_is_true(this), if_condition_is_false(this);
-  Branch(condition, &if_condition_is_true, &if_condition_is_false);
-  Bind(&if_condition_is_true);
-  Goto(if_true);
-  Bind(&if_condition_is_false);
-  Goto(if_false);
-}
-
 void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
                                     Variable* exception_var) {
   Label success(this), exception(this, Label::kDeferred);
@@ -400,6 +420,16 @@
   return return_value;
 }
 
+Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
+                                 Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+                                 Node* arg5) {
+  CallPrologue();
+  Node* return_value = raw_assembler_->CallRuntime5(function_id, arg1, arg2,
+                                                    arg3, arg4, arg5, context);
+  CallEpilogue();
+  return return_value;
+}
+
 Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
                                      Node* context) {
   return raw_assembler_->TailCallRuntime0(function_id, context);
@@ -706,6 +736,14 @@
                       arg4, result_size);
 }
 
+Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
+                                  Node* arg1, Node* arg2, Node* arg3,
+                                  Node* arg4, Node* arg5, size_t result_size) {
+  Node* target = HeapConstant(callable.code());
+  return TailCallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
+                      arg4, arg5, result_size);
+}
+
 Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
                                   Node* target, Node* context, Node* arg1,
                                   size_t result_size) {
@@ -794,6 +832,27 @@
 }
 
 Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
+                                  Node* target, Node* context, Node* arg1,
+                                  Node* arg2, Node* arg3, Node* arg4,
+                                  Node* arg5, Node* arg6, size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  Node** args = zone()->NewArray<Node*>(7);
+  args[0] = arg1;
+  args[1] = arg2;
+  args[2] = arg3;
+  args[3] = arg4;
+  args[4] = arg5;
+  args[5] = arg6;
+  args[6] = context;
+
+  return raw_assembler_->TailCallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
                                   Node* target, Node* context, const Arg& arg1,
                                   const Arg& arg2, const Arg& arg3,
                                   const Arg& arg4, size_t result_size) {
@@ -896,6 +955,24 @@
   return CallStubN(callable.descriptor(), argc + 1, target, args, result_size);
 }
 
+Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
+                            Node* function, Node* receiver, Node* arg1,
+                            Node* arg2, Node* arg3, size_t result_size) {
+  const int argc = 3;
+  Node* target = HeapConstant(callable.code());
+
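+  // JS call convention: function, argument count, receiver, the actual
+  // arguments, and finally the context.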
+  Node** args = zone()->NewArray<Node*>(argc + 4);
+  args[0] = function;
+  args[1] = Int32Constant(argc);
+  args[2] = receiver;
+  args[3] = arg1;
+  args[4] = arg2;
+  args[5] = arg3;
+  args[6] = context;
+
+  return CallStubN(callable.descriptor(), argc + 1, target, args, result_size);
+}
+
 Node* CodeAssembler::CallCFunction2(MachineType return_type,
                                     MachineType arg0_type,
                                     MachineType arg1_type, Node* function,
@@ -1006,16 +1083,15 @@
   return impl_->value_ != nullptr;
 }
 
-CodeAssembler::Label::Label(CodeAssembler* assembler, int merged_value_count,
-                            CodeAssembler::Variable** merged_variables,
-                            CodeAssembler::Label::Type type)
+CodeAssembler::Label::Label(CodeAssembler* assembler, size_t vars_count,
+                            Variable** vars, CodeAssembler::Label::Type type)
     : bound_(false), merge_count_(0), assembler_(assembler), label_(nullptr) {
   void* buffer = assembler->zone()->New(sizeof(RawMachineLabel));
   label_ = new (buffer)
       RawMachineLabel(type == kDeferred ? RawMachineLabel::kDeferred
                                         : RawMachineLabel::kNonDeferred);
-  for (int i = 0; i < merged_value_count; ++i) {
-    variable_phis_[merged_variables[i]->impl_] = nullptr;
+  for (size_t i = 0; i < vars_count; ++i) {
+    variable_phis_[vars[i]->impl_] = nullptr;
   }
 }
 
diff --git a/src/compiler/code-assembler.h b/src/compiler/code-assembler.h
index 8372334..1f364d9 100644
--- a/src/compiler/code-assembler.h
+++ b/src/compiler/code-assembler.h
@@ -12,6 +12,7 @@
 // Do not include anything from src/compiler here!
 #include "src/allocation.h"
 #include "src/builtins/builtins.h"
+#include "src/globals.h"
 #include "src/heap/heap.h"
 #include "src/machine-type.h"
 #include "src/runtime/runtime.h"
@@ -57,6 +58,7 @@
   V(Uint32LessThanOrEqual)                       \
   V(Uint32GreaterThanOrEqual)                    \
   V(UintPtrLessThan)                             \
+  V(UintPtrLessThanOrEqual)                      \
   V(UintPtrGreaterThan)                          \
   V(UintPtrGreaterThanOrEqual)                   \
   V(WordEqual)                                   \
@@ -148,9 +150,11 @@
   V(ChangeUint32ToFloat64)              \
   V(ChangeUint32ToUint64)               \
   V(RoundFloat64ToInt32)                \
+  V(RoundInt32ToFloat32)                \
   V(Float64SilenceNaN)                  \
   V(Float64RoundDown)                   \
   V(Float64RoundUp)                     \
+  V(Float64RoundTiesEven)               \
   V(Float64RoundTruncate)               \
   V(Word32Clz)                          \
   V(Word32BinaryNot)
@@ -171,7 +175,7 @@
 // clients, CodeAssembler also provides an abstraction for creating variables
 // and enhanced Label functionality to merge variable values along paths where
 // they have differing values, including loops.
-class CodeAssembler {
+class V8_EXPORT_PRIVATE CodeAssembler {
  public:
   // Create with CallStub linkage.
   // |result_size| specifies the number of results returned by the stub.
@@ -191,6 +195,7 @@
   bool Is64() const;
   bool IsFloat64RoundUpSupported() const;
   bool IsFloat64RoundDownSupported() const;
+  bool IsFloat64RoundTiesEvenSupported() const;
   bool IsFloat64RoundTruncateSupported() const;
 
   class Label;
@@ -210,6 +215,8 @@
     CodeAssembler* assembler_;
   };
 
+  typedef ZoneList<Variable*> VariableList;
+
   // ===========================================================================
   // Base Assembler
   // ===========================================================================
@@ -220,6 +227,7 @@
   Node* IntPtrConstant(intptr_t value);
   Node* NumberConstant(double value);
   Node* SmiConstant(Smi* value);
+  Node* SmiConstant(int value);
   Node* HeapConstant(Handle<HeapObject> object);
   Node* BooleanConstant(bool value);
   Node* ExternalConstant(ExternalReference address);
@@ -228,10 +236,12 @@
 
   bool ToInt32Constant(Node* node, int32_t& out_value);
   bool ToInt64Constant(Node* node, int64_t& out_value);
+  bool ToSmiConstant(Node* node, Smi*& out_value);
   bool ToIntPtrConstant(Node* node, intptr_t& out_value);
 
   Node* Parameter(int value);
   void Return(Node* value);
+  void PopAndReturn(Node* pop, Node* value);
 
   void DebugBreak();
   void Comment(const char* format, ...);
@@ -400,6 +410,10 @@
   Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
                      Node* arg2, Node* arg3, Node* arg4,
                      size_t result_size = 1);
+  Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
+                     Node* arg2, Node* arg3, Node* arg4, Node* arg5,
+                     size_t result_size = 1);
+
   Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
                      Node* context, Node* arg1, size_t result_size = 1);
   Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
@@ -414,6 +428,10 @@
   Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
                      Node* context, Node* arg1, Node* arg2, Node* arg3,
                      Node* arg4, Node* arg5, size_t result_size = 1);
+  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                     Node* context, Node* arg1, Node* arg2, Node* arg3,
+                     Node* arg4, Node* arg5, Node* arg6,
+                     size_t result_size = 1);
 
   Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
                      Node* context, const Arg& arg1, const Arg& arg2,
@@ -432,6 +450,9 @@
                Node* receiver, Node* arg1, size_t result_size = 1);
   Node* CallJS(Callable const& callable, Node* context, Node* function,
                Node* receiver, Node* arg1, Node* arg2, size_t result_size = 1);
+  Node* CallJS(Callable const& callable, Node* context, Node* function,
+               Node* receiver, Node* arg1, Node* arg2, Node* arg3,
+               size_t result_size = 1);
 
   // Call to a C function with two arguments.
   Node* CallCFunction2(MachineType return_type, MachineType arg0_type,
@@ -442,16 +463,6 @@
   void GotoIfException(Node* node, Label* if_exception,
                        Variable* exception_var = nullptr);
 
-  // Branching helpers.
-  void BranchIf(Node* condition, Label* if_true, Label* if_false);
-
-#define BRANCH_HELPER(name)                                                \
-  void BranchIf##name(Node* a, Node* b, Label* if_true, Label* if_false) { \
-    BranchIf(name(a, b), if_true, if_false);                               \
-  }
-  CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(BRANCH_HELPER)
-#undef BRANCH_HELPER
-
   // Helpers which delegate to RawMachineAssembler.
   Factory* factory() const;
   Isolate* isolate() const;
@@ -486,12 +497,15 @@
       CodeAssembler* assembler,
       CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
       : CodeAssembler::Label(assembler, 0, nullptr, type) {}
+  Label(CodeAssembler* assembler, const VariableList& merged_variables,
+        CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
+      : CodeAssembler::Label(assembler, merged_variables.length(),
+                             &(merged_variables[0]), type) {}
+  Label(CodeAssembler* assembler, size_t count, Variable** vars,
+        CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred);
   Label(CodeAssembler* assembler, CodeAssembler::Variable* merged_variable,
         CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
-      : CodeAssembler::Label(assembler, 1, &merged_variable, type) {}
-  Label(CodeAssembler* assembler, int merged_variable_count,
-        CodeAssembler::Variable** merged_variables,
-        CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred);
+      : Label(assembler, 1, &merged_variable, type) {}
   ~Label() {}
 
  private:
diff --git a/src/compiler/code-generator.cc b/src/compiler/code-generator.cc
index 043582b..c69e86e 100644
--- a/src/compiler/code-generator.cc
+++ b/src/compiler/code-generator.cc
@@ -88,10 +88,11 @@
 
   // Define deoptimization literals for all inlined functions.
   DCHECK_EQ(0u, deoptimization_literals_.size());
-  for (const CompilationInfo::InlinedFunctionHolder& inlined :
+  for (CompilationInfo::InlinedFunctionHolder& inlined :
        info->inlined_functions()) {
     if (!inlined.shared_info.is_identical_to(info->shared_info())) {
-      DefineDeoptimizationLiteral(inlined.shared_info);
+      int index = DefineDeoptimizationLiteral(inlined.shared_info);
+      inlined.RegisterInlinedFunctionId(index);
     }
   }
   inlined_function_count_ = deoptimization_literals_.size();
@@ -469,29 +470,19 @@
 
 
 void CodeGenerator::AssembleSourcePosition(Instruction* instr) {
-  SourcePosition source_position;
+  SourcePosition source_position = SourcePosition::Unknown();
   if (!code()->GetSourcePosition(instr, &source_position)) return;
   if (source_position == current_source_position_) return;
   current_source_position_ = source_position;
-  if (source_position.IsUnknown()) return;
-  int code_pos = source_position.raw();
-  source_position_table_builder_.AddPosition(masm()->pc_offset(), code_pos,
-                                             false);
+  if (!source_position.IsKnown()) return;
+  source_position_table_builder_.AddPosition(masm()->pc_offset(),
+                                             source_position, false);
   if (FLAG_code_comments) {
     CompilationInfo* info = this->info();
     if (!info->parse_info()) return;
-    Vector<char> buffer = Vector<char>::New(256);
-    int ln = Script::GetLineNumber(info->script(), code_pos);
-    int cn = Script::GetColumnNumber(info->script(), code_pos);
-    if (info->script()->name()->IsString()) {
-      Handle<String> file(String::cast(info->script()->name()));
-      base::OS::SNPrintF(buffer.start(), buffer.length(), "-- %s:%d:%d --",
-                         file->ToCString().get(), ln, cn);
-    } else {
-      base::OS::SNPrintF(buffer.start(), buffer.length(),
-                         "-- <unknown>:%d:%d --", ln, cn);
-    }
-    masm()->RecordComment(buffer.start());
+    std::ostringstream buffer;
+    buffer << "-- " << source_position.InliningStack(info) << " --";
+    masm()->RecordComment(StrDup(buffer.str().c_str()));
   }
 }
 
@@ -516,6 +507,26 @@
   }
 }
 
+namespace {
+
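+// Packs the source positions of all inlined functions into a PodArray for
+// the deoptimization data; an empty byte array stands in when nothing was
+// inlined.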
+Handle<PodArray<InliningPosition>> CreateInliningPositions(
+    CompilationInfo* info) {
+  const CompilationInfo::InlinedFunctionList& inlined_functions =
+      info->inlined_functions();
+  if (inlined_functions.size() == 0) {
+    return Handle<PodArray<InliningPosition>>::cast(
+        info->isolate()->factory()->empty_byte_array());
+  }
+  Handle<PodArray<InliningPosition>> inl_positions =
+      PodArray<InliningPosition>::New(
+          info->isolate(), static_cast<int>(inlined_functions.size()), TENURED);
+  for (size_t i = 0; i < inlined_functions.size(); ++i) {
+    inl_positions->set(static_cast<int>(i), inlined_functions[i].position);
+  }
+  return inl_positions;
+}
+
+}  // namespace
 
 void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
   CompilationInfo* info = this->info();
@@ -535,7 +546,7 @@
   if (info->has_shared_info()) {
     data->SetSharedFunctionInfo(*info->shared_info());
   } else {
-    data->SetSharedFunctionInfo(Smi::FromInt(0));
+    data->SetSharedFunctionInfo(Smi::kZero);
   }
 
   Handle<FixedArray> literals = isolate()->factory()->NewFixedArray(
@@ -548,6 +559,9 @@
     data->SetLiteralArray(*literals);
   }
 
+  Handle<PodArray<InliningPosition>> inl_pos = CreateInliningPositions(info);
+  data->SetInliningPositions(*inl_pos);
+
   if (info->is_osr()) {
     DCHECK(osr_pc_offset_ >= 0);
     data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
@@ -565,7 +579,7 @@
     CHECK(deoptimization_states_[i]);
     data->SetTranslationIndex(
         i, Smi::FromInt(deoptimization_states_[i]->translation_id()));
-    data->SetArgumentsStackHeight(i, Smi::FromInt(0));
+    data->SetArgumentsStackHeight(i, Smi::kZero);
     data->SetPc(i, Smi::FromInt(deoptimization_state->pc_offset()));
   }
 
@@ -858,10 +872,19 @@
           constant_object =
               handle(reinterpret_cast<Smi*>(constant.ToInt32()), isolate());
           DCHECK(constant_object->IsSmi());
+        } else if (type.representation() == MachineRepresentation::kBit) {
+          if (constant.ToInt32() == 0) {
+            constant_object = isolate()->factory()->false_value();
+          } else {
+            DCHECK_EQ(1, constant.ToInt32());
+            constant_object = isolate()->factory()->true_value();
+          }
         } else {
+          // TODO(jarin,bmeurer): We currently pass in raw pointers to the
+          // JSFunction::entry here. We should really consider fixing this.
           DCHECK(type == MachineType::Int32() ||
                  type == MachineType::Uint32() ||
-                 type.representation() == MachineRepresentation::kBit ||
+                 type.representation() == MachineRepresentation::kWord32 ||
                  type.representation() == MachineRepresentation::kNone);
           DCHECK(type.representation() != MachineRepresentation::kNone ||
                  constant.ToInt32() == FrameStateDescriptor::kImpossibleValue);
@@ -873,7 +896,10 @@
       case Constant::kInt64:
         // When pointers are 8 bytes, we can use int64 constants to represent
         // Smis.
-        DCHECK(type.representation() == MachineRepresentation::kTagged ||
+        // TODO(jarin,bmeurer): We currently pass in raw pointers to the
+        // JSFunction::entry here. We should really consider fixing this.
+        DCHECK(type.representation() == MachineRepresentation::kWord64 ||
+               type.representation() == MachineRepresentation::kTagged ||
                type.representation() == MachineRepresentation::kTaggedSigned);
         DCHECK_EQ(8, kPointerSize);
         constant_object =
diff --git a/src/compiler/code-generator.h b/src/compiler/code-generator.h
index 3032163..7aed85a 100644
--- a/src/compiler/code-generator.h
+++ b/src/compiler/code-generator.h
@@ -129,7 +129,7 @@
 
   // Generates an architecture-specific, descriptor-specific return sequence
   // to tear down a stack frame.
-  void AssembleReturn();
+  void AssembleReturn(InstructionOperand* pop);
 
   void AssembleDeconstructFrame();
 
diff --git a/src/compiler/common-node-cache.h b/src/compiler/common-node-cache.h
index 1f07703..bce8d0f 100644
--- a/src/compiler/common-node-cache.h
+++ b/src/compiler/common-node-cache.h
@@ -45,6 +45,10 @@
 
   Node** FindExternalConstant(ExternalReference value);
 
+  Node** FindPointerConstant(intptr_t value) {
+    return pointer_constants_.Find(zone(), value);
+  }
+
   Node** FindNumberConstant(double value) {
     // We canonicalize double constants at the bit representation level.
     return number_constants_.Find(zone(), bit_cast<int64_t>(value));
@@ -73,6 +77,7 @@
   Int32NodeCache float32_constants_;
   Int64NodeCache float64_constants_;
   IntPtrNodeCache external_constants_;
+  IntPtrNodeCache pointer_constants_;
   Int64NodeCache number_constants_;
   IntPtrNodeCache heap_constants_;
   RelocInt32NodeCache relocatable_int32_constants_;
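
The new pointer_constants_ cache follows the same insert-or-lookup pattern as the other node caches: Find returns the address of a cache slot, and the caller materializes the node only when the slot is still null, so each distinct intptr_t value maps to one canonical PointerConstant node. A minimal standalone sketch of that slot-returning pattern, assuming a plain hash map instead of V8's zone-allocated NodeCache:

    #include <cstdint>
    #include <iostream>
    #include <unordered_map>

    struct Node { intptr_t value; };  // stand-in for compiler::Node

    class PointerConstantCache {
     public:
      // Returns the address of the slot for |value|; a null slot means the
      // caller must create the node and store it, mirroring NodeCache::Find.
      Node** Find(intptr_t value) { return &map_[value]; }

     private:
      std::unordered_map<intptr_t, Node*> map_;
    };

    int main() {
      PointerConstantCache cache;
      Node** slot = cache.Find(0x1234);
      if (*slot == nullptr) *slot = new Node{0x1234};  // materialize once
      // A second lookup yields the same canonical node.
      std::cout << (*cache.Find(0x1234) == *slot) << "\n";  // prints 1
      delete *slot;
    }
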
diff --git a/src/compiler/common-operator-reducer.cc b/src/compiler/common-operator-reducer.cc
index c5ced20..9a36816 100644
--- a/src/compiler/common-operator-reducer.cc
+++ b/src/compiler/common-operator-reducer.cc
@@ -284,7 +284,7 @@
 
 Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
   DCHECK_EQ(IrOpcode::kReturn, node->opcode());
-  Node* const value = node->InputAt(0);
+  Node* const value = node->InputAt(1);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* const control = NodeProperties::GetControlInput(node);
   bool changed = false;
@@ -311,8 +311,9 @@
       // {end} as revisit, because we mark {node} as {Dead} below, which was
       // previously connected to {end}, so we know for sure that at some point
       // the reducer logic will visit {end} again.
-      Node* ret = graph()->NewNode(common()->Return(), value->InputAt(i),
-                                   effect->InputAt(i), control->InputAt(i));
+      Node* ret = graph()->NewNode(common()->Return(), node->InputAt(0),
+                                   value->InputAt(i), effect->InputAt(i),
+                                   control->InputAt(i));
       NodeProperties::MergeControlToEnd(graph(), common(), ret);
     }
     // Mark the merge {control} and return {node} as {dead}.
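
The reducer now reads the returned value at input index 1 because every Return node carries the pop count as value input 0, followed by the returned values, then the effect and control inputs (hence the value_input_count + 1 in the operator counts further down). A standalone sketch of that input layout, using plain strings as stand-ins for nodes:

    #include <iostream>
    #include <string>
    #include <vector>

    // Illustrative input layout of a Return node after this change:
    //   [0] pop count, [1..n] returned values, then effect, then control.
    int main() {
      std::vector<std::string> ret_inputs = {"pop_count", "value0", "effect",
                                             "control"};
      std::cout << "value input: " << ret_inputs[1] << "\n";  // was index 0
    }
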
diff --git a/src/compiler/common-operator-reducer.h b/src/compiler/common-operator-reducer.h
index b7aeeb7..acc2092 100644
--- a/src/compiler/common-operator-reducer.h
+++ b/src/compiler/common-operator-reducer.h
@@ -5,7 +5,9 @@
 #ifndef V8_COMPILER_COMMON_OPERATOR_REDUCER_H_
 #define V8_COMPILER_COMMON_OPERATOR_REDUCER_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -19,7 +21,8 @@
 
 
 // Performs strength reduction on nodes that have common operators.
-class CommonOperatorReducer final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE CommonOperatorReducer final
+    : public NON_EXPORTED_BASE(AdvancedReducer) {
  public:
   CommonOperatorReducer(Editor* editor, Graph* graph,
                         CommonOperatorBuilder* common,
diff --git a/src/compiler/common-operator.cc b/src/compiler/common-operator.cc
index e57160a..9ce6f71 100644
--- a/src/compiler/common-operator.cc
+++ b/src/compiler/common-operator.cc
@@ -210,6 +210,37 @@
   return os;
 }
 
+int OsrValueIndexOf(Operator const* op) {
+  DCHECK_EQ(IrOpcode::kOsrValue, op->opcode());
+  return OpParameter<int>(op);
+}
+
+size_t hash_value(OsrGuardType type) { return static_cast<size_t>(type); }
+
+std::ostream& operator<<(std::ostream& os, OsrGuardType type) {
+  switch (type) {
+    case OsrGuardType::kUninitialized:
+      return os << "Uninitialized";
+    case OsrGuardType::kSignedSmall:
+      return os << "SignedSmall";
+    case OsrGuardType::kAny:
+      return os << "Any";
+  }
+  UNREACHABLE();
+  return os;
+}
+
+OsrGuardType OsrGuardTypeOf(Operator const* op) {
+  DCHECK_EQ(IrOpcode::kOsrGuard, op->opcode());
+  return OpParameter<OsrGuardType>(op);
+}
+
+ZoneVector<MachineType> const* MachineTypesOf(Operator const* op) {
+  DCHECK(op->opcode() == IrOpcode::kTypedObjectState ||
+         op->opcode() == IrOpcode::kTypedStateValues);
+  return OpParameter<const ZoneVector<MachineType>*>(op);
+}
+
 #define CACHED_OP_LIST(V)                                                     \
   V(Dead, Operator::kFoldable, 0, 0, 0, 1, 1, 1)                              \
   V(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1)                             \
@@ -231,8 +262,8 @@
 #define CACHED_RETURN_LIST(V) \
   V(1)                        \
   V(2)                        \
-  V(3)
-
+  V(3)                        \
+  V(4)
 
 #define CACHED_END_LIST(V) \
   V(1)                     \
@@ -293,7 +324,7 @@
   V(LostPrecisionOrNaN)                  \
   V(NoReason)                            \
   V(NotAHeapNumber)                      \
-  V(NotAHeapNumberUndefinedBoolean)      \
+  V(NotANumberOrOddball)                 \
   V(NotASmi)                             \
   V(OutOfBounds)                         \
   V(WrongInstanceType)                   \
@@ -371,16 +402,16 @@
   CACHED_END_LIST(CACHED_END)
 #undef CACHED_END
 
-  template <size_t kInputCount>
+  template <size_t kValueInputCount>
   struct ReturnOperator final : public Operator {
     ReturnOperator()
-        : Operator(                                   // --
-              IrOpcode::kReturn, Operator::kNoThrow,  // opcode
-              "Return",                               // name
-              kInputCount, 1, 1, 0, 0, 1) {}          // counts
+        : Operator(                                    // --
+              IrOpcode::kReturn, Operator::kNoThrow,   // opcode
+              "Return",                                // name
+              kValueInputCount + 1, 1, 1, 0, 0, 1) {}  // counts
   };
-#define CACHED_RETURN(input_count) \
-  ReturnOperator<input_count> kReturn##input_count##Operator;
+#define CACHED_RETURN(value_input_count) \
+  ReturnOperator<value_input_count> kReturn##value_input_count##Operator;
   CACHED_RETURN_LIST(CACHED_RETURN)
 #undef CACHED_RETURN
 
@@ -607,7 +638,6 @@
       0, 0, control_input_count, 0, 0, 0);  // counts
 }
 
-
 const Operator* CommonOperatorBuilder::Return(int value_input_count) {
   switch (value_input_count) {
 #define CACHED_RETURN(input_count) \
@@ -622,7 +652,7 @@
   return new (zone()) Operator(               //--
       IrOpcode::kReturn, Operator::kNoThrow,  // opcode
       "Return",                               // name
-      value_input_count, 1, 1, 0, 0, 1);      // counts
+      value_input_count + 1, 1, 1, 0, 0, 1);  // counts
 }
 
 
@@ -780,7 +810,6 @@
       ParameterInfo(index, debug_name));         // parameter info
 }
 
-
 const Operator* CommonOperatorBuilder::OsrValue(int index) {
   return new (zone()) Operator1<int>(                // --
       IrOpcode::kOsrValue, Operator::kNoProperties,  // opcode
@@ -789,6 +818,13 @@
       index);                                        // parameter
 }
 
+const Operator* CommonOperatorBuilder::OsrGuard(OsrGuardType type) {
+  return new (zone()) Operator1<OsrGuardType>(  // --
+      IrOpcode::kOsrGuard, Operator::kNoThrow,  // opcode
+      "OsrGuard",                               // name
+      1, 1, 1, 1, 1, 0,                         // counts
+      type);                                    // parameter
+}
 
 const Operator* CommonOperatorBuilder::Int32Constant(int32_t value) {
   return new (zone()) Operator1<int32_t>(         // --
@@ -844,6 +880,13 @@
       value);                                      // parameter
 }
 
+const Operator* CommonOperatorBuilder::PointerConstant(intptr_t value) {
+  return new (zone()) Operator1<intptr_t>(          // --
+      IrOpcode::kPointerConstant, Operator::kPure,  // opcode
+      "PointerConstant",                            // name
+      0, 0, 0, 1, 0, 0,                             // counts
+      value);                                       // parameter
+}
 
 const Operator* CommonOperatorBuilder::HeapConstant(
     const Handle<HeapObject>& value) {
@@ -974,23 +1017,31 @@
       arguments, 0, 0, 1, 0, 0);                // counts
 }
 
-
-const Operator* CommonOperatorBuilder::ObjectState(int pointer_slots, int id) {
-  return new (zone()) Operator1<int>(           // --
-      IrOpcode::kObjectState, Operator::kPure,  // opcode
-      "ObjectState",                            // name
-      pointer_slots, 0, 0, 1, 0, 0, id);        // counts
-}
-
-
 const Operator* CommonOperatorBuilder::TypedStateValues(
     const ZoneVector<MachineType>* types) {
   return new (zone()) Operator1<const ZoneVector<MachineType>*>(  // --
       IrOpcode::kTypedStateValues, Operator::kPure,               // opcode
       "TypedStateValues",                                         // name
-      static_cast<int>(types->size()), 0, 0, 1, 0, 0, types);     // counts
+      static_cast<int>(types->size()), 0, 0, 1, 0, 0,             // counts
+      types);                                                     // parameter
 }
 
+const Operator* CommonOperatorBuilder::ObjectState(int pointer_slots) {
+  return new (zone()) Operator1<int>(           // --
+      IrOpcode::kObjectState, Operator::kPure,  // opcode
+      "ObjectState",                            // name
+      pointer_slots, 0, 0, 1, 0, 0,             // counts
+      pointer_slots);                           // parameter
+}
+
+const Operator* CommonOperatorBuilder::TypedObjectState(
+    const ZoneVector<MachineType>* types) {
+  return new (zone()) Operator1<const ZoneVector<MachineType>*>(  // --
+      IrOpcode::kTypedObjectState, Operator::kPure,               // opcode
+      "TypedObjectState",                                         // name
+      static_cast<int>(types->size()), 0, 0, 1, 0, 0,             // counts
+      types);                                                     // parameter
+}
 
 const Operator* CommonOperatorBuilder::FrameState(
     BailoutId bailout_id, OutputFrameStateCombine state_combine,
diff --git a/src/compiler/common-operator.h b/src/compiler/common-operator.h
index 2db0bfa..1f258a0 100644
--- a/src/compiler/common-operator.h
+++ b/src/compiler/common-operator.h
@@ -6,8 +6,10 @@
 #define V8_COMPILER_COMMON_OPERATOR_H_
 
 #include "src/assembler.h"
+#include "src/base/compiler-specific.h"
 #include "src/compiler/frame-states.h"
 #include "src/deoptimize-reason.h"
+#include "src/globals.h"
 #include "src/machine-type.h"
 #include "src/zone/zone-containers.h"
 
@@ -39,9 +41,9 @@
 
 inline size_t hash_value(BranchHint hint) { return static_cast<size_t>(hint); }
 
-std::ostream& operator<<(std::ostream&, BranchHint);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, BranchHint);
 
-BranchHint BranchHintOf(const Operator* const);
+V8_EXPORT_PRIVATE BranchHint BranchHintOf(const Operator* const);
 
 // Deoptimize reason for Deoptimize, DeoptimizeIf and DeoptimizeUnless.
 DeoptimizeReason DeoptimizeReasonOf(Operator const* const);
@@ -98,14 +100,15 @@
 
 std::ostream& operator<<(std::ostream&, SelectParameters const& p);
 
-SelectParameters const& SelectParametersOf(const Operator* const);
+V8_EXPORT_PRIVATE SelectParameters const& SelectParametersOf(
+    const Operator* const);
 
-CallDescriptor const* CallDescriptorOf(const Operator* const);
+V8_EXPORT_PRIVATE CallDescriptor const* CallDescriptorOf(const Operator* const);
 
-size_t ProjectionIndexOf(const Operator* const);
+V8_EXPORT_PRIVATE size_t ProjectionIndexOf(const Operator* const);
 
-MachineRepresentation PhiRepresentationOf(const Operator* const);
-
+V8_EXPORT_PRIVATE MachineRepresentation
+PhiRepresentationOf(const Operator* const);
 
 // The {IrOpcode::kParameter} opcode represents an incoming parameter to the
 // function. This class bundles the index and a debug name for such operators.
@@ -124,7 +127,7 @@
 
 std::ostream& operator<<(std::ostream&, ParameterInfo const&);
 
-int ParameterIndexOf(const Operator* const);
+V8_EXPORT_PRIVATE int ParameterIndexOf(const Operator* const);
 const ParameterInfo& ParameterInfoOf(const Operator* const);
 
 class RelocatablePtrConstantInfo final {
@@ -171,9 +174,20 @@
 
 Type* TypeGuardTypeOf(Operator const*) WARN_UNUSED_RESULT;
 
+int OsrValueIndexOf(Operator const*);
+
+enum class OsrGuardType { kUninitialized, kSignedSmall, kAny };
+size_t hash_value(OsrGuardType type);
+std::ostream& operator<<(std::ostream&, OsrGuardType);
+OsrGuardType OsrGuardTypeOf(Operator const*);
+
+ZoneVector<MachineType> const* MachineTypesOf(Operator const*)
+    WARN_UNUSED_RESULT;
+
 // Interface for building common operators that can be used at any level of IR,
 // including JavaScript, mid-level, and low-level.
-class CommonOperatorBuilder final : public ZoneObject {
+class V8_EXPORT_PRIVATE CommonOperatorBuilder final
+    : public NON_EXPORTED_BASE(ZoneObject) {
  public:
   explicit CommonOperatorBuilder(Zone* zone);
 
@@ -202,6 +216,7 @@
   const Operator* OsrNormalEntry();
   const Operator* OsrLoopEntry();
   const Operator* OsrValue(int index);
+  const Operator* OsrGuard(OsrGuardType type);
 
   const Operator* Int32Constant(int32_t);
   const Operator* Int64Constant(int64_t);
@@ -209,6 +224,7 @@
   const Operator* Float64Constant(volatile double);
   const Operator* ExternalConstant(const ExternalReference&);
   const Operator* NumberConstant(volatile double);
+  const Operator* PointerConstant(intptr_t);
   const Operator* HeapConstant(const Handle<HeapObject>&);
 
   const Operator* RelocatableInt32Constant(int32_t value,
@@ -228,8 +244,9 @@
   const Operator* BeginRegion(RegionObservability);
   const Operator* FinishRegion();
   const Operator* StateValues(int arguments);
-  const Operator* ObjectState(int pointer_slots, int id);
   const Operator* TypedStateValues(const ZoneVector<MachineType>* types);
+  const Operator* ObjectState(int pointer_slots);
+  const Operator* TypedObjectState(const ZoneVector<MachineType>* types);
   const Operator* FrameState(BailoutId bailout_id,
                              OutputFrameStateCombine state_combine,
                              const FrameStateFunctionInfo* function_info);
diff --git a/src/compiler/source-position.cc b/src/compiler/compiler-source-position-table.cc
similarity index 94%
rename from src/compiler/source-position.cc
rename to src/compiler/compiler-source-position-table.cc
index 80f1800..c5520e7 100644
--- a/src/compiler/source-position.cc
+++ b/src/compiler/compiler-source-position-table.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/compiler/source-position.h"
+#include "src/compiler/compiler-source-position-table.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/node-aux-data.h"
 
@@ -24,28 +24,24 @@
   SourcePositionTable* source_positions_;
 };
 
-
 SourcePositionTable::SourcePositionTable(Graph* graph)
     : graph_(graph),
       decorator_(nullptr),
       current_position_(SourcePosition::Unknown()),
       table_(graph->zone()) {}
 
-
 void SourcePositionTable::AddDecorator() {
   DCHECK_NULL(decorator_);
   decorator_ = new (graph_->zone()) Decorator(this);
   graph_->AddDecorator(decorator_);
 }
 
-
 void SourcePositionTable::RemoveDecorator() {
   DCHECK_NOT_NULL(decorator_);
   graph_->RemoveDecorator(decorator_);
   decorator_ = nullptr;
 }
 
-
 SourcePosition SourcePositionTable::GetSourcePosition(Node* node) const {
   return table_.Get(node);
 }
@@ -65,7 +61,7 @@
         os << ",";
       }
       os << "\"" << i.first << "\""
-         << ":" << pos.raw();
+         << ":" << pos.ScriptOffset();
       needs_comma = true;
     }
   }
diff --git a/src/compiler/source-position.h b/src/compiler/compiler-source-position-table.h
similarity index 62%
rename from src/compiler/source-position.h
rename to src/compiler/compiler-source-position-table.h
index d4df783..4d14ae2 100644
--- a/src/compiler/source-position.h
+++ b/src/compiler/compiler-source-position-table.h
@@ -2,43 +2,20 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_COMPILER_SOURCE_POSITION_H_
-#define V8_COMPILER_SOURCE_POSITION_H_
+#ifndef V8_COMPILER_COMPILER_SOURCE_POSITION_TABLE_H_
+#define V8_COMPILER_COMPILER_SOURCE_POSITION_TABLE_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/compiler/node-aux-data.h"
 #include "src/globals.h"
+#include "src/source-position.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-// Encapsulates encoding and decoding of sources positions from which Nodes
-// originated.
-class SourcePosition final {
- public:
-  explicit SourcePosition(int raw = kUnknownPosition) : raw_(raw) {}
-
-  static SourcePosition Unknown() { return SourcePosition(kUnknownPosition); }
-  bool IsUnknown() const { return raw() == kUnknownPosition; }
-  bool IsKnown() const { return raw() != kUnknownPosition; }
-
-  int raw() const { return raw_; }
-
- private:
-  static const int kUnknownPosition = kNoSourcePosition;
-  int raw_;
-};
-
-
-inline bool operator==(const SourcePosition& lhs, const SourcePosition& rhs) {
-  return lhs.raw() == rhs.raw();
-}
-
-inline bool operator!=(const SourcePosition& lhs, const SourcePosition& rhs) {
-  return !(lhs == rhs);
-}
-
-class SourcePositionTable final : public ZoneObject {
+class V8_EXPORT_PRIVATE SourcePositionTable final
+    : public NON_EXPORTED_BASE(ZoneObject) {
  public:
   class Scope final {
    public:
@@ -72,6 +49,10 @@
   SourcePosition GetSourcePosition(Node* node) const;
   void SetSourcePosition(Node* node, SourcePosition position);
 
+  void SetCurrentPosition(const SourcePosition& pos) {
+    current_position_ = pos;
+  }
+
   void Print(std::ostream& os) const;
 
  private:
@@ -80,7 +61,7 @@
   Graph* const graph_;
   Decorator* decorator_;
   SourcePosition current_position_;
-  NodeAuxData<SourcePosition> table_;
+  NodeAuxData<SourcePosition, SourcePosition::Unknown> table_;
 
   DISALLOW_COPY_AND_ASSIGN(SourcePositionTable);
 };
@@ -89,4 +70,4 @@
 }  // namespace internal
 }  // namespace v8
 
-#endif  // V8_COMPILER_SOURCE_POSITION_H_
+#endif  // V8_COMPILER_COMPILER_SOURCE_POSITION_TABLE_H_
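
The table now names its default explicitly: NodeAuxData takes the value type and a function producing the default, so lookups of untouched nodes yield SourcePosition::Unknown() without storing an entry. A minimal sketch of such a default-producing aux-data map, under the assumption that the default is a nullary function usable as a non-type template argument:

    #include <cassert>
    #include <unordered_map>

    // Stand-in for SourcePosition with an Unknown() default.
    struct Pos {
      int offset = -1;
      static Pos Unknown() { return Pos{}; }
    };

    template <class T, T (*kDefault)()>
    class AuxData {
     public:
      T Get(int node_id) const {
        auto it = map_.find(node_id);
        return it == map_.end() ? kDefault() : it->second;
      }
      void Set(int node_id, const T& value) { map_[node_id] = value; }

     private:
      std::unordered_map<int, T> map_;
    };

    int main() {
      AuxData<Pos, Pos::Unknown> table;
      assert(table.Get(42).offset == -1);  // default; nothing stored
      table.Set(42, Pos{7});
      assert(table.Get(42).offset == 7);
    }
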
diff --git a/src/compiler/control-equivalence.h b/src/compiler/control-equivalence.h
index 4fb9c27..05777d7 100644
--- a/src/compiler/control-equivalence.h
+++ b/src/compiler/control-equivalence.h
@@ -5,8 +5,10 @@
 #ifndef V8_COMPILER_CONTROL_EQUIVALENCE_H_
 #define V8_COMPILER_CONTROL_EQUIVALENCE_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/node.h"
+#include "src/globals.h"
 #include "src/zone/zone-containers.h"
 
 namespace v8 {
@@ -28,7 +30,8 @@
 // control regions in linear time" by Johnson, Pearson & Pingali (PLDI94) which
 // also contains proofs for the aforementioned equivalence. References to line
 // numbers in the algorithm from figure 4 have been added [line:x].
-class ControlEquivalence final : public ZoneObject {
+class V8_EXPORT_PRIVATE ControlEquivalence final
+    : public NON_EXPORTED_BASE(ZoneObject) {
  public:
   ControlEquivalence(Zone* zone, Graph* graph)
       : zone_(zone),
diff --git a/src/compiler/control-flow-optimizer.h b/src/compiler/control-flow-optimizer.h
index 61785a0..577c40d 100644
--- a/src/compiler/control-flow-optimizer.h
+++ b/src/compiler/control-flow-optimizer.h
@@ -6,6 +6,7 @@
 #define V8_COMPILER_CONTROL_FLOW_OPTIMIZER_H_
 
 #include "src/compiler/node-marker.h"
+#include "src/globals.h"
 #include "src/zone/zone-containers.h"
 
 namespace v8 {
@@ -18,8 +19,7 @@
 class MachineOperatorBuilder;
 class Node;
 
-
-class ControlFlowOptimizer final {
+class V8_EXPORT_PRIVATE ControlFlowOptimizer final {
  public:
   ControlFlowOptimizer(Graph* graph, CommonOperatorBuilder* common,
                        MachineOperatorBuilder* machine, Zone* zone);
diff --git a/src/compiler/dead-code-elimination.h b/src/compiler/dead-code-elimination.h
index 8e18561..1cf9b22 100644
--- a/src/compiler/dead-code-elimination.h
+++ b/src/compiler/dead-code-elimination.h
@@ -5,7 +5,9 @@
 #ifndef V8_COMPILER_DEAD_CODE_ELIMINATION_H_
 #define V8_COMPILER_DEAD_CODE_ELIMINATION_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -19,7 +21,8 @@
 // Note that this does not include trimming dead uses from the graph, and it
 // also does not include detecting dead code by any other means than seeing a
 // {Dead} control input; that is left to other reducers.
-class DeadCodeElimination final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE DeadCodeElimination final
+    : public NON_EXPORTED_BASE(AdvancedReducer) {
  public:
   DeadCodeElimination(Editor* editor, Graph* graph,
                       CommonOperatorBuilder* common);
diff --git a/src/compiler/effect-control-linearizer.cc b/src/compiler/effect-control-linearizer.cc
index 4e53e5d..d4b0576 100644
--- a/src/compiler/effect-control-linearizer.cc
+++ b/src/compiler/effect-control-linearizer.cc
@@ -6,6 +6,7 @@
 
 #include "src/code-factory.h"
 #include "src/compiler/access-builder.h"
+#include "src/compiler/compiler-source-position-table.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/node-matchers.h"
@@ -17,10 +18,13 @@
 namespace internal {
 namespace compiler {
 
-EffectControlLinearizer::EffectControlLinearizer(JSGraph* js_graph,
-                                                 Schedule* schedule,
-                                                 Zone* temp_zone)
-    : js_graph_(js_graph), schedule_(schedule), temp_zone_(temp_zone) {}
+EffectControlLinearizer::EffectControlLinearizer(
+    JSGraph* js_graph, Schedule* schedule, Zone* temp_zone,
+    SourcePositionTable* source_positions)
+    : js_graph_(js_graph),
+      schedule_(schedule),
+      temp_zone_(temp_zone),
+      source_positions_(source_positions) {}
 
 Graph* EffectControlLinearizer::graph() const { return js_graph_->graph(); }
 CommonOperatorBuilder* EffectControlLinearizer::common() const {
@@ -74,7 +78,8 @@
   // Update all inputs to an effect phi with the effects from the given
   // block->effect map.
   DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
-  DCHECK_EQ(node->op()->EffectInputCount(), block->PredecessorCount());
+  DCHECK_EQ(static_cast<size_t>(node->op()->EffectInputCount()),
+            block->PredecessorCount());
   for (int i = 0; i < node->op()->EffectInputCount(); i++) {
     Node* input = node->InputAt(i);
     BasicBlock* predecessor = block->PredecessorAt(static_cast<size_t>(i));
@@ -96,8 +101,10 @@
 
   // Update all inputs to the given control node with the correct control.
   DCHECK(control->opcode() == IrOpcode::kMerge ||
-         control->op()->ControlInputCount() == block->PredecessorCount());
-  if (control->op()->ControlInputCount() != block->PredecessorCount()) {
+         static_cast<size_t>(control->op()->ControlInputCount()) ==
+             block->PredecessorCount());
+  if (static_cast<size_t>(control->op()->ControlInputCount()) !=
+      block->PredecessorCount()) {
     return;  // We already re-wired the control inputs of this node.
   }
   for (int i = 0; i < control->op()->ControlInputCount(); i++) {
@@ -141,7 +148,8 @@
 
 void TryCloneBranch(Node* node, BasicBlock* block, Graph* graph,
                     CommonOperatorBuilder* common,
-                    BlockEffectControlMap* block_effects) {
+                    BlockEffectControlMap* block_effects,
+                    SourcePositionTable* source_positions) {
   DCHECK_EQ(IrOpcode::kBranch, node->opcode());
 
   // This optimization is a special case of (super)block cloning. It takes an
@@ -193,6 +201,8 @@
   //       ^                   ^
   //       |                   |
 
+  SourcePositionTable::Scope scope(source_positions,
+                                   source_positions->GetSourcePosition(node));
   Node* branch = node;
   Node* cond = NodeProperties::GetValueInput(branch, 0);
   if (!cond->OwnedBy(branch) || cond->opcode() != IrOpcode::kPhi) return;
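
The Scope object here pins the table's current position to the branch being cloned for the duration of the transformation, so every node created below inherits that position; the destructor restores the previous position. A generic save/restore sketch of that RAII pattern (names hypothetical, not the V8 class):

    #include <iostream>

    struct PositionTable {
      int current = -1;  // stand-in for SourcePosition current_position_
    };

    class PositionScope {
     public:
      PositionScope(PositionTable* table, int pos)
          : table_(table), saved_(table->current) {
        table_->current = pos;  // nodes created in this scope pick this up
      }
      ~PositionScope() { table_->current = saved_; }  // restore on exit

     private:
      PositionTable* table_;
      int saved_;
    };

    int main() {
      PositionTable table;
      {
        PositionScope scope(&table, 17);
        std::cout << table.current << "\n";  // 17 while cloning
      }
      std::cout << table.current << "\n";  // -1 restored afterwards
    }
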
@@ -246,7 +256,7 @@
     merge_true->AppendInput(graph->zone(), merge_true_inputs[i]);
     merge_false->AppendInput(graph->zone(), merge_false_inputs[i]);
   }
-  DCHECK_EQ(2, block->SuccessorCount());
+  DCHECK_EQ(2u, block->SuccessorCount());
   NodeProperties::ChangeOp(matcher.IfTrue(), common->Merge(input_count));
   NodeProperties::ChangeOp(matcher.IfFalse(), common->Merge(input_count));
   int const true_index =
@@ -445,7 +455,7 @@
       case BasicBlock::kBranch:
         ProcessNode(block->control_input(), &frame_state, &effect, &control);
         TryCloneBranch(block->control_input(), block, graph(), common(),
-                       &block_effects);
+                       &block_effects, source_positions_);
         break;
     }
 
@@ -491,6 +501,9 @@
 
 void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state,
                                           Node** effect, Node** control) {
+  SourcePositionTable::Scope scope(source_positions_,
+                                   source_positions_->GetSourcePosition(node));
+
   // If the node needs to be wired into the effect/control chain, do this
   // here. Pass current frame state for lowering to eager deoptimization.
   if (TryWireInStateEffect(node, *frame_state, effect, control)) {
@@ -600,6 +613,9 @@
     case IrOpcode::kChangeFloat64ToTagged:
       state = LowerChangeFloat64ToTagged(node, *effect, *control);
       break;
+    case IrOpcode::kChangeFloat64ToTaggedPointer:
+      state = LowerChangeFloat64ToTaggedPointer(node, *effect, *control);
+      break;
     case IrOpcode::kChangeTaggedSignedToInt32:
       state = LowerChangeTaggedSignedToInt32(node, *effect, *control);
       break;
@@ -636,9 +652,6 @@
     case IrOpcode::kCheckIf:
       state = LowerCheckIf(node, frame_state, *effect, *control);
       break;
-    case IrOpcode::kCheckHeapObject:
-      state = LowerCheckHeapObject(node, frame_state, *effect, *control);
-      break;
     case IrOpcode::kCheckedInt32Add:
       state = LowerCheckedInt32Add(node, frame_state, *effect, *control);
       break;
@@ -688,6 +701,10 @@
       state = LowerCheckedTaggedToTaggedSigned(node, frame_state, *effect,
                                                *control);
       break;
+    case IrOpcode::kCheckedTaggedToTaggedPointer:
+      state = LowerCheckedTaggedToTaggedPointer(node, frame_state, *effect,
+                                                *control);
+      break;
     case IrOpcode::kTruncateTaggedToWord32:
       state = LowerTruncateTaggedToWord32(node, *effect, *control);
       break;
@@ -776,6 +793,9 @@
     case IrOpcode::kFloat64RoundTruncate:
       state = LowerFloat64RoundTruncate(node, *effect, *control);
       break;
+    case IrOpcode::kFloat64RoundTiesEven:
+      state = LowerFloat64RoundTiesEven(node, *effect, *control);
+      break;
     default:
       return false;
   }
@@ -793,6 +813,14 @@
 }
 
 EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeFloat64ToTaggedPointer(Node* node,
+                                                           Node* effect,
+                                                           Node* control) {
+  Node* value = node->InputAt(0);
+  return AllocateHeapNumberWithValue(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
 EffectControlLinearizer::LowerChangeBitToTagged(Node* node, Node* effect,
                                                 Node* control) {
   Node* value = node->InputAt(0);
@@ -901,15 +929,14 @@
 EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node, Node* effect,
                                                   Node* control) {
   Node* value = node->InputAt(0);
-  Node* one = jsgraph()->Int32Constant(1);
   Node* zero = jsgraph()->Int32Constant(0);
   Node* fzero = jsgraph()->Float64Constant(0.0);
 
   // Collect effect/control/value triples.
   int count = 0;
-  Node* values[7];
-  Node* effects[7];
-  Node* controls[6];
+  Node* values[6];
+  Node* effects[6];
+  Node* controls[5];
 
   // Check if {value} is a Smi.
   Node* check_smi = ObjectIsSmi(value);
@@ -925,7 +952,7 @@
     values[count] =
         graph()->NewNode(machine()->Word32Equal(),
                          graph()->NewNode(machine()->WordEqual(), value,
-                                          jsgraph()->ZeroConstant()),
+                                          jsgraph()->IntPtrConstant(0)),
                          zero);
     count++;
   }
@@ -978,7 +1005,7 @@
     values[count] =
         graph()->NewNode(machine()->Word32Equal(),
                          graph()->NewNode(machine()->WordEqual(), value_length,
-                                          jsgraph()->ZeroConstant()),
+                                          jsgraph()->IntPtrConstant(0)),
                          zero);
     count++;
   }
@@ -1000,20 +1027,12 @@
         simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
         eheapnumber, if_heapnumber);
 
-    // Check if {value} is either less than 0.0 or greater than 0.0.
-    Node* check =
-        graph()->NewNode(machine()->Float64LessThan(), fzero, value_value);
-    Node* branch = graph()->NewNode(common()->Branch(), check, if_heapnumber);
-
-    controls[count] = graph()->NewNode(common()->IfTrue(), branch);
+    // Check if {value} is not one of 0, -0, or NaN.
+    controls[count] = if_heapnumber;
     effects[count] = eheapnumber;
-    values[count] = one;
-    count++;
-
-    controls[count] = graph()->NewNode(common()->IfFalse(), branch);
-    effects[count] = eheapnumber;
-    values[count] =
-        graph()->NewNode(machine()->Float64LessThan(), value_value, fzero);
+    values[count] = graph()->NewNode(
+        machine()->Float64LessThan(), fzero,
+        graph()->NewNode(machine()->Float64Abs(), value_value));
     count++;
   }
   control = graph()->NewNode(common()->IfFalse(), branch_heapnumber);
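
The rewritten heap-number case folds the old two-branch comparison into a single predicate: a double converts to true exactly when 0.0 < |x|, which is false for +0, for -0 (Float64Abs yields +0), and for NaN (every comparison with NaN is false). A runnable check of that identity:

    #include <cmath>
    #include <iostream>

    // ToBoolean for a heap number, as lowered above: 0.0 < fabs(value).
    bool ToBooleanHeapNumber(double value) { return 0.0 < std::fabs(value); }

    int main() {
      std::cout << ToBooleanHeapNumber(0.0) << "\n";   // 0: +0 is falsy
      std::cout << ToBooleanHeapNumber(-0.0) << "\n";  // 0: -0 is falsy
      std::cout << ToBooleanHeapNumber(NAN) << "\n";   // 0: NaN compares false
      std::cout << ToBooleanHeapNumber(-3.5) << "\n";  // 1: all else is truthy
    }
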
@@ -1274,19 +1293,6 @@
 }
 
 EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckHeapObject(Node* node, Node* frame_state,
-                                              Node* effect, Node* control) {
-  Node* value = node->InputAt(0);
-
-  Node* check = ObjectIsSmi(value);
-  control = effect =
-      graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kSmi), check,
-                       frame_state, effect, control);
-
-  return ValueEffectControl(value, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
 EffectControlLinearizer::LowerCheckedInt32Add(Node* node, Node* frame_state,
                                               Node* effect, Node* control) {
   Node* lhs = node->InputAt(0);
@@ -1818,8 +1824,7 @@
           graph()->NewNode(machine()->Word32Equal(), instance_type,
                            jsgraph()->Int32Constant(ODDBALL_TYPE));
       if_false = efalse = graph()->NewNode(
-          common()->DeoptimizeUnless(
-              DeoptimizeReason::kNotAHeapNumberUndefinedBoolean),
+          common()->DeoptimizeUnless(DeoptimizeReason::kNotANumberOrOddball),
           check_oddball, frame_state, efalse, if_false);
       STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
 
@@ -1884,6 +1889,21 @@
 }
 
 EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedTaggedToTaggedPointer(Node* node,
+                                                           Node* frame_state,
+                                                           Node* effect,
+                                                           Node* control) {
+  Node* value = node->InputAt(0);
+
+  Node* check = ObjectIsSmi(value);
+  control = effect =
+      graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kSmi), check,
+                       frame_state, effect, control);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
 EffectControlLinearizer::LowerTruncateTaggedToWord32(Node* node, Node* effect,
                                                      Node* control) {
   Node* value = node->InputAt(0);
@@ -2447,104 +2467,63 @@
   Node* branch0 =
       graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
 
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* etrue0 = effect;
-  Node* vtrue0;
-  {
-    // Load the isolate wide single character string cache.
-    Node* cache =
-        jsgraph()->HeapConstant(factory()->single_character_string_cache());
-
-    // Compute the {cache} index for {code}.
-    Node* index =
-        machine()->Is32() ? code : graph()->NewNode(
-                                       machine()->ChangeUint32ToUint64(), code);
-
-    // Check if we have an entry for the {code} in the single character string
-    // cache already.
-    Node* entry = etrue0 = graph()->NewNode(
-        simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()), cache,
-        index, etrue0, if_true0);
-
-    Node* check1 = graph()->NewNode(machine()->WordEqual(), entry,
-                                    jsgraph()->UndefinedConstant());
-    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                     check1, if_true0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* etrue1 = etrue0;
-    Node* vtrue1;
-    {
-      // Allocate a new SeqOneByteString for {code}.
-      vtrue1 = etrue1 = graph()->NewNode(
-          simplified()->Allocate(NOT_TENURED),
-          jsgraph()->Int32Constant(SeqOneByteString::SizeFor(1)), etrue1,
-          if_true1);
-      etrue1 = graph()->NewNode(
-          simplified()->StoreField(AccessBuilder::ForMap()), vtrue1,
-          jsgraph()->HeapConstant(factory()->one_byte_string_map()), etrue1,
-          if_true1);
-      etrue1 = graph()->NewNode(
-          simplified()->StoreField(AccessBuilder::ForNameHashField()), vtrue1,
-          jsgraph()->IntPtrConstant(Name::kEmptyHashField), etrue1, if_true1);
-      etrue1 = graph()->NewNode(
-          simplified()->StoreField(AccessBuilder::ForStringLength()), vtrue1,
-          jsgraph()->SmiConstant(1), etrue1, if_true1);
-      etrue1 = graph()->NewNode(
-          machine()->Store(StoreRepresentation(MachineRepresentation::kWord8,
-                                               kNoWriteBarrier)),
-          vtrue1, jsgraph()->IntPtrConstant(SeqOneByteString::kHeaderSize -
-                                            kHeapObjectTag),
-          code, etrue1, if_true1);
-
-      // Remember it in the {cache}.
-      etrue1 = graph()->NewNode(
-          simplified()->StoreElement(AccessBuilder::ForFixedArrayElement()),
-          cache, index, vtrue1, etrue1, if_true1);
-    }
-
-    // Use the {entry} from the {cache}.
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* efalse1 = etrue0;
-    Node* vfalse1 = entry;
-
-    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    etrue0 =
-        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
-    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                              vtrue1, vfalse1, if_true0);
-  }
-
   Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
   Node* efalse0 = effect;
-  Node* vfalse0;
+
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* etrue0 = effect;
+
+  // Load the isolate wide single character string cache.
+  Node* cache =
+      jsgraph()->HeapConstant(factory()->single_character_string_cache());
+
+  // Compute the {cache} index for {code}.
+  Node* index = machine()->Is32()
+                    ? code
+                    : graph()->NewNode(machine()->ChangeUint32ToUint64(), code);
+
+  // Check if we have an entry for the {code} in the single character string
+  // cache already.
+  Node* entry = etrue0 = graph()->NewNode(
+      simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()), cache,
+      index, etrue0, if_true0);
+
+  Node* check1 = graph()->NewNode(machine()->WordEqual(), entry,
+                                  jsgraph()->UndefinedConstant());
+  Node* branch1 =
+      graph()->NewNode(common()->Branch(BranchHint::kFalse), check1, if_true0);
+
+  // Use the {entry} from the {cache}.
+  Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+  Node* efalse1 = etrue0;
+  Node* vfalse1 = entry;
+
+  // Let %StringFromCharCode handle this case.
+  // TODO(turbofan): At some point we may consider adding a stub for this
+  // deferred case, so that we don't need to call into C++ here.
+  Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+  Node* etrue1 = etrue0;
+  Node* vtrue1;
   {
-    // Allocate a new SeqTwoByteString for {code}.
-    vfalse0 = efalse0 =
-        graph()->NewNode(simplified()->Allocate(NOT_TENURED),
-                         jsgraph()->Int32Constant(SeqTwoByteString::SizeFor(1)),
-                         efalse0, if_false0);
-    efalse0 = graph()->NewNode(
-        simplified()->StoreField(AccessBuilder::ForMap()), vfalse0,
-        jsgraph()->HeapConstant(factory()->string_map()), efalse0, if_false0);
-    efalse0 = graph()->NewNode(
-        simplified()->StoreField(AccessBuilder::ForNameHashField()), vfalse0,
-        jsgraph()->IntPtrConstant(Name::kEmptyHashField), efalse0, if_false0);
-    efalse0 = graph()->NewNode(
-        simplified()->StoreField(AccessBuilder::ForStringLength()), vfalse0,
-        jsgraph()->SmiConstant(1), efalse0, if_false0);
-    efalse0 = graph()->NewNode(
-        machine()->Store(StoreRepresentation(MachineRepresentation::kWord16,
-                                             kNoWriteBarrier)),
-        vfalse0, jsgraph()->IntPtrConstant(SeqTwoByteString::kHeaderSize -
-                                           kHeapObjectTag),
-        code, efalse0, if_false0);
+    if_true1 = graph()->NewNode(common()->Merge(2), if_true1, if_false0);
+    etrue1 =
+        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse0, if_true1);
+    Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
+    Runtime::FunctionId id = Runtime::kStringCharFromCode;
+    CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+        graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
+    vtrue1 = etrue1 = graph()->NewNode(
+        common()->Call(desc), jsgraph()->CEntryStubConstant(1),
+        ChangeInt32ToSmi(code),
+        jsgraph()->ExternalConstant(ExternalReference(id, isolate())),
+        jsgraph()->Int32Constant(1), jsgraph()->NoContextConstant(), etrue1,
+        if_true1);
   }
 
-  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+  control = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, control);
   value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                           vtrue0, vfalse0, control);
+                           vtrue1, vfalse1, control);
 
   return ValueEffectControl(value, effect, control);
 }
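
The restructured lowering keeps only the fast path inline: probe the one-character string cache, and on a miss defer to the %StringCharFromCode runtime function instead of allocating the sequential string in generated code. A scalar sketch of that probe-then-slow-call shape, with a plain map standing in for the cache and a direct call standing in for the runtime call (that the runtime side also populates the cache is an assumption of this sketch):

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <unordered_map>

    std::unordered_map<uint32_t, std::string> cache;  // 1-char string cache

    std::string RuntimeStringCharFromCode(uint32_t code) {
      // Slow path, standing in for Runtime::kStringCharFromCode.
      std::string s(1, static_cast<char>(code));
      cache[code] = s;  // assumed: the C++ side fills the cache
      return s;
    }

    std::string StringFromCharCode(uint32_t code) {
      auto it = cache.find(code);  // fast path: cache hit
      if (it != cache.end()) return it->second;
      return RuntimeStringCharFromCode(code);  // deferred slow path
    }

    int main() {
      std::cout << StringFromCharCode('a') << "\n";  // miss, then slow call
      std::cout << StringFromCharCode('a') << "\n";  // hit
    }
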
@@ -3412,6 +3391,137 @@
 }
 
 EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::BuildFloat64RoundDown(Node* value, Node* effect,
+                                               Node* control) {
+  if (machine()->Float64RoundDown().IsSupported()) {
+    value = graph()->NewNode(machine()->Float64RoundDown().op(), value);
+  } else {
+    Node* const one = jsgraph()->Float64Constant(1.0);
+    Node* const zero = jsgraph()->Float64Constant(0.0);
+    Node* const minus_one = jsgraph()->Float64Constant(-1.0);
+    Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
+    Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
+    Node* const minus_two_52 =
+        jsgraph()->Float64Constant(-4503599627370496.0E0);
+    Node* const input = value;
+
+    // General case for floor.
+    //
+    //   if 0.0 < input then
+    //     if 2^52 <= input then
+    //       input
+    //     else
+    //       let temp1 = (2^52 + input) - 2^52 in
+    //       if input < temp1 then
+    //         temp1 - 1
+    //       else
+    //         temp1
+    //   else
+    //     if input == 0 then
+    //       input
+    //     else
+    //       if input <= -2^52 then
+    //         input
+    //       else
+    //         let temp1 = -0 - input in
+    //         let temp2 = (2^52 + temp1) - 2^52 in
+    //         if temp2 < temp1 then
+    //           -1 - temp2
+    //         else
+    //           -0 - temp2
+    //
+    // Note: We do not use the Diamond helper class here, because it really
+    // hurts readability with nested diamonds.
+
+    Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
+    Node* branch0 =
+        graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+    Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+    Node* vtrue0;
+    {
+      Node* check1 =
+          graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
+      Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+
+      Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+      Node* vtrue1 = input;
+
+      Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+      Node* vfalse1;
+      {
+        Node* temp1 = graph()->NewNode(
+            machine()->Float64Sub(),
+            graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
+        vfalse1 = graph()->NewNode(
+            common()->Select(MachineRepresentation::kFloat64),
+            graph()->NewNode(machine()->Float64LessThan(), input, temp1),
+            graph()->NewNode(machine()->Float64Sub(), temp1, one), temp1);
+      }
+
+      if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+      vtrue0 =
+          graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                           vtrue1, vfalse1, if_true0);
+    }
+
+    Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+    Node* vfalse0;
+    {
+      Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
+      Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                       check1, if_false0);
+
+      Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+      Node* vtrue1 = input;
+
+      Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+      Node* vfalse1;
+      {
+        Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
+                                        input, minus_two_52);
+        Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                         check2, if_false1);
+
+        Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+        Node* vtrue2 = input;
+
+        Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+        Node* vfalse2;
+        {
+          Node* temp1 =
+              graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
+          Node* temp2 = graph()->NewNode(
+              machine()->Float64Sub(),
+              graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
+          vfalse2 = graph()->NewNode(
+              common()->Select(MachineRepresentation::kFloat64),
+              graph()->NewNode(machine()->Float64LessThan(), temp2, temp1),
+              graph()->NewNode(machine()->Float64Sub(), minus_one, temp2),
+              graph()->NewNode(machine()->Float64Sub(), minus_zero, temp2));
+        }
+
+        if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+        vfalse1 =
+            graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                             vtrue2, vfalse2, if_false1);
+      }
+
+      if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+      vfalse0 =
+          graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                           vtrue1, vfalse1, if_false0);
+    }
+
+    control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+    value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                             vtrue0, vfalse0, control);
+  }
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
 EffectControlLinearizer::LowerFloat64RoundDown(Node* node, Node* effect,
                                                Node* control) {
   // Nothing to be done if a fast hardware instruction is available.
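
BuildFloat64RoundDown implements the classic bias trick spelled out in the comment above: adding and then subtracting 2^52 forces rounding to an integer in double precision, after which one correction step yields floor; the negative side works on -0 - input so the sign of zero is preserved. A scalar C++ transcription of the commented algorithm (assumes round-to-nearest FP mode and no compiler FP contraction; volatile blocks constant folding of the bias expression):

    #include <cstdio>

    double Floor(double input) {
      const double two_52 = 4503599627370496.0;  // 2^52
      if (0.0 < input) {
        if (two_52 <= input) return input;                  // already integral
        volatile double temp1 = (two_52 + input) - two_52;  // round to integer
        return input < temp1 ? temp1 - 1.0 : temp1;         // undo upward round
      }
      if (input == 0.0) return input;      // preserves -0
      if (input <= -two_52) return input;  // already integral
      volatile double temp1 = -0.0 - input;               // negate, keep zero sign
      volatile double temp2 = (two_52 + temp1) - two_52;  // round to integer
      return temp2 < temp1 ? -1.0 - temp2 : -0.0 - temp2;
    }

    int main() {
      std::printf("%g %g %g\n", Floor(2.5), Floor(-2.5), Floor(-0.25));  // 2 -3 -1
    }
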
@@ -3419,108 +3529,78 @@
     return ValueEffectControl(node, effect, control);
   }
 
+  Node* const input = node->InputAt(0);
+  return BuildFloat64RoundDown(input, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerFloat64RoundTiesEven(Node* node, Node* effect,
+                                                   Node* control) {
+  // Nothing to be done if a fast hardware instruction is available.
+  if (machine()->Float64RoundTiesEven().IsSupported()) {
+    return ValueEffectControl(node, effect, control);
+  }
+
   Node* const one = jsgraph()->Float64Constant(1.0);
+  Node* const two = jsgraph()->Float64Constant(2.0);
+  Node* const half = jsgraph()->Float64Constant(0.5);
   Node* const zero = jsgraph()->Float64Constant(0.0);
-  Node* const minus_one = jsgraph()->Float64Constant(-1.0);
-  Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
-  Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
-  Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
   Node* const input = node->InputAt(0);
 
-  // General case for floor.
+  // General case for round ties to even:
   //
-  //   if 0.0 < input then
-  //     if 2^52 <= input then
-  //       input
-  //     else
-  //       let temp1 = (2^52 + input) - 2^52 in
-  //       if input < temp1 then
-  //         temp1 - 1
-  //       else
-  //         temp1
+  //   let value = floor(input) in
+  //   let temp1 = input - value in
+  //   if temp1 < 0.5 then
+  //     value
+  //   else if 0.5 < temp1 then
+  //     value + 1.0
   //   else
-  //     if input == 0 then
-  //       input
+  //     let temp2 = value % 2.0 in
+  //     if temp2 == 0.0 then
+  //       value
   //     else
-  //       if input <= -2^52 then
-  //         input
-  //       else
-  //         let temp1 = -0 - input in
-  //         let temp2 = (2^52 + temp1) - 2^52 in
-  //         if temp2 < temp1 then
-  //           -1 - temp2
-  //         else
-  //           -0 - temp2
+  //       value + 1.0
   //
   // Note: We do not use the Diamond helper class here, because it really hurts
   // readability with nested diamonds.
 
-  Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
-  Node* branch0 =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+  ValueEffectControl continuation =
+      BuildFloat64RoundDown(input, effect, control);
+  Node* value = continuation.value;
+  effect = continuation.effect;
+  control = continuation.control;
+
+  Node* temp1 = graph()->NewNode(machine()->Float64Sub(), input, value);
+
+  Node* check0 = graph()->NewNode(machine()->Float64LessThan(), temp1, half);
+  Node* branch0 = graph()->NewNode(common()->Branch(), check0, control);
 
   Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* vtrue0;
-  {
-    Node* check1 =
-        graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
-    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* vtrue1 = input;
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* vfalse1;
-    {
-      Node* temp1 = graph()->NewNode(
-          machine()->Float64Sub(),
-          graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
-      vfalse1 = graph()->NewNode(
-          common()->Select(MachineRepresentation::kFloat64),
-          graph()->NewNode(machine()->Float64LessThan(), input, temp1),
-          graph()->NewNode(machine()->Float64Sub(), temp1, one), temp1);
-    }
-
-    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                              vtrue1, vfalse1, if_true0);
-  }
+  Node* vtrue0 = value;
 
   Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
   Node* vfalse0;
   {
-    Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
-    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                     check1, if_false0);
+    Node* check1 = graph()->NewNode(machine()->Float64LessThan(), half, temp1);
+    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
 
     Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* vtrue1 = input;
+    Node* vtrue1 = graph()->NewNode(machine()->Float64Add(), value, one);
 
     Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
     Node* vfalse1;
     {
-      Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
-                                      input, minus_two_52);
-      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                       check2, if_false1);
+      Node* temp2 = graph()->NewNode(machine()->Float64Mod(), value, two);
+
+      Node* check2 = graph()->NewNode(machine()->Float64Equal(), temp2, zero);
+      Node* branch2 = graph()->NewNode(common()->Branch(), check2, if_false1);
 
       Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
-      Node* vtrue2 = input;
+      Node* vtrue2 = value;
 
       Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
-      Node* vfalse2;
-      {
-        Node* temp1 =
-            graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
-        Node* temp2 = graph()->NewNode(
-            machine()->Float64Sub(),
-            graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
-        vfalse2 = graph()->NewNode(
-            common()->Select(MachineRepresentation::kFloat64),
-            graph()->NewNode(machine()->Float64LessThan(), temp2, temp1),
-            graph()->NewNode(machine()->Float64Sub(), minus_one, temp2),
-            graph()->NewNode(machine()->Float64Sub(), minus_zero, temp2));
-      }
+      Node* vfalse2 = graph()->NewNode(machine()->Float64Add(), value, one);
 
       if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
       vfalse1 =
@@ -3534,11 +3614,11 @@
                          vtrue1, vfalse1, if_false0);
   }
 
-  Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  Node* value =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                       vtrue0, vfalse0, merge0);
-  return ValueEffectControl(value, effect, merge0);
+  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+  value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                           vtrue0, vfalse0, control);
+
+  return ValueEffectControl(value, effect, control);
 }
 
 EffectControlLinearizer::ValueEffectControl
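
LowerFloat64RoundTiesEven reuses the floor path: compute value = floor(input) and the fraction input - value, then keep value, take value + 1, or break an exact 0.5 tie by the parity of value (fmod(value, 2) == 0 keeps value, otherwise rounds up). A scalar transcription of that decision tree, with std::floor standing in for the BuildFloat64RoundDown output:

    #include <cmath>
    #include <cstdio>

    double RoundTiesEven(double input) {
      double value = std::floor(input);
      double temp1 = input - value;          // fractional part in [0, 1)
      if (temp1 < 0.5) return value;         // closer to floor
      if (0.5 < temp1) return value + 1.0;   // closer to ceiling
      double temp2 = std::fmod(value, 2.0);  // exact tie: decide by parity
      return temp2 == 0.0 ? value : value + 1.0;
    }

    int main() {
      std::printf("%g %g %g\n", RoundTiesEven(0.5), RoundTiesEven(1.5),
                  RoundTiesEven(2.5));  // 0 2 2
    }
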
diff --git a/src/compiler/effect-control-linearizer.h b/src/compiler/effect-control-linearizer.h
index 0199fd0..4ed03c6 100644
--- a/src/compiler/effect-control-linearizer.h
+++ b/src/compiler/effect-control-linearizer.h
@@ -8,6 +8,7 @@
 #include "src/compiler/common-operator.h"
 #include "src/compiler/node.h"
 #include "src/compiler/simplified-operator.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -24,10 +25,12 @@
 class JSGraph;
 class Graph;
 class Schedule;
+class SourcePositionTable;
 
-class EffectControlLinearizer {
+class V8_EXPORT_PRIVATE EffectControlLinearizer {
  public:
-  EffectControlLinearizer(JSGraph* graph, Schedule* schedule, Zone* temp_zone);
+  EffectControlLinearizer(JSGraph* graph, Schedule* schedule, Zone* temp_zone,
+                          SourcePositionTable* source_positions);
 
   void Run();
 
@@ -55,6 +58,8 @@
                                                Node* control);
   ValueEffectControl LowerChangeFloat64ToTagged(Node* node, Node* effect,
                                                 Node* control);
+  ValueEffectControl LowerChangeFloat64ToTaggedPointer(Node* node, Node* effect,
+                                                       Node* control);
   ValueEffectControl LowerChangeTaggedSignedToInt32(Node* node, Node* effect,
                                                     Node* control);
   ValueEffectControl LowerChangeTaggedToBit(Node* node, Node* effect,
@@ -73,8 +78,6 @@
                                       Node* effect, Node* control);
   ValueEffectControl LowerCheckIf(Node* node, Node* frame_state, Node* effect,
                                   Node* control);
-  ValueEffectControl LowerCheckHeapObject(Node* node, Node* frame_state,
-                                          Node* effect, Node* control);
   ValueEffectControl LowerCheckedInt32Add(Node* node, Node* frame_state,
                                           Node* effect, Node* control);
   ValueEffectControl LowerCheckedInt32Sub(Node* node, Node* frame_state,
@@ -113,6 +116,10 @@
                                                       Node* frame_state,
                                                       Node* effect,
                                                       Node* control);
+  ValueEffectControl LowerCheckedTaggedToTaggedPointer(Node* node,
+                                                       Node* frame_state,
+                                                       Node* effect,
+                                                       Node* control);
   ValueEffectControl LowerChangeTaggedToFloat64(Node* node, Node* effect,
                                                 Node* control);
   ValueEffectControl LowerTruncateTaggedToBit(Node* node, Node* effect,
@@ -177,6 +184,8 @@
                                          Node* control);
   ValueEffectControl LowerFloat64RoundDown(Node* node, Node* effect,
                                            Node* control);
+  ValueEffectControl LowerFloat64RoundTiesEven(Node* node, Node* effect,
+                                               Node* control);
   ValueEffectControl LowerFloat64RoundTruncate(Node* node, Node* effect,
                                                Node* control);
 
@@ -188,6 +197,8 @@
   ValueEffectControl BuildCheckedHeapNumberOrOddballToFloat64(
       CheckTaggedInputMode mode, Node* value, Node* frame_state, Node* effect,
       Node* control);
+  ValueEffectControl BuildFloat64RoundDown(Node* value, Node* effect,
+                                           Node* control);
   ValueEffectControl LowerStringComparison(Callable const& callable, Node* node,
                                            Node* effect, Node* control);
 
@@ -217,6 +228,7 @@
   Schedule* schedule_;
   Zone* temp_zone_;
   RegionObservability region_observability_ = RegionObservability::kObservable;
+  SourcePositionTable* source_positions_;
 
   SetOncePointer<Operator const> to_number_operator_;
 };
diff --git a/src/compiler/escape-analysis-reducer.cc b/src/compiler/escape-analysis-reducer.cc
index d997813..f7708f8 100644
--- a/src/compiler/escape-analysis-reducer.cc
+++ b/src/compiler/escape-analysis-reducer.cc
@@ -327,7 +327,8 @@
       if (escape_analysis()->IsCyclicObjectState(effect, input)) {
         // TODO(mstarzinger): Represent cyclic object states differently to
         // ensure the scheduler can properly handle such object states.
-        FATAL("Cyclic object state detected by escape analysis.");
+        compilation_failed_ = true;
+        return nullptr;
       }
       if (Node* object_state =
               escape_analysis()->GetOrCreateObjectState(effect, input)) {
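
Rather than aborting the whole process with FATAL, the reducer now records the failure and unwinds, letting the pipeline abandon just this compilation job. A minimal sketch of that bail-out pattern, with hypothetical names:

    #include <iostream>

    class Reducer {
     public:
      // Returns nullptr and latches the failure flag instead of crashing.
      const char* ReduceObjectState(bool cyclic) {
        if (cyclic) {
          compilation_failed_ = true;  // caller checks this and bails out
          return nullptr;
        }
        return "object-state";
      }
      bool compilation_failed() const { return compilation_failed_; }

     private:
      bool compilation_failed_ = false;
    };

    int main() {
      Reducer r;
      r.ReduceObjectState(/*cyclic=*/true);
      if (r.compilation_failed()) std::cout << "abort this compilation only\n";
    }
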
diff --git a/src/compiler/escape-analysis-reducer.h b/src/compiler/escape-analysis-reducer.h
index ad67479..61e7607 100644
--- a/src/compiler/escape-analysis-reducer.h
+++ b/src/compiler/escape-analysis-reducer.h
@@ -5,9 +5,11 @@
 #ifndef V8_COMPILER_ESCAPE_ANALYSIS_REDUCER_H_
 #define V8_COMPILER_ESCAPE_ANALYSIS_REDUCER_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/bit-vector.h"
 #include "src/compiler/escape-analysis.h"
 #include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -16,7 +18,8 @@
 // Forward declarations.
 class JSGraph;
 
-class EscapeAnalysisReducer final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE EscapeAnalysisReducer final
+    : public NON_EXPORTED_BASE(AdvancedReducer) {
  public:
   EscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph,
                         EscapeAnalysis* escape_analysis, Zone* zone);
@@ -27,6 +30,8 @@
   // after this reducer has been applied. Has no effect in release mode.
   void VerifyReplacement() const;
 
+  bool compilation_failed() const { return compilation_failed_; }
+
  private:
   Reduction ReduceLoad(Node* node);
   Reduction ReduceStore(Node* node);
@@ -52,6 +57,7 @@
   // and nodes that do not need a visit from ReduceDeoptState etc.
   BitVector fully_reduced_;
   bool exists_virtual_allocate_;
+  bool compilation_failed_ = false;
 
   DISALLOW_COPY_AND_ASSIGN(EscapeAnalysisReducer);
 };
diff --git a/src/compiler/escape-analysis.cc b/src/compiler/escape-analysis.cc
index 3f889cc..0218045 100644
--- a/src/compiler/escape-analysis.cc
+++ b/src/compiler/escape-analysis.cc
@@ -420,7 +420,7 @@
 
 bool IsEquivalentPhi(Node* phi, ZoneVector<Node*>& inputs) {
   if (phi->opcode() != IrOpcode::kPhi) return false;
-  if (phi->op()->ValueInputCount() != inputs.size()) {
+  if (static_cast<size_t>(phi->op()->ValueInputCount()) != inputs.size()) {
     return false;
   }
   for (size_t i = 0; i < inputs.size(); ++i) {
@@ -481,9 +481,9 @@
       SetField(i, field);
       TRACE("    Field %zu agree on rep #%d\n", i, field->id());
     } else {
-      int arity = at->opcode() == IrOpcode::kEffectPhi
-                      ? at->op()->EffectInputCount()
-                      : at->op()->ValueInputCount();
+      size_t arity = at->opcode() == IrOpcode::kEffectPhi
+                         ? at->op()->EffectInputCount()
+                         : at->op()->ValueInputCount();
       if (cache->fields().size() == arity) {
         changed = MergeFields(i, at, cache, graph, common) || changed;
       } else {
@@ -798,6 +798,7 @@
       case IrOpcode::kStringEqual:
       case IrOpcode::kStringLessThan:
       case IrOpcode::kStringLessThanOrEqual:
+      case IrOpcode::kTypeGuard:
       case IrOpcode::kPlainPrimitiveToNumber:
       case IrOpcode::kPlainPrimitiveToWord32:
       case IrOpcode::kPlainPrimitiveToFloat64:
@@ -1134,7 +1135,17 @@
                                                      Node* node) {
   if (obj->NeedCopyForModification()) {
     state = CopyForModificationAt(state, node);
-    return state->Copy(obj, status_analysis_->GetAlias(obj->id()));
+    // TODO(tebbi): this copies the complete virtual state. Replace with a more
+    // precise analysis of which objects are actually affected by the change.
+    Alias changed_alias = status_analysis_->GetAlias(obj->id());
+    for (Alias alias = 0; alias < state->size(); ++alias) {
+      if (VirtualObject* next_obj = state->VirtualObjectFromAlias(alias)) {
+        if (alias != changed_alias && next_obj->NeedCopyForModification()) {
+          state->Copy(next_obj, alias);
+        }
+      }
+    }
+    return state->Copy(obj, changed_alias);
   }
   return obj;
 }
@@ -1338,9 +1349,19 @@
 
 namespace {
 
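+// Returns whether {access} maps onto a pointer-sized field slot. On
+// big-endian targets, sub-pointer fields are addressed by the slot they end
+// in: e.g., assuming kPointerSize == 8, a kWord32 access at offset 4 ends on
+// a pointer boundary (4 + 4 == 8) and is correct, while offset 0 is not.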
+bool IsOffsetForFieldAccessCorrect(const FieldAccess& access) {
+#if V8_TARGET_LITTLE_ENDIAN
+  return (access.offset % kPointerSize) == 0;
+#else
+  return ((access.offset +
+           (1 << ElementSizeLog2Of(access.machine_type.representation()))) %
+          kPointerSize) == 0;
+#endif
+}
+
 int OffsetForFieldAccess(Node* node) {
   FieldAccess access = FieldAccessOf(node->op());
-  DCHECK_EQ(access.offset % kPointerSize, 0);
+  DCHECK(IsOffsetForFieldAccessCorrect(access));
   return access.offset / kPointerSize;
 }
 
@@ -1398,7 +1419,20 @@
   if (VirtualObject* object = GetVirtualObject(state, from)) {
     if (!object->IsTracked()) return;
     int offset = OffsetForFieldAccess(node);
-    if (static_cast<size_t>(offset) >= object->field_count()) return;
+    if (static_cast<size_t>(offset) >= object->field_count()) {
+      // We have a load from a field that is not inside the {object}. This
+      // can only happen with conflicting type feedback and for dead {node}s.
+      // For now, we just mark the {object} as escaping.
+      // TODO(turbofan): Consider introducing an Undefined or None operator
+      // that we can replace this load with, since we know it's dead code.
+      if (status_analysis_->SetEscaped(from)) {
+        TRACE(
+            "Setting #%d (%s) to escaped because load field #%d from "
+            "offset %d outside of object\n",
+            from->id(), from->op()->mnemonic(), node->id(), offset);
+      }
+      return;
+    }
     Node* value = object->GetField(offset);
     if (value) {
       value = ResolveReplacement(value);
@@ -1406,7 +1440,7 @@
     // Record that the load has this alias.
     UpdateReplacement(state, node, value);
   } else if (from->opcode() == IrOpcode::kPhi &&
-             FieldAccessOf(node->op()).offset % kPointerSize == 0) {
+             IsOffsetForFieldAccessCorrect(FieldAccessOf(node->op()))) {
     int offset = OffsetForFieldAccess(node);
     // Only binary phis are supported for now.
     ProcessLoadFromPhi(offset, from, node, state);
@@ -1463,7 +1497,20 @@
   if (VirtualObject* object = GetVirtualObject(state, to)) {
     if (!object->IsTracked()) return;
     int offset = OffsetForFieldAccess(node);
-    if (static_cast<size_t>(offset) >= object->field_count()) return;
+    if (static_cast<size_t>(offset) >= object->field_count()) {
+      // We have a store to a field that is not inside the {object}. This
+      // can only happen with conflicting type feedback and for dead {node}s.
+      // For now, we just mark the {object} as escaping.
+      // TODO(turbofan): Consider just eliminating the store in the reducer
+      // pass, as it's dead code anyway.
+      if (status_analysis_->SetEscaped(to)) {
+        TRACE(
+            "Setting #%d (%s) to escaped because store field #%d to "
+            "offset %d outside of object\n",
+            to->id(), to->op()->mnemonic(), node->id(), offset);
+      }
+      return;
+    }
     Node* val = ResolveReplacement(NodeProperties::GetValueInput(node, 1));
     // TODO(mstarzinger): The following is a workaround to not track some well
     // known raw fields. We only ever store default initial values into these
@@ -1541,8 +1588,8 @@
         }
         int input_count = static_cast<int>(cache_->fields().size());
         Node* new_object_state =
-            graph()->NewNode(common()->ObjectState(input_count, vobj->id()),
-                             input_count, &cache_->fields().front());
+            graph()->NewNode(common()->ObjectState(input_count), input_count,
+                             &cache_->fields().front());
         vobj->SetObjectState(new_object_state);
         TRACE(
             "Creating object state #%d for vobj %p (from node #%d) at effect "
diff --git a/src/compiler/escape-analysis.h b/src/compiler/escape-analysis.h
index ec5154e..b85efe7 100644
--- a/src/compiler/escape-analysis.h
+++ b/src/compiler/escape-analysis.h
@@ -6,6 +6,7 @@
 #define V8_COMPILER_ESCAPE_ANALYSIS_H_
 
 #include "src/compiler/graph.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -20,7 +21,7 @@
 
 // EscapeObjectAnalysis simulates stores to determine values of loads if
 // an object is virtual and eliminated.
-class EscapeAnalysis {
+class V8_EXPORT_PRIVATE EscapeAnalysis {
  public:
   EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common, Zone* zone);
   ~EscapeAnalysis();
diff --git a/src/compiler/frame-elider.cc b/src/compiler/frame-elider.cc
index 5ad4aad..bb17d12 100644
--- a/src/compiler/frame-elider.cc
+++ b/src/compiler/frame-elider.cc
@@ -24,7 +24,8 @@
     for (int i = block->code_start(); i < block->code_end(); ++i) {
       const Instruction* instr = InstructionAt(i);
       if (instr->IsCall() || instr->IsDeoptimizeCall() ||
-          instr->arch_opcode() == ArchOpcode::kArchStackPointer) {
+          instr->arch_opcode() == ArchOpcode::kArchStackPointer ||
+          instr->arch_opcode() == ArchOpcode::kArchFramePointer) {
         block->mark_needs_frame();
         break;
       }
diff --git a/src/compiler/gap-resolver.cc b/src/compiler/gap-resolver.cc
index 7b04198..1ba1044 100644
--- a/src/compiler/gap-resolver.cc
+++ b/src/compiler/gap-resolver.cc
@@ -14,27 +14,124 @@
 
 namespace {
 
+#define REP_BIT(rep) (1 << static_cast<int>(rep))
+
+const int kFloat32Bit = REP_BIT(MachineRepresentation::kFloat32);
+const int kFloat64Bit = REP_BIT(MachineRepresentation::kFloat64);
+
 inline bool Blocks(MoveOperands* move, InstructionOperand destination) {
-  return move->Blocks(destination);
+  return !move->IsEliminated() && move->source().InterferesWith(destination);
 }
 
+// Splits an FP move between two location operands into the equivalent series of
+// moves between smaller sub-operands, e.g. a double move to two single moves.
+// This helps reduce the number of cycles that would normally occur under FP
+// aliasing, and makes swaps much easier to implement.
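+// E.g., under an aliasing scheme where two kFloat32 registers overlap each
+// kFloat64 register (as on ARM), the kFloat64 move d1 -> d3 splits into the
+// kFloat32 moves s2 -> s6 and s3 -> s7.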
+MoveOperands* Split(MoveOperands* move, MachineRepresentation smaller_rep,
+                    ParallelMove* moves) {
+  DCHECK(!kSimpleFPAliasing);
+  // Splitting is only possible when the slot size is the same as float size.
+  DCHECK_EQ(kPointerSize, kFloatSize);
+  const LocationOperand& src_loc = LocationOperand::cast(move->source());
+  const LocationOperand& dst_loc = LocationOperand::cast(move->destination());
+  MachineRepresentation dst_rep = dst_loc.representation();
+  DCHECK_NE(smaller_rep, dst_rep);
+  auto src_kind = src_loc.location_kind();
+  auto dst_kind = dst_loc.location_kind();
 
-inline bool IsRedundant(MoveOperands* move) { return move->IsRedundant(); }
+  int aliases =
+      1 << (ElementSizeLog2Of(dst_rep) - ElementSizeLog2Of(smaller_rep));
+  int base = -1;
+  USE(base);
+  DCHECK_EQ(aliases, RegisterConfiguration::Turbofan()->GetAliases(
+                         dst_rep, 0, smaller_rep, &base));
+
+  int src_index = -1;
+  int slot_size = (1 << ElementSizeLog2Of(smaller_rep)) / kPointerSize;
+  int src_step = 1;
+  if (src_kind == LocationOperand::REGISTER) {
+    src_index = src_loc.register_code() * aliases;
+  } else {
+    src_index = src_loc.index();
+    // For operands that occupy multiple slots, the index refers to the last
+    // slot. On little-endian architectures, we start at the high slot and use a
+    // negative step so that register-to-slot moves are in the correct order.
+    src_step = -slot_size;
+  }
+  int dst_index = -1;
+  int dst_step = 1;
+  if (dst_kind == LocationOperand::REGISTER) {
+    dst_index = dst_loc.register_code() * aliases;
+  } else {
+    dst_index = dst_loc.index();
+    dst_step = -slot_size;
+  }
+
+  // Reuse 'move' for the first fragment. It is not pending.
+  move->set_source(AllocatedOperand(src_kind, smaller_rep, src_index));
+  move->set_destination(AllocatedOperand(dst_kind, smaller_rep, dst_index));
+  // Add the remaining fragment moves.
+  for (int i = 1; i < aliases; ++i) {
+    src_index += src_step;
+    dst_index += dst_step;
+    moves->AddMove(AllocatedOperand(src_kind, smaller_rep, src_index),
+                   AllocatedOperand(dst_kind, smaller_rep, dst_index));
+  }
+  // Return the first fragment.
+  return move;
+}
 
 }  // namespace
 
+void GapResolver::Resolve(ParallelMove* moves) {
+  // Clear redundant moves, and collect FP move representations if aliasing
+  // is non-simple.
+  int reps = 0;
+  for (size_t i = 0; i < moves->size();) {
+    MoveOperands* move = (*moves)[i];
+    if (move->IsRedundant()) {
+      (*moves)[i] = moves->back();
+      moves->pop_back();
+      continue;
+    }
+    i++;
+    if (!kSimpleFPAliasing && move->destination().IsFPRegister()) {
+      reps |=
+          REP_BIT(LocationOperand::cast(move->destination()).representation());
+    }
+  }
 
-void GapResolver::Resolve(ParallelMove* moves) const {
-  // Clear redundant moves.
-  auto it =
-      std::remove_if(moves->begin(), moves->end(), std::ptr_fun(IsRedundant));
-  moves->erase(it, moves->end());
-  for (MoveOperands* move : *moves) {
+  if (!kSimpleFPAliasing) {
+    if (reps && !base::bits::IsPowerOfTwo32(reps)) {
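+      // More than one representation bit is set, so FP moves of mixed widths
+      // are present and may need to be split.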
+      // Start with the smallest FP moves, so we never encounter smaller moves
+      // in the middle of a cycle of larger moves.
+      if ((reps & kFloat32Bit) != 0) {
+        split_rep_ = MachineRepresentation::kFloat32;
+        for (size_t i = 0; i < moves->size(); ++i) {
+          auto move = (*moves)[i];
+          if (!move->IsEliminated() && move->destination().IsFloatRegister())
+            PerformMove(moves, move);
+        }
+      }
+      if ((reps & kFloat64Bit) != 0) {
+        split_rep_ = MachineRepresentation::kFloat64;
+        for (size_t i = 0; i < moves->size(); ++i) {
+          auto move = (*moves)[i];
+          if (!move->IsEliminated() && move->destination().IsDoubleRegister())
+            PerformMove(moves, move);
+        }
+      }
+    }
+    split_rep_ = MachineRepresentation::kSimd128;
+  }
+
+  for (size_t i = 0; i < moves->size(); ++i) {
+    auto move = (*moves)[i];
     if (!move->IsEliminated()) PerformMove(moves, move);
   }
 }
 
-void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) const {
+void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) {
   // Each call to this function performs a move and deletes it from the move
   // graph.  We first recursively perform any move blocking this one.  We mark a
   // move as "pending" on entry to PerformMove in order to detect cycles in the
@@ -45,15 +142,32 @@
 
   // Clear this move's destination to indicate a pending move.  The actual
   // destination is saved on the side.
-  DCHECK(!move->source().IsInvalid());  // Or else it will look eliminated.
+  InstructionOperand source = move->source();
+  DCHECK(!source.IsInvalid());  // Or else it will look eliminated.
   InstructionOperand destination = move->destination();
   move->SetPending();
 
+  // We may need to split moves between FP locations differently.
+  bool is_fp_loc_move = !kSimpleFPAliasing && destination.IsFPLocationOperand();
+
   // Perform a depth-first traversal of the move graph to resolve dependencies.
   // Any unperformed, unpending move with a source the same as this one's
   // destination blocks this one so recursively perform all such moves.
-  for (MoveOperands* other : *moves) {
-    if (other->Blocks(destination) && !other->IsPending()) {
+  for (size_t i = 0; i < moves->size(); ++i) {
+    auto other = (*moves)[i];
+    if (other->IsEliminated()) continue;
+    if (other->IsPending()) continue;
+    if (other->source().InterferesWith(destination)) {
+      if (!kSimpleFPAliasing && is_fp_loc_move &&
+          LocationOperand::cast(other->source()).representation() >
+              split_rep_) {
+        // 'other' must also be an FP location move. Break it into fragments
+        // of the same size as 'move'. 'other' is set to one of the fragments,
+        // and the rest are appended to 'moves'.
+        other = Split(other, split_rep_, moves);
+        // 'other' may no longer block the destination.
+        if (!other->source().InterferesWith(destination)) continue;
+      }
       // Though PerformMove can change any source operand in the move graph,
       // this call cannot create a blocking move via a swap (this loop does not
       // miss any).  Assume there is a non-blocking move with source A and this
@@ -67,18 +181,18 @@
     }
   }
 
-  // We are about to resolve this move and don't need it marked as pending, so
-  // restore its destination.
-  move->set_destination(destination);
-
   // This move's source may have changed due to swaps to resolve cycles and so
   // it may now be the last move in the cycle.  If so remove it.
-  InstructionOperand source = move->source();
-  if (source.InterferesWith(destination)) {
+  source = move->source();
+  if (source.EqualsCanonicalized(destination)) {
     move->Eliminate();
     return;
   }
 
+  // We are about to resolve this move and don't need it marked as pending, so
+  // restore its destination.
+  move->set_destination(destination);
+
   // The move may be blocked on a (at most one) pending move, in which case we
   // have a cycle.  Search for such a blocking move and perform a swap to
   // resolve it.
@@ -91,7 +205,6 @@
     return;
   }
 
-  DCHECK((*blocker)->IsPending());
   // Ensure source is a register or both are stack slots, to limit swap cases.
   if (source.IsStackSlot() || source.IsFPStackSlot()) {
     std::swap(source, destination);
@@ -99,14 +212,36 @@
   assembler_->AssembleSwap(&source, &destination);
   move->Eliminate();
 
-  // Any unperformed (including pending) move with a source of either this
-  // move's source or destination needs to have their source changed to
-  // reflect the state of affairs after the swap.
-  for (MoveOperands* other : *moves) {
-    if (other->Blocks(source)) {
-      other->set_source(destination);
-    } else if (other->Blocks(destination)) {
-      other->set_source(source);
+  // Update outstanding moves whose source may now have been moved.
+  if (!kSimpleFPAliasing && is_fp_loc_move) {
+    // We may have to split larger moves.
+    for (size_t i = 0; i < moves->size(); ++i) {
+      auto other = (*moves)[i];
+      if (other->IsEliminated()) continue;
+      if (source.InterferesWith(other->source())) {
+        if (LocationOperand::cast(other->source()).representation() >
+            split_rep_) {
+          other = Split(other, split_rep_, moves);
+          if (!source.InterferesWith(other->source())) continue;
+        }
+        other->set_source(destination);
+      } else if (destination.InterferesWith(other->source())) {
+        if (LocationOperand::cast(other->source()).representation() >
+            split_rep_) {
+          other = Split(other, split_rep_, moves);
+          if (!destination.InterferesWith(other->source())) continue;
+        }
+        other->set_source(source);
+      }
+    }
+  } else {
+    for (auto other : *moves) {
+      if (other->IsEliminated()) continue;
+      if (source.EqualsCanonicalized(other->source())) {
+        other->set_source(destination);
+      } else if (destination.EqualsCanonicalized(other->source())) {
+        other->set_source(source);
+      }
     }
   }
 }
diff --git a/src/compiler/gap-resolver.h b/src/compiler/gap-resolver.h
index 19806f5..d4c4025 100644
--- a/src/compiler/gap-resolver.h
+++ b/src/compiler/gap-resolver.h
@@ -26,18 +26,24 @@
                               InstructionOperand* destination) = 0;
   };
 
-  explicit GapResolver(Assembler* assembler) : assembler_(assembler) {}
+  explicit GapResolver(Assembler* assembler)
+      : assembler_(assembler), split_rep_(MachineRepresentation::kSimd128) {}
 
   // Resolve a set of parallel moves, emitting assembler instructions.
-  void Resolve(ParallelMove* parallel_move) const;
+  void Resolve(ParallelMove* parallel_move);
 
  private:
-  // Perform the given move, possibly requiring other moves to satisfy
-  // dependencies.
-  void PerformMove(ParallelMove* moves, MoveOperands* move) const;
+  // Performs the given move, possibly performing other moves to unblock the
+  // destination operand.
+  void PerformMove(ParallelMove* moves, MoveOperands* move);
 
   // Assembler used to emit moves and save registers.
   Assembler* const assembler_;
+
+  // While resolving moves, the largest FP representation that can be moved.
+  // Any larger moves must be split into an equivalent series of moves of this
+  // representation.
+  MachineRepresentation split_rep_;
 };
 
 }  // namespace compiler
diff --git a/src/compiler/graph-reducer.h b/src/compiler/graph-reducer.h
index a089c12..b95cf9d 100644
--- a/src/compiler/graph-reducer.h
+++ b/src/compiler/graph-reducer.h
@@ -5,7 +5,9 @@
 #ifndef V8_COMPILER_GRAPH_REDUCER_H_
 #define V8_COMPILER_GRAPH_REDUCER_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/compiler/node-marker.h"
+#include "src/globals.h"
 #include "src/zone/zone-containers.h"
 
 namespace v8 {
@@ -40,7 +42,7 @@
 // language-specific reductions (e.g. reduction based on types or constant
 // folding of low-level operators) can be integrated into the graph reduction
 // phase.
-class Reducer {
+class V8_EXPORT_PRIVATE Reducer {
  public:
   virtual ~Reducer() {}
 
@@ -119,7 +121,8 @@
 
 
 // Performs an iterative reduction of a node graph.
-class GraphReducer : public AdvancedReducer::Editor {
+class V8_EXPORT_PRIVATE GraphReducer
+    : public NON_EXPORTED_BASE(AdvancedReducer::Editor) {
  public:
   GraphReducer(Zone* zone, Graph* graph, Node* dead = nullptr);
   ~GraphReducer();
diff --git a/src/compiler/graph-replay.cc b/src/compiler/graph-replay.cc
index 352b171..df0160d 100644
--- a/src/compiler/graph-replay.cc
+++ b/src/compiler/graph-replay.cc
@@ -20,7 +20,7 @@
 void GraphReplayPrinter::PrintReplay(Graph* graph) {
   GraphReplayPrinter replay;
   PrintF("  Node* nil = graph()->NewNode(common()->Dead());\n");
-  Zone zone(graph->zone()->allocator());
+  Zone zone(graph->zone()->allocator(), ZONE_NAME);
   AllNodes nodes(&zone, graph);
 
   // Allocate the nodes first.
diff --git a/src/compiler/graph-trimmer.h b/src/compiler/graph-trimmer.h
index 98d335a..e57dc18 100644
--- a/src/compiler/graph-trimmer.h
+++ b/src/compiler/graph-trimmer.h
@@ -6,6 +6,7 @@
 #define V8_COMPILER_GRAPH_TRIMMER_H_
 
 #include "src/compiler/node-marker.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -16,7 +17,7 @@
 
 
 // Trims dead nodes from the node graph.
-class GraphTrimmer final {
+class V8_EXPORT_PRIVATE GraphTrimmer final {
  public:
   GraphTrimmer(Zone* zone, Graph* graph);
   ~GraphTrimmer();
diff --git a/src/compiler/graph-visualizer.cc b/src/compiler/graph-visualizer.cc
index d810c37..ab20f8f 100644
--- a/src/compiler/graph-visualizer.cc
+++ b/src/compiler/graph-visualizer.cc
@@ -11,6 +11,7 @@
 #include "src/code-stubs.h"
 #include "src/compilation-info.h"
 #include "src/compiler/all-nodes.h"
+#include "src/compiler/compiler-source-position-table.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/node.h"
@@ -81,33 +82,28 @@
   return node == nullptr ? "null" : node->op()->mnemonic();
 }
 
-#define DEAD_COLOR "#999999"
-
-class Escaped {
+class JSONEscaped {
  public:
-  explicit Escaped(const std::ostringstream& os,
-                   const char* escaped_chars = "<>|{}\\")
-      : str_(os.str()), escaped_chars_(escaped_chars) {}
+  explicit JSONEscaped(const std::ostringstream& os) : str_(os.str()) {}
 
-  friend std::ostream& operator<<(std::ostream& os, const Escaped& e) {
-    for (std::string::const_iterator i = e.str_.begin(); i != e.str_.end();
-         ++i) {
-      if (e.needs_escape(*i)) os << "\\";
-      os << *i;
-    }
+  friend std::ostream& operator<<(std::ostream& os, const JSONEscaped& e) {
+    for (char c : e.str_) PipeCharacter(os, c);
     return os;
   }
 
  private:
-  bool needs_escape(char ch) const {
-    for (size_t i = 0; i < strlen(escaped_chars_); ++i) {
-      if (ch == escaped_chars_[i]) return true;
-    }
-    return false;
+  static std::ostream& PipeCharacter(std::ostream& os, char c) {
+    if (c == '"') return os << "\\\"";
+    if (c == '\\') return os << "\\\\";
+    if (c == '\b') return os << "\\b";
+    if (c == '\f') return os << "\\f";
+    if (c == '\n') return os << "\\n";
+    if (c == '\r') return os << "\\r";
+    if (c == '\t') return os << "\\t";
+    return os << c;
   }
 
   const std::string str_;
-  const char* const escaped_chars_;
 };
 
 class JSONGraphNodeWriter {
@@ -135,11 +131,11 @@
     node->op()->PrintTo(label, Operator::PrintVerbosity::kSilent);
     node->op()->PrintTo(title, Operator::PrintVerbosity::kVerbose);
     node->op()->PrintPropsTo(properties);
-    os_ << "{\"id\":" << SafeId(node) << ",\"label\":\""
-        << Escaped(label, "\"\\") << "\""
-        << ",\"title\":\"" << Escaped(title, "\"\\") << "\""
+    os_ << "{\"id\":" << SafeId(node) << ",\"label\":\"" << JSONEscaped(label)
+        << "\""
+        << ",\"title\":\"" << JSONEscaped(title) << "\""
         << ",\"live\": " << (live_.IsLive(node) ? "true" : "false")
-        << ",\"properties\":\"" << Escaped(properties, "\"\\") << "\"";
+        << ",\"properties\":\"" << JSONEscaped(properties) << "\"";
     IrOpcode::Value opcode = node->opcode();
     if (IrOpcode::IsPhiOpcode(opcode)) {
       os_ << ",\"rankInputs\":[0," << NodeProperties::FirstControlIndex(node)
@@ -156,7 +152,7 @@
     }
     SourcePosition position = positions_->GetSourcePosition(node);
     if (position.IsKnown()) {
-      os_ << ",\"pos\":" << position.raw();
+      os_ << ",\"pos\":" << position.ScriptOffset();
     }
     os_ << ",\"opcode\":\"" << IrOpcode::Mnemonic(node->opcode()) << "\"";
     os_ << ",\"control\":" << (NodeProperties::IsControl(node) ? "true"
@@ -171,7 +167,7 @@
       Type* type = NodeProperties::GetType(node);
       std::ostringstream type_out;
       type->PrintTo(type_out);
-      os_ << ",\"type\":\"" << Escaped(type_out, "\"\\") << "\"";
+      os_ << ",\"type\":\"" << JSONEscaped(type_out) << "\"";
     }
     os_ << "}";
   }
@@ -240,7 +236,7 @@
 
 std::ostream& operator<<(std::ostream& os, const AsJSON& ad) {
   AccountingAllocator allocator;
-  Zone tmp_zone(&allocator);
+  Zone tmp_zone(&allocator, ZONE_NAME);
   os << "{\n\"nodes\":[";
   JSONGraphNodeWriter(os, &tmp_zone, &ad.graph, ad.positions).Print();
   os << "],\n\"edges\":[";
@@ -501,7 +497,7 @@
         if (positions != nullptr) {
           SourcePosition position = positions->GetSourcePosition(node);
           if (position.IsKnown()) {
-            os_ << " pos:" << position.raw();
+            os_ << " pos:" << position.ScriptOffset();
           }
         }
         os_ << " <|@\n";
@@ -630,7 +626,7 @@
 
 std::ostream& operator<<(std::ostream& os, const AsC1VCompilation& ac) {
   AccountingAllocator allocator;
-  Zone tmp_zone(&allocator);
+  Zone tmp_zone(&allocator, ZONE_NAME);
   GraphC1Visualizer(os, &tmp_zone).PrintCompilation(ac.info_);
   return os;
 }
@@ -638,7 +634,7 @@
 
 std::ostream& operator<<(std::ostream& os, const AsC1V& ac) {
   AccountingAllocator allocator;
-  Zone tmp_zone(&allocator);
+  Zone tmp_zone(&allocator, ZONE_NAME);
   GraphC1Visualizer(os, &tmp_zone)
       .PrintSchedule(ac.phase_, ac.schedule_, ac.positions_, ac.instructions_);
   return os;
@@ -648,7 +644,7 @@
 std::ostream& operator<<(std::ostream& os,
                          const AsC1VRegisterAllocationData& ac) {
   AccountingAllocator allocator;
-  Zone tmp_zone(&allocator);
+  Zone tmp_zone(&allocator, ZONE_NAME);
   GraphC1Visualizer(os, &tmp_zone).PrintLiveRanges(ac.phase_, ac.data_);
   return os;
 }
@@ -659,7 +655,7 @@
 
 std::ostream& operator<<(std::ostream& os, const AsRPO& ar) {
   AccountingAllocator allocator;
-  Zone local_zone(&allocator);
+  Zone local_zone(&allocator, ZONE_NAME);
 
   // Do a post-order depth-first search on the RPO graph. For every node,
   // print:
diff --git a/src/compiler/graph-visualizer.h b/src/compiler/graph-visualizer.h
index 700d7a7..356dd5e 100644
--- a/src/compiler/graph-visualizer.h
+++ b/src/compiler/graph-visualizer.h
@@ -9,6 +9,8 @@
 #include <iosfwd>
 #include <memory>
 
+#include "src/globals.h"
+
 namespace v8 {
 namespace internal {
 
@@ -32,15 +34,14 @@
   const SourcePositionTable* positions;
 };
 
-std::ostream& operator<<(std::ostream& os, const AsJSON& ad);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, const AsJSON& ad);
 
 struct AsRPO {
   explicit AsRPO(const Graph& g) : graph(g) {}
   const Graph& graph;
 };
 
-std::ostream& operator<<(std::ostream& os, const AsRPO& ad);
-
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, const AsRPO& ad);
 
 struct AsC1VCompilation {
   explicit AsC1VCompilation(const CompilationInfo* info) : info_(info) {}
diff --git a/src/compiler/graph.cc b/src/compiler/graph.cc
index ff1a17e..373d6d7 100644
--- a/src/compiler/graph.cc
+++ b/src/compiler/graph.cc
@@ -7,8 +7,9 @@
 #include <algorithm>
 
 #include "src/base/bits.h"
-#include "src/compiler/node.h"
+#include "src/compiler/graph-visualizer.h"
 #include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
 #include "src/compiler/verifier.h"
 
 namespace v8 {
@@ -72,6 +73,11 @@
   return id;
 }
 
+void Graph::Print() const {
+  OFStream os(stdout);
+  os << AsRPO(*this);
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/graph.h b/src/compiler/graph.h
index 1d9e85e..1e861c7 100644
--- a/src/compiler/graph.h
+++ b/src/compiler/graph.h
@@ -5,6 +5,8 @@
 #ifndef V8_COMPILER_GRAPH_H_
 #define V8_COMPILER_GRAPH_H_
 
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
 #include "src/zone/zone-containers.h"
 #include "src/zone/zone.h"
 
@@ -28,7 +30,7 @@
 // out-of-line data associated with each node.
 typedef uint32_t NodeId;
 
-class Graph final : public ZoneObject {
+class V8_EXPORT_PRIVATE Graph final : public NON_EXPORTED_BASE(ZoneObject) {
  public:
   explicit Graph(Zone* zone);
 
@@ -119,6 +121,9 @@
   void AddDecorator(GraphDecorator* decorator);
   void RemoveDecorator(GraphDecorator* decorator);
 
+  // Very simple print API usable in a debugger.
+  void Print() const;
+
  private:
   friend class NodeMarkerBase;
 
diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc
index 428570a..20afdc1 100644
--- a/src/compiler/ia32/code-generator-ia32.cc
+++ b/src/compiler/ia32/code-generator-ia32.cc
@@ -580,18 +580,15 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
-    case kArchTailCallJSFunctionFromJSFunction:
-    case kArchTailCallJSFunction: {
+    case kArchTailCallJSFunctionFromJSFunction: {
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
         // Check the function's context matches the context argument.
         __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
         __ Assert(equal, kWrongFunctionContext);
       }
-      if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
-        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
-                                         no_reg, no_reg, no_reg);
-      }
+      AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister, no_reg,
+                                       no_reg, no_reg);
       __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
       frame_access_state()->ClearSPDelta();
       frame_access_state()->SetFrameAccessToDefault();
@@ -652,7 +649,7 @@
       break;
     }
     case kArchRet:
-      AssembleReturn();
+      AssembleReturn(instr->InputAt(0));
       break;
     case kArchStackPointer:
       __ mov(i.OutputRegister(), esp);
@@ -1461,7 +1458,7 @@
           if (i.InputRegister(1).is(i.OutputRegister())) {
             __ shl(i.OutputRegister(), 1);
           } else {
-            __ lea(i.OutputRegister(), i.MemoryOperand());
+            __ add(i.OutputRegister(), i.InputRegister(1));
           }
         } else if (mode == kMode_M2) {
           __ shl(i.OutputRegister(), 1);
@@ -1472,6 +1469,9 @@
         } else {
           __ lea(i.OutputRegister(), i.MemoryOperand());
         }
+      } else if (mode == kMode_MR1 &&
+                 i.InputRegister(1).is(i.OutputRegister())) {
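+        // lea out, [base + out] reduces to add out, base.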
+        __ add(i.OutputRegister(), i.InputRegister(0));
       } else {
         __ lea(i.OutputRegister(), i.MemoryOperand());
       }
@@ -1790,7 +1790,7 @@
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   DeoptimizeReason deoptimization_reason =
       GetDeoptimizationReason(deoptimization_id);
-  __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
+  __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
 }
@@ -1945,12 +1945,16 @@
       __ mov(ebp, esp);
     } else if (descriptor->IsJSFunctionCall()) {
       __ Prologue(this->info()->GeneratePreagedPrologue());
+      if (descriptor->PushArgumentCount()) {
+        __ push(kJavaScriptCallArgCountRegister);
+      }
     } else {
       __ StubPrologue(info()->GetOutputStackFrameType());
     }
   }
 
-  int shrink_slots = frame()->GetSpillSlotCount();
+  int shrink_slots =
+      frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
 
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
@@ -1981,8 +1985,7 @@
   }
 }
 
-
-void CodeGenerator::AssembleReturn() {
+void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
 
   const RegList saves = descriptor->CalleeSavedRegisters();
@@ -1994,22 +1997,41 @@
     }
   }
 
+  // Might need ecx for scratch if pop_size is too big or if there is a variable
+  // pop count.
+  DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & ecx.bit());
+  size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
+  IA32OperandConverter g(this, nullptr);
   if (descriptor->IsCFunctionCall()) {
     AssembleDeconstructFrame();
   } else if (frame_access_state()->has_frame()) {
-    // Canonicalize JSFunction return sites for now.
-    if (return_label_.is_bound()) {
-      __ jmp(&return_label_);
-      return;
+    // Canonicalize JSFunction return sites for now, as long as they always
+    // pop the same number of extra arguments.
+    if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+      if (return_label_.is_bound()) {
+        __ jmp(&return_label_);
+        return;
+      } else {
+        __ bind(&return_label_);
+        AssembleDeconstructFrame();
+      }
     } else {
-      __ bind(&return_label_);
       AssembleDeconstructFrame();
     }
   }
-  size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
-  // Might need ecx for scratch if pop_size is too big.
+  DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & edx.bit());
   DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & ecx.bit());
-  __ Ret(static_cast<int>(pop_size), ecx);
+  if (pop->IsImmediate()) {
+    DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
+    pop_size += g.ToConstant(pop).ToInt32() * kPointerSize;
+    __ Ret(static_cast<int>(pop_size), ecx);
+  } else {
+    Register pop_reg = g.ToRegister(pop);
+    Register scratch_reg = pop_reg.is(ecx) ? edx : ecx;
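+    // Pop the return address into the scratch register, drop pop_reg stack
+    // arguments plus the fixed parameter slots, then return to the caller.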
+    __ pop(scratch_reg);
+    __ lea(esp, Operand(esp, pop_reg, times_4, static_cast<int>(pop_size)));
+    __ jmp(scratch_reg);
+  }
 }
 
 
diff --git a/src/compiler/ia32/instruction-scheduler-ia32.cc b/src/compiler/ia32/instruction-scheduler-ia32.cc
index ad7535c..3216b1d 100644
--- a/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -146,8 +146,72 @@
 
 
 int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
-  // TODO(all): Add instruction cost modeling.
-  return 1;
+  // Basic latency modeling for ia32 instructions. The latencies below were
+  // determined empirically.
+  switch (instr->arch_opcode()) {
+    case kCheckedLoadInt8:
+    case kCheckedLoadUint8:
+    case kCheckedLoadInt16:
+    case kCheckedLoadUint16:
+    case kCheckedLoadWord32:
+    case kCheckedLoadFloat32:
+    case kCheckedLoadFloat64:
+    case kCheckedStoreWord8:
+    case kCheckedStoreWord16:
+    case kCheckedStoreWord32:
+    case kCheckedStoreFloat32:
+    case kCheckedStoreFloat64:
+    case kSSEFloat64Mul:
+      return 5;
+    case kIA32Imul:
+    case kIA32ImulHigh:
+      return 5;
+    case kSSEFloat32Cmp:
+    case kSSEFloat64Cmp:
+      return 9;
+    case kSSEFloat32Add:
+    case kSSEFloat32Sub:
+    case kSSEFloat32Abs:
+    case kSSEFloat32Neg:
+    case kSSEFloat64Add:
+    case kSSEFloat64Sub:
+    case kSSEFloat64Max:
+    case kSSEFloat64Min:
+    case kSSEFloat64Abs:
+    case kSSEFloat64Neg:
+      return 5;
+    case kSSEFloat32Mul:
+      return 4;
+    case kSSEFloat32ToFloat64:
+    case kSSEFloat64ToFloat32:
+      return 6;
+    case kSSEFloat32Round:
+    case kSSEFloat64Round:
+    case kSSEFloat32ToInt32:
+    case kSSEFloat64ToInt32:
+      return 8;
+    case kSSEFloat32ToUint32:
+      return 21;
+    case kSSEFloat64ToUint32:
+      return 15;
+    case kIA32Idiv:
+      return 33;
+    case kIA32Udiv:
+      return 26;
+    case kSSEFloat32Div:
+      return 35;
+    case kSSEFloat64Div:
+      return 63;
+    case kSSEFloat32Sqrt:
+    case kSSEFloat64Sqrt:
+      return 25;
+    case kSSEFloat64Mod:
+      return 50;
+    case kArchTruncateDoubleToI:
+      return 9;
+    default:
+      return 1;
+  }
 }
 
 }  // namespace compiler
diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc
index 7e98023..c827c68 100644
--- a/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/src/compiler/ia32/instruction-selector-ia32.cc
@@ -646,55 +646,78 @@
 void InstructionSelector::VisitInt32PairAdd(Node* node) {
   IA32OperandGenerator g(this);
 
-  // We use UseUniqueRegister here to avoid register sharing with the temp
-  // register.
-  InstructionOperand inputs[] = {
-      g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
-      g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+  Node* projection1 = NodeProperties::FindProjection(node, 1);
+  if (projection1) {
+    // We use UseUniqueRegister here to avoid register sharing with the temp
+    // register.
+    InstructionOperand inputs[] = {
+        g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+        g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
 
-  InstructionOperand outputs[] = {
-      g.DefineSameAsFirst(node),
-      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+    InstructionOperand outputs[] = {g.DefineSameAsFirst(node),
+                                    g.DefineAsRegister(projection1)};
 
-  InstructionOperand temps[] = {g.TempRegister()};
+    InstructionOperand temps[] = {g.TempRegister()};
 
-  Emit(kIA32AddPair, 2, outputs, 4, inputs, 1, temps);
+    Emit(kIA32AddPair, 2, outputs, 4, inputs, 1, temps);
+  } else {
+    // The high word of the result is not used, so we emit the standard 32-bit
+    // instruction.
+    Emit(kIA32Add, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+         g.Use(node->InputAt(2)));
+  }
 }
 
 void InstructionSelector::VisitInt32PairSub(Node* node) {
   IA32OperandGenerator g(this);
 
-  // We use UseUniqueRegister here to avoid register sharing with the temp
-  // register.
-  InstructionOperand inputs[] = {
-      g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
-      g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+  Node* projection1 = NodeProperties::FindProjection(node, 1);
+  if (projection1) {
+    // We use UseUniqueRegister here to avoid register sharing with the temp
+    // register.
+    InstructionOperand inputs[] = {
+        g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+        g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
 
-  InstructionOperand outputs[] = {
-      g.DefineSameAsFirst(node),
-      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+    InstructionOperand outputs[] = {g.DefineSameAsFirst(node),
+                                    g.DefineAsRegister(projection1)};
 
-  InstructionOperand temps[] = {g.TempRegister()};
+    InstructionOperand temps[] = {g.TempRegister()};
 
-  Emit(kIA32SubPair, 2, outputs, 4, inputs, 1, temps);
+    Emit(kIA32SubPair, 2, outputs, 4, inputs, 1, temps);
+  } else {
+    // The high word of the result is not used, so we emit the standard 32-bit
+    // instruction.
+    Emit(kIA32Sub, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+         g.Use(node->InputAt(2)));
+  }
 }
 
 void InstructionSelector::VisitInt32PairMul(Node* node) {
   IA32OperandGenerator g(this);
 
-  // InputAt(3) explicitly shares ecx with OutputRegister(1) to save one
-  // register and one mov instruction.
-  InstructionOperand inputs[] = {
-      g.UseUnique(node->InputAt(0)), g.UseUnique(node->InputAt(1)),
-      g.UseUniqueRegister(node->InputAt(2)), g.UseFixed(node->InputAt(3), ecx)};
+  Node* projection1 = NodeProperties::FindProjection(node, 1);
+  if (projection1) {
+    // InputAt(3) explicitly shares ecx with OutputRegister(1) to save one
+    // register and one mov instruction.
+    InstructionOperand inputs[] = {g.UseUnique(node->InputAt(0)),
+                                   g.UseUnique(node->InputAt(1)),
+                                   g.UseUniqueRegister(node->InputAt(2)),
+                                   g.UseFixed(node->InputAt(3), ecx)};
 
-  InstructionOperand outputs[] = {
-      g.DefineAsFixed(node, eax),
-      g.DefineAsFixed(NodeProperties::FindProjection(node, 1), ecx)};
+    InstructionOperand outputs[] = {
+        g.DefineAsFixed(node, eax),
+        g.DefineAsFixed(NodeProperties::FindProjection(node, 1), ecx)};
 
-  InstructionOperand temps[] = {g.TempRegister(edx)};
+    InstructionOperand temps[] = {g.TempRegister(edx)};
 
-  Emit(kIA32MulPair, 2, outputs, 4, inputs, 1, temps);
+    Emit(kIA32MulPair, 2, outputs, 4, inputs, 1, temps);
+  } else {
+    // The high word of the result is not used, so we emit the standard 32-bit
+    // instruction.
+    Emit(kIA32Imul, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+         g.Use(node->InputAt(2)));
+  }
 }
 
 void VisitWord32PairShift(InstructionSelector* selector, InstructionCode opcode,
@@ -712,11 +735,19 @@
                                  g.UseFixed(node->InputAt(1), edx),
                                  shift_operand};
 
-  InstructionOperand outputs[] = {
-      g.DefineAsFixed(node, eax),
-      g.DefineAsFixed(NodeProperties::FindProjection(node, 1), edx)};
+  InstructionOperand outputs[2];
+  InstructionOperand temps[1];
+  int32_t output_count = 0;
+  int32_t temp_count = 0;
+  outputs[output_count++] = g.DefineAsFixed(node, eax);
+  Node* projection1 = NodeProperties::FindProjection(node, 1);
+  if (projection1) {
+    outputs[output_count++] = g.DefineAsFixed(projection1, edx);
+  } else {
+    temps[temp_count++] = g.TempRegister(edx);
+  }
 
-  selector->Emit(opcode, 2, outputs, 3, inputs);
+  selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
 }
 
 void InstructionSelector::VisitWord32PairShl(Node* node) {
@@ -1362,22 +1393,22 @@
 // Shared routine for word comparison with zero.
 void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                           Node* value, FlagsContinuation* cont) {
-  // Try to combine the branch with a comparison.
-  while (selector->CanCover(user, value)) {
+  // Try to combine with comparisons against 0 by simply inverting the branch.
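+  // E.g., Branch(Word32Equal(x, #0)) becomes Branch(x) with a negated
+  // continuation; nested equal-to-zero comparisons unwrap the same way.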
+  while (value->opcode() == IrOpcode::kWord32Equal &&
+         selector->CanCover(user, value)) {
+    Int32BinopMatcher m(value);
+    if (!m.right().Is(0)) break;
+
+    user = value;
+    value = m.left().node();
+    cont->Negate();
+  }
+
+  if (selector->CanCover(user, value)) {
     switch (value->opcode()) {
-      case IrOpcode::kWord32Equal: {
-        // Try to combine with comparisons against 0 by simply inverting the
-        // continuation.
-        Int32BinopMatcher m(value);
-        if (m.right().Is(0)) {
-          user = value;
-          value = m.left().node();
-          cont->Negate();
-          continue;
-        }
+      case IrOpcode::kWord32Equal:
         cont->OverwriteAndNegateIfEqual(kEqual);
         return VisitWordCompare(selector, value, cont);
-      }
       case IrOpcode::kInt32LessThan:
         cont->OverwriteAndNegateIfEqual(kSignedLessThan);
         return VisitWordCompare(selector, value, cont);
@@ -1443,7 +1474,6 @@
       default:
         break;
     }
-    break;
   }
 
   // Continuation could not be combined with a compare, emit compare against 0.
diff --git a/src/compiler/instruction-codes.h b/src/compiler/instruction-codes.h
index 22279fe..6242e98 100644
--- a/src/compiler/instruction-codes.h
+++ b/src/compiler/instruction-codes.h
@@ -29,6 +29,7 @@
 #define TARGET_ARCH_OPCODE_LIST(V)
 #define TARGET_ADDRESSING_MODE_LIST(V)
 #endif
+#include "src/globals.h"
 #include "src/utils.h"
 
 namespace v8 {
@@ -47,7 +48,6 @@
   V(ArchTailCallCodeObject)               \
   V(ArchCallJSFunction)                   \
   V(ArchTailCallJSFunctionFromJSFunction) \
-  V(ArchTailCallJSFunction)               \
   V(ArchTailCallAddress)                  \
   V(ArchPrepareCallCFunction)             \
   V(ArchCallCFunction)                    \
@@ -124,7 +124,8 @@
 #undef COUNT_ARCH_OPCODE
 };
 
-std::ostream& operator<<(std::ostream& os, const ArchOpcode& ao);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+                                           const ArchOpcode& ao);
 
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
@@ -143,7 +144,8 @@
 #undef COUNT_ADDRESSING_MODE
 };
 
-std::ostream& operator<<(std::ostream& os, const AddressingMode& am);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+                                           const AddressingMode& am);
 
 // The mode of the flags continuation (see below).
 enum FlagsMode {
@@ -153,7 +155,8 @@
   kFlags_set = 3
 };
 
-std::ostream& operator<<(std::ostream& os, const FlagsMode& fm);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+                                           const FlagsMode& fm);
 
 // The condition of flags continuation (see below).
 enum FlagsCondition {
@@ -189,7 +192,8 @@
 
 FlagsCondition CommuteFlagsCondition(FlagsCondition condition);
 
-std::ostream& operator<<(std::ostream& os, const FlagsCondition& fc);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+                                           const FlagsCondition& fc);
 
 // The InstructionCode is an opaque, target-specific integer that encodes
 // what code to emit for an instruction in the code generator. It is not
diff --git a/src/compiler/instruction-scheduler.cc b/src/compiler/instruction-scheduler.cc
index c7fd1cc..8ba287b 100644
--- a/src/compiler/instruction-scheduler.cc
+++ b/src/compiler/instruction-scheduler.cc
@@ -284,7 +284,6 @@
     case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject:
     case kArchTailCallJSFunctionFromJSFunction:
-    case kArchTailCallJSFunction:
     case kArchTailCallAddress:
       return kHasSideEffect | kIsBlockTerminator;
 
diff --git a/src/compiler/instruction-selector-impl.h b/src/compiler/instruction-selector-impl.h
index 673d1b0..6cb87ea 100644
--- a/src/compiler/instruction-selector-impl.h
+++ b/src/compiler/instruction-selector-impl.h
@@ -387,6 +387,7 @@
   void Overwrite(FlagsCondition condition) { condition_ = condition; }
 
   void OverwriteAndNegateIfEqual(FlagsCondition condition) {
+    DCHECK(condition_ == kEqual || condition_ == kNotEqual);
     bool negate = condition_ == kEqual;
     condition_ = condition;
     if (negate) Negate();
diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc
index b150725..8f899f3 100644
--- a/src/compiler/instruction-selector.cc
+++ b/src/compiler/instruction-selector.cc
@@ -7,6 +7,7 @@
 #include <limits>
 
 #include "src/base/adapters.h"
+#include "src/compiler/compiler-source-position-table.h"
 #include "src/compiler/instruction-selector-impl.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/pipeline.h"
@@ -392,9 +393,13 @@
 }
 
 bool InstructionSelector::CanAddressRelativeToRootsRegister() const {
-  return (enable_serialization_ == kDisableSerialization &&
-          (linkage()->GetIncomingDescriptor()->flags() &
-           CallDescriptor::kCanUseRoots));
+  return enable_serialization_ == kDisableSerialization &&
+         CanUseRootsRegister();
+}
+
+bool InstructionSelector::CanUseRootsRegister() const {
+  return linkage()->GetIncomingDescriptor()->flags() &
+         CallDescriptor::kCanUseRoots;
 }
 
 void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
@@ -430,6 +435,7 @@
     case IrOpcode::kHeapConstant:
       return g->UseImmediate(input);
     case IrOpcode::kObjectState:
+    case IrOpcode::kTypedObjectState:
       UNREACHABLE();
       break;
     default:
@@ -481,6 +487,10 @@
                                         FrameStateInputKind kind, Zone* zone) {
   switch (input->opcode()) {
     case IrOpcode::kObjectState: {
+      UNREACHABLE();
+      return 0;
+    }
+    case IrOpcode::kTypedObjectState: {
       size_t id = deduplicator->GetObjectId(input);
       if (id == StateObjectDeduplicator::kNotDuplicated) {
         size_t entries = 0;
@@ -488,10 +498,12 @@
         descriptor->fields().push_back(
             StateValueDescriptor::Recursive(zone, id));
         StateValueDescriptor* new_desc = &descriptor->fields().back();
-        for (Edge edge : input->input_edges()) {
+        int const input_count = input->op()->ValueInputCount();
+        ZoneVector<MachineType> const* types = MachineTypesOf(input->op());
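+        // TypedObjectState records a machine type per field, so each input is
+        // translated with its recorded type rather than a blanket AnyTagged.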
+        for (int i = 0; i < input_count; ++i) {
           entries += AddOperandToStateValueDescriptor(
-              new_desc, inputs, g, deduplicator, edge.to(),
-              MachineType::AnyTagged(), kind, zone);
+              new_desc, inputs, g, deduplicator, input->InputAt(i),
+              types->at(i), kind, zone);
         }
         return entries;
       } else {
@@ -502,7 +514,6 @@
             StateValueDescriptor::Duplicate(zone, id));
         return 0;
       }
-      break;
     }
     default: {
       inputs->push_back(OperandForDeopt(g, input, kind, type.representation()));
@@ -929,6 +940,16 @@
   }
 }
 
+void InstructionSelector::MarkPairProjectionsAsWord32(Node* node) {
+  Node* projection0 = NodeProperties::FindProjection(node, 0);
+  if (projection0) {
+    MarkAsWord32(projection0);
+  }
+  Node* projection1 = NodeProperties::FindProjection(node, 1);
+  if (projection1) {
+    MarkAsWord32(projection1);
+  }
+}
 
 void InstructionSelector::VisitNode(Node* node) {
   DCHECK_NOT_NULL(schedule()->block(node));  // should only use scheduled nodes.
@@ -1336,28 +1357,28 @@
     case IrOpcode::kCheckedStore:
       return VisitCheckedStore(node);
     case IrOpcode::kInt32PairAdd:
-      MarkAsWord32(NodeProperties::FindProjection(node, 0));
-      MarkAsWord32(NodeProperties::FindProjection(node, 1));
+      MarkAsWord32(node);
+      MarkPairProjectionsAsWord32(node);
       return VisitInt32PairAdd(node);
     case IrOpcode::kInt32PairSub:
-      MarkAsWord32(NodeProperties::FindProjection(node, 0));
-      MarkAsWord32(NodeProperties::FindProjection(node, 1));
+      MarkAsWord32(node);
+      MarkPairProjectionsAsWord32(node);
       return VisitInt32PairSub(node);
     case IrOpcode::kInt32PairMul:
-      MarkAsWord32(NodeProperties::FindProjection(node, 0));
-      MarkAsWord32(NodeProperties::FindProjection(node, 1));
+      MarkAsWord32(node);
+      MarkPairProjectionsAsWord32(node);
       return VisitInt32PairMul(node);
     case IrOpcode::kWord32PairShl:
-      MarkAsWord32(NodeProperties::FindProjection(node, 0));
-      MarkAsWord32(NodeProperties::FindProjection(node, 1));
+      MarkAsWord32(node);
+      MarkPairProjectionsAsWord32(node);
       return VisitWord32PairShl(node);
     case IrOpcode::kWord32PairShr:
-      MarkAsWord32(NodeProperties::FindProjection(node, 0));
-      MarkAsWord32(NodeProperties::FindProjection(node, 1));
+      MarkAsWord32(node);
+      MarkPairProjectionsAsWord32(node);
       return VisitWord32PairShr(node);
     case IrOpcode::kWord32PairSar:
-      MarkAsWord32(NodeProperties::FindProjection(node, 0));
-      MarkAsWord32(NodeProperties::FindProjection(node, 1));
+      MarkAsWord32(node);
+      MarkPairProjectionsAsWord32(node);
       return VisitWord32PairSar(node);
     case IrOpcode::kAtomicLoad: {
       LoadRepresentation type = LoadRepresentationOf(node->op());
@@ -1741,7 +1762,7 @@
 
 void InstructionSelector::VisitOsrValue(Node* node) {
   OperandGenerator g(this);
-  int index = OpParameter<int>(node);
+  int index = OsrValueIndexOf(node->op());
   Emit(kArchNop,
        g.DefineAsLocation(node, linkage()->GetOsrValueLocation(index)));
 }
@@ -1875,109 +1896,63 @@
   DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
 
   CallDescriptor* caller = linkage()->GetIncomingDescriptor();
-  if (caller->CanTailCall(node)) {
-    const CallDescriptor* callee = CallDescriptorOf(node->op());
-    int stack_param_delta = callee->GetStackParameterDelta(caller);
-    CallBuffer buffer(zone(), descriptor, nullptr);
+  DCHECK(caller->CanTailCall(node));
+  const CallDescriptor* callee = CallDescriptorOf(node->op());
+  int stack_param_delta = callee->GetStackParameterDelta(caller);
+  CallBuffer buffer(zone(), descriptor, nullptr);
 
-    // Compute InstructionOperands for inputs and outputs.
-    CallBufferFlags flags(kCallCodeImmediate | kCallTail);
-    if (IsTailCallAddressImmediate()) {
-      flags |= kCallAddressImmediate;
-    }
-    InitializeCallBuffer(node, &buffer, flags, stack_param_delta);
+  // Compute InstructionOperands for inputs and outputs.
+  CallBufferFlags flags(kCallCodeImmediate | kCallTail);
+  if (IsTailCallAddressImmediate()) {
+    flags |= kCallAddressImmediate;
+  }
+  InitializeCallBuffer(node, &buffer, flags, stack_param_delta);
 
-    // Select the appropriate opcode based on the call type.
-    InstructionCode opcode;
-    InstructionOperandVector temps(zone());
-    if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
-      switch (descriptor->kind()) {
-        case CallDescriptor::kCallCodeObject:
-          opcode = kArchTailCallCodeObjectFromJSFunction;
-          break;
-        case CallDescriptor::kCallJSFunction:
-          opcode = kArchTailCallJSFunctionFromJSFunction;
-          break;
-        default:
-          UNREACHABLE();
-          return;
-      }
-      int temps_count = GetTempsCountForTailCallFromJSFunction();
-      for (int i = 0; i < temps_count; i++) {
-        temps.push_back(g.TempRegister());
-      }
-    } else {
-      switch (descriptor->kind()) {
-        case CallDescriptor::kCallCodeObject:
-          opcode = kArchTailCallCodeObject;
-          break;
-        case CallDescriptor::kCallJSFunction:
-          opcode = kArchTailCallJSFunction;
-          break;
-        case CallDescriptor::kCallAddress:
-          opcode = kArchTailCallAddress;
-          break;
-        default:
-          UNREACHABLE();
-          return;
-      }
-    }
-    opcode |= MiscField::encode(descriptor->flags());
-
-    Emit(kArchPrepareTailCall, g.NoOutput());
-
-    int first_unused_stack_slot =
-        (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0) +
-        stack_param_delta;
-    buffer.instruction_args.push_back(g.TempImmediate(first_unused_stack_slot));
-
-    // Emit the tailcall instruction.
-    Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
-         &buffer.instruction_args.front(), temps.size(),
-         temps.empty() ? nullptr : &temps.front());
-  } else {
-    FrameStateDescriptor* frame_state_descriptor =
-        descriptor->NeedsFrameState()
-            ? GetFrameStateDescriptor(
-                  node->InputAt(static_cast<int>(descriptor->InputCount())))
-            : nullptr;
-
-    CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
-    // Compute InstructionOperands for inputs and outputs.
-    CallBufferFlags flags = kCallCodeImmediate;
-    if (IsTailCallAddressImmediate()) {
-      flags |= kCallAddressImmediate;
-    }
-    InitializeCallBuffer(node, &buffer, flags);
-
-    EmitPrepareArguments(&(buffer.pushed_nodes), descriptor, node);
-
-    // Select the appropriate opcode based on the call type.
-    InstructionCode opcode;
+  // Select the appropriate opcode based on the call type.
+  InstructionCode opcode;
+  InstructionOperandVector temps(zone());
+  if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
     switch (descriptor->kind()) {
       case CallDescriptor::kCallCodeObject:
-        opcode = kArchCallCodeObject;
+        opcode = kArchTailCallCodeObjectFromJSFunction;
         break;
       case CallDescriptor::kCallJSFunction:
-        opcode = kArchCallJSFunction;
+        opcode = kArchTailCallJSFunctionFromJSFunction;
         break;
       default:
         UNREACHABLE();
         return;
     }
-    opcode |= MiscField::encode(descriptor->flags());
-
-    // Emit the call instruction.
-    size_t output_count = buffer.outputs.size();
-    auto* outputs = &buffer.outputs.front();
-    Instruction* call_instr =
-        Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
-             &buffer.instruction_args.front());
-    if (instruction_selection_failed()) return;
-    call_instr->MarkAsCall();
-    Emit(kArchRet, 0, nullptr, output_count, outputs);
+    int temps_count = GetTempsCountForTailCallFromJSFunction();
+    for (int i = 0; i < temps_count; i++) {
+      temps.push_back(g.TempRegister());
+    }
+  } else {
+    switch (descriptor->kind()) {
+      case CallDescriptor::kCallCodeObject:
+        opcode = kArchTailCallCodeObject;
+        break;
+      case CallDescriptor::kCallAddress:
+        opcode = kArchTailCallAddress;
+        break;
+      default:
+        UNREACHABLE();
+        return;
+    }
   }
+  opcode |= MiscField::encode(descriptor->flags());
+
+  Emit(kArchPrepareTailCall, g.NoOutput());
+
+  int first_unused_stack_slot =
+      (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0) +
+      stack_param_delta;
+  buffer.instruction_args.push_back(g.TempImmediate(first_unused_stack_slot));
+
+  // Emit the tailcall instruction.
+  Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
+       &buffer.instruction_args.front(), temps.size(),
+       temps.empty() ? nullptr : &temps.front());
 }
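
For readers tracking the restructured tail-call path above: the first_unused_stack_slot argument reduces to a small calculation. A minimal standalone sketch, where the constant is a hypothetical stand-in for V8's V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK:

#include <cassert>

// Hypothetical stand-in for V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK;
// true on targets like x64/ia32 where a call pushes the return address.
constexpr bool kStoresReturnAddressOnStack = true;

// First caller stack slot the tail call no longer needs: one slot for the
// return address (if any) plus the caller/callee stack-parameter delta.
int FirstUnusedStackSlot(int stack_param_delta) {
  return (kStoresReturnAddressOnStack ? 1 : 0) + stack_param_delta;
}

int main() {
  assert(FirstUnusedStackSlot(0) == 1);  // identical parameter counts
  assert(FirstUnusedStackSlot(2) == 3);  // nonzero caller/callee difference
  return 0;
}
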
 
 
@@ -1987,20 +1962,34 @@
   Emit(kArchJmp, g.NoOutput(), g.Label(target));
 }
 
-
 void InstructionSelector::VisitReturn(Node* ret) {
   OperandGenerator g(this);
-  if (linkage()->GetIncomingDescriptor()->ReturnCount() == 0) {
-    Emit(kArchRet, g.NoOutput());
-  } else {
-    const int ret_count = ret->op()->ValueInputCount();
-    auto value_locations = zone()->NewArray<InstructionOperand>(ret_count);
-    for (int i = 0; i < ret_count; ++i) {
-      value_locations[i] =
-          g.UseLocation(ret->InputAt(i), linkage()->GetReturnLocation(i));
-    }
-    Emit(kArchRet, 0, nullptr, ret_count, value_locations);
+  const int input_count = linkage()->GetIncomingDescriptor()->ReturnCount() == 0
+                              ? 1
+                              : ret->op()->ValueInputCount();
+  DCHECK_GE(input_count, 1);
+  auto value_locations = zone()->NewArray<InstructionOperand>(input_count);
+  Node* pop_count = ret->InputAt(0);
+  value_locations[0] = pop_count->opcode() == IrOpcode::kInt32Constant
+                           ? g.UseImmediate(pop_count)
+                           : g.UseRegister(pop_count);
+  for (int i = 1; i < input_count; ++i) {
+    value_locations[i] =
+        g.UseLocation(ret->InputAt(i), linkage()->GetReturnLocation(i - 1));
   }
+  Emit(kArchRet, 0, nullptr, input_count, value_locations);
+}
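
The reworked VisitReturn above prepends the pop count as operand 0, encoded as an immediate when it is an Int32Constant, which is why the return locations are now fetched with GetReturnLocation(i - 1). A toy model of that operand layout, with types and names invented for illustration:

#include <cassert>
#include <vector>

struct Operand {
  bool is_immediate;  // constant pop counts can be encoded directly
  int payload;        // the constant, or a register id
};

// Builds the kArchRet-style operand vector: slot 0 is the pop count,
// slots 1..n hold the return values.
std::vector<Operand> BuildReturnOperands(bool pop_count_is_constant,
                                         int pop_count,
                                         const std::vector<int>& value_regs) {
  std::vector<Operand> locations;
  locations.push_back({pop_count_is_constant, pop_count});
  for (int reg : value_regs) locations.push_back({false, reg});
  return locations;
}

int main() {
  auto locations = BuildReturnOperands(true, 3, {7});
  assert(locations.size() == 2);       // pop count plus one return value
  assert(locations[0].is_immediate);   // constant pop count as immediate
  assert(!locations[1].is_immediate);  // values live at register locations
  return 0;
}
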
+
+Instruction* InstructionSelector::EmitDeoptimize(InstructionCode opcode,
+                                                 InstructionOperand output,
+                                                 InstructionOperand a,
+                                                 DeoptimizeReason reason,
+                                                 Node* frame_state) {
+  size_t output_count = output.IsInvalid() ? 0 : 1;
+  InstructionOperand inputs[] = {a};
+  size_t input_count = arraysize(inputs);
+  return EmitDeoptimize(opcode, output_count, &output, input_count, inputs,
+                        reason, frame_state);
 }
 
 Instruction* InstructionSelector::EmitDeoptimize(
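
The new single-input EmitDeoptimize overload above is pure sugar: it packs its fixed operands into local arrays and forwards to the general (count, pointer) entry point. The same pattern in miniature, with illustrative names rather than V8's API:

#include <cassert>
#include <cstddef>

struct Operand {
  int value = -1;
  bool IsInvalid() const { return value < 0; }
};

// General entry point taking operand arrays; returns an encoded count so the
// sketch has something observable to check.
size_t EmitCore(size_t output_count, const Operand* outputs,
                size_t input_count, const Operand* inputs) {
  (void)outputs;
  (void)inputs;
  return output_count * 100 + input_count;
}

// Convenience overload mirroring the diff: one optional output, one input.
size_t Emit(Operand output, Operand a) {
  size_t output_count = output.IsInvalid() ? 0 : 1;
  Operand inputs[] = {a};
  return EmitCore(output_count, &output, sizeof(inputs) / sizeof(inputs[0]),
                  inputs);
}

int main() {
  assert(Emit(Operand{5}, Operand{1}) == 101);  // 1 output, 1 input
  assert(Emit(Operand{-1}, Operand{1}) == 1);   // no output, 1 input
  return 0;
}
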
diff --git a/src/compiler/instruction-selector.h b/src/compiler/instruction-selector.h
index 2981f90..65ba8f7 100644
--- a/src/compiler/instruction-selector.h
+++ b/src/compiler/instruction-selector.h
@@ -12,6 +12,7 @@
 #include "src/compiler/instruction.h"
 #include "src/compiler/machine-operator.h"
 #include "src/compiler/node.h"
+#include "src/globals.h"
 #include "src/zone/zone-containers.h"
 
 namespace v8 {
@@ -42,7 +43,7 @@
 };
 
 // Instruction selection generates an InstructionSequence for a given Schedule.
-class InstructionSelector final {
+class V8_EXPORT_PRIVATE InstructionSelector final {
  public:
   // Forward declarations.
   class Features;
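
The V8_EXPORT_PRIVATE annotation above (and the NON_EXPORTED_BASE wrappers later in this change) controls symbol visibility in component builds: the class becomes usable across the shared-library boundary, e.g. by test binaries. A simplified sketch of the mechanism, using stand-in macros rather than V8's real ones from src/base/compiler-specific.h and src/globals.h:

// On Windows this would expand to __declspec(dllexport/dllimport); on
// ELF/Mach-O targets it marks default visibility under -fvisibility=hidden.
#if defined(_WIN32)
#define MY_EXPORT __declspec(dllexport)
#else
#define MY_EXPORT __attribute__((visibility("default")))
#endif
// V8's NON_EXPORTED_BASE additionally suppresses MSVC's C4275 warning about
// exporting a class whose base class is not exported; pass-through here.
#define MY_NON_EXPORTED_BASE(x) x

class InternalBase {};
class MY_EXPORT PublicThing : public MY_NON_EXPORTED_BASE(InternalBase) {
 public:
  int id() const { return 1; }
};

int main() { return PublicThing().id() == 1 ? 0 : 1; }
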
@@ -110,6 +111,9 @@
   // ===========================================================================
 
   Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
+                              InstructionOperand a, DeoptimizeReason reason,
+                              Node* frame_state);
+  Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
                               InstructionOperand a, InstructionOperand b,
                               DeoptimizeReason reason, Node* frame_state);
   Instruction* EmitDeoptimize(InstructionCode opcode, size_t output_count,
@@ -204,6 +208,8 @@
   // to the roots register, i.e. if both a root register is available for this
   // compilation unit and the serializer is disabled.
   bool CanAddressRelativeToRootsRegister() const;
+  // Check if we can use the roots register to access GC roots.
+  bool CanUseRootsRegister() const;
 
   Isolate* isolate() const { return sequence()->isolate(); }
 
@@ -344,6 +350,8 @@
   }
   bool instruction_selection_failed() { return instruction_selection_failed_; }
 
+  void MarkPairProjectionsAsWord32(Node* node);
+
   // ===========================================================================
 
   Zone* const zone_;
diff --git a/src/compiler/instruction.cc b/src/compiler/instruction.cc
index 0df7ca0..3b2311a 100644
--- a/src/compiler/instruction.cc
+++ b/src/compiler/instruction.cc
@@ -12,7 +12,8 @@
 namespace internal {
 namespace compiler {
 
-const auto GetRegConfig = RegisterConfiguration::Turbofan;
+const RegisterConfiguration* (*GetRegConfig)() =
+    RegisterConfiguration::Turbofan;
 
 FlagsCondition CommuteFlagsCondition(FlagsCondition condition) {
   switch (condition) {
@@ -64,8 +65,35 @@
   return condition;
 }
 
-bool InstructionOperand::InterferesWith(const InstructionOperand& that) const {
-  return EqualsCanonicalized(that);
+bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
+  if (kSimpleFPAliasing || !this->IsFPLocationOperand() ||
+      !other.IsFPLocationOperand())
+    return EqualsCanonicalized(other);
+  // Aliasing is complex and both operands are fp locations.
+  const LocationOperand& loc = *LocationOperand::cast(this);
+  const LocationOperand& other_loc = LocationOperand::cast(other);
+  LocationOperand::LocationKind kind = loc.location_kind();
+  LocationOperand::LocationKind other_kind = other_loc.location_kind();
+  if (kind != other_kind) return false;
+  MachineRepresentation rep = loc.representation();
+  MachineRepresentation other_rep = other_loc.representation();
+  if (rep == other_rep) return EqualsCanonicalized(other);
+  if (kind == LocationOperand::REGISTER) {
+    // FP register-register interference.
+    return GetRegConfig()->AreAliases(rep, loc.register_code(), other_rep,
+                                      other_loc.register_code());
+  } else {
+    // FP slot-slot interference. Slots of different FP reps can alias because
+    // the gap resolver may break a move into 2 or 4 equivalent smaller moves.
+    DCHECK_EQ(LocationOperand::STACK_SLOT, kind);
+    int index_hi = loc.index();
+    int index_lo = index_hi - (1 << ElementSizeLog2Of(rep)) / kPointerSize + 1;
+    int other_index_hi = other_loc.index();
+    int other_index_lo =
+        other_index_hi - (1 << ElementSizeLog2Of(other_rep)) / kPointerSize + 1;
+    return other_index_hi >= index_lo && index_hi >= other_index_lo;
+  }
+  return false;
 }
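
The stack-slot branch above is an interval-overlap test: a slot operand covers a contiguous run of pointer-sized slots ending at its index, whose length depends on the representation's byte size. A standalone check of that arithmetic, with constants chosen for a 32-bit-style layout purely for illustration:

#include <cassert>

constexpr int kPointerSize = 4;  // assumed: 4-byte slots, as on 32-bit targets

// rep_size_log2 is log2 of the representation's byte size:
// 2 for float32, 3 for float64, 4 for simd128.
bool SlotsInterfere(int index_hi, int rep_size_log2, int other_index_hi,
                    int other_rep_size_log2) {
  int index_lo = index_hi - (1 << rep_size_log2) / kPointerSize + 1;
  int other_index_lo =
      other_index_hi - (1 << other_rep_size_log2) / kPointerSize + 1;
  // Two closed ranges [lo, hi] overlap iff each one's hi reaches the
  // other's lo.
  return other_index_hi >= index_lo && index_hi >= other_index_lo;
}

int main() {
  // A float64 ending at slot 3 spans slots {2, 3} on this layout, so it
  // interferes with a float32 in slot 2 but not with one in slot 1.
  assert(SlotsInterfere(3, 3, 2, 2));
  assert(!SlotsInterfere(3, 3, 1, 2));
  return 0;
}
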
 
 void InstructionOperand::Print(const RegisterConfiguration* config) const {
@@ -232,28 +260,31 @@
   return true;
 }
 
-
-MoveOperands* ParallelMove::PrepareInsertAfter(MoveOperands* move) const {
+void ParallelMove::PrepareInsertAfter(
+    MoveOperands* move, ZoneVector<MoveOperands*>* to_eliminate) const {
+  bool no_aliasing =
+      kSimpleFPAliasing || !move->destination().IsFPLocationOperand();
   MoveOperands* replacement = nullptr;
-  MoveOperands* to_eliminate = nullptr;
+  MoveOperands* eliminated = nullptr;
   for (MoveOperands* curr : *this) {
     if (curr->IsEliminated()) continue;
     if (curr->destination().EqualsCanonicalized(move->source())) {
+      // curr writes move's source in this ParallelMove, so move can only be
+      // inserted after it if its source is replaced with curr's source.
       DCHECK(!replacement);
       replacement = curr;
-      if (to_eliminate != nullptr) break;
-    } else if (curr->destination().EqualsCanonicalized(move->destination())) {
-      DCHECK(!to_eliminate);
-      to_eliminate = curr;
-      if (replacement != nullptr) break;
+      if (no_aliasing && eliminated != nullptr) break;
+    } else if (curr->destination().InterferesWith(move->destination())) {
+      // We can eliminate curr, since move overwrites at least a part of its
+      // destination, implying its value is no longer live.
+      eliminated = curr;
+      to_eliminate->push_back(curr);
+      if (no_aliasing && replacement != nullptr) break;
     }
   }
-  DCHECK_IMPLIES(replacement == to_eliminate, replacement == nullptr);
   if (replacement != nullptr) move->set_source(replacement->source());
-  return to_eliminate;
 }
 
-
 ExplicitOperand::ExplicitOperand(LocationKind kind, MachineRepresentation rep,
                                  int index)
     : LocationOperand(EXPLICIT, kind, rep, index) {
@@ -589,9 +620,7 @@
       handler_(handler),
       needs_frame_(false),
       must_construct_frame_(false),
-      must_deconstruct_frame_(false),
-      last_deferred_(RpoNumber::Invalid()) {}
-
+      must_deconstruct_frame_(false) {}
 
 size_t InstructionBlock::PredecessorIndexOf(RpoNumber rpo_number) const {
   size_t j = 0;
@@ -785,6 +814,7 @@
       next_virtual_register_(0),
       reference_maps_(zone()),
       representations_(zone()),
+      representation_mask_(0),
       deoptimization_entries_(zone()),
       current_block_(nullptr) {}
 
@@ -890,6 +920,7 @@
   DCHECK_IMPLIES(representations_[virtual_register] != rep,
                  representations_[virtual_register] == DefaultRepresentation());
   representations_[virtual_register] = rep;
+  representation_mask_ |= 1 << static_cast<int>(rep);
 }
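
The new representation_mask_ above is a one-bit-per-MachineRepresentation summary, letting later phases cheaply ask whether, say, any FP virtual registers exist at all. The bookkeeping in isolation, with enum values invented for the sketch:

#include <cassert>

enum class Rep : int { kWord32 = 0, kWord64, kFloat32, kFloat64 };

struct Sequence {
  int representation_mask = 0;

  // Records that some virtual register uses this representation.
  void MarkAsRepresentation(Rep rep) {
    representation_mask |= 1 << static_cast<int>(rep);
  }
  bool HasRepresentation(Rep rep) const {
    return (representation_mask & (1 << static_cast<int>(rep))) != 0;
  }
};

int main() {
  Sequence seq;
  seq.MarkAsRepresentation(Rep::kWord32);
  seq.MarkAsRepresentation(Rep::kFloat64);
  assert(seq.HasRepresentation(Rep::kFloat64));
  assert(!seq.HasRepresentation(Rep::kFloat32));
  return 0;
}
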
 
 int InstructionSequence::AddDeoptimizationEntry(
@@ -953,6 +984,11 @@
   PrintBlock(GetRegConfig(), block_id);
 }
 
+const RegisterConfiguration*
+InstructionSequence::GetRegisterConfigurationForTesting() {
+  return GetRegConfig();
+}
+
 FrameStateDescriptor::FrameStateDescriptor(
     Zone* zone, FrameStateType type, BailoutId bailout_id,
     OutputFrameStateCombine state_combine, size_t parameters_count,
diff --git a/src/compiler/instruction.h b/src/compiler/instruction.h
index b5c5914..327c8c1 100644
--- a/src/compiler/instruction.h
+++ b/src/compiler/instruction.h
@@ -10,24 +10,27 @@
 #include <map>
 #include <set>
 
+#include "src/base/compiler-specific.h"
 #include "src/compiler/common-operator.h"
 #include "src/compiler/frame.h"
 #include "src/compiler/instruction-codes.h"
 #include "src/compiler/opcodes.h"
-#include "src/compiler/source-position.h"
+#include "src/globals.h"
 #include "src/macro-assembler.h"
 #include "src/register-configuration.h"
 #include "src/zone/zone-allocator.h"
 
 namespace v8 {
 namespace internal {
+
+class SourcePosition;
+
 namespace compiler {
 
-// Forward declarations.
 class Schedule;
+class SourcePositionTable;
 
-
-class InstructionOperand {
+class V8_EXPORT_PRIVATE InstructionOperand {
  public:
   static const int kInvalidVirtualRegister = -1;
 
@@ -117,7 +120,7 @@
     return this->GetCanonicalizedValue() < that.GetCanonicalizedValue();
   }
 
-  bool InterferesWith(const InstructionOperand& that) const;
+  bool InterferesWith(const InstructionOperand& other) const;
 
   // APIs to aid debugging. For general-stream APIs, use operator<<
   void Print(const RegisterConfiguration* config) const;
@@ -516,8 +519,8 @@
   class IndexField : public BitField64<int32_t, 35, 29> {};
 };
 
-
-class ExplicitOperand : public LocationOperand {
+class V8_EXPORT_PRIVATE ExplicitOperand
+    : public NON_EXPORTED_BASE(LocationOperand) {
  public:
   ExplicitOperand(LocationKind kind, MachineRepresentation rep, int index);
 
@@ -639,8 +642,14 @@
   if (IsAnyLocationOperand()) {
     MachineRepresentation canonical = MachineRepresentation::kNone;
     if (IsFPRegister()) {
-      // We treat all FP register operands the same for simple aliasing.
-      canonical = MachineRepresentation::kFloat64;
+      if (kSimpleFPAliasing) {
+        // We treat all FP register operands the same for simple aliasing.
+        canonical = MachineRepresentation::kFloat64;
+      } else {
+        // We need to distinguish FP register operands of different reps when
+        // aliasing is not simple (e.g. ARM).
+        canonical = LocationOperand::cast(this)->representation();
+      }
     }
     return InstructionOperand::KindField::update(
         LocationOperand::RepresentationField::update(this->value_, canonical),
@@ -657,8 +666,8 @@
   }
 };
 
-
-class MoveOperands final : public ZoneObject {
+class V8_EXPORT_PRIVATE MoveOperands final
+    : public NON_EXPORTED_BASE(ZoneObject) {
  public:
   MoveOperands(const InstructionOperand& source,
                const InstructionOperand& destination)
@@ -683,11 +692,6 @@
   }
   void SetPending() { destination_ = InstructionOperand(); }
 
-  // True if this move is a move into the given destination operand.
-  bool Blocks(const InstructionOperand& destination) const {
-    return !IsEliminated() && source().InterferesWith(destination);
-  }
-
   // A move is redundant if it's been eliminated or if its source and
   // destination are the same.
   bool IsRedundant() const {
@@ -722,8 +726,9 @@
 
 std::ostream& operator<<(std::ostream& os, const PrintableMoveOperands& mo);
 
-
-class ParallelMove final : public ZoneVector<MoveOperands*>, public ZoneObject {
+class V8_EXPORT_PRIVATE ParallelMove final
+    : public NON_EXPORTED_BASE(ZoneVector<MoveOperands *>),
+      public NON_EXPORTED_BASE(ZoneObject) {
  public:
   explicit ParallelMove(Zone* zone) : ZoneVector<MoveOperands*>(zone) {
     reserve(4);
@@ -746,9 +751,10 @@
   bool IsRedundant() const;
 
   // Prepare this ParallelMove to insert move as if it happened in a subsequent
-  // ParallelMove.  move->source() may be changed.  The MoveOperand returned
-  // must be Eliminated.
-  MoveOperands* PrepareInsertAfter(MoveOperands* move) const;
+  // ParallelMove.  move->source() may be changed.  Any MoveOperands added to
+  // to_eliminate must be Eliminated.
+  void PrepareInsertAfter(MoveOperands* move,
+                          ZoneVector<MoveOperands*>* to_eliminate) const;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(ParallelMove);
@@ -792,7 +798,7 @@
 
 class InstructionBlock;
 
-class Instruction final {
+class V8_EXPORT_PRIVATE Instruction final {
  public:
   size_t OutputCount() const { return OutputCountField::decode(bit_field_); }
   const InstructionOperand* OutputAt(size_t i) const {
@@ -899,7 +905,6 @@
   bool IsTailCall() const {
     return arch_opcode() == ArchOpcode::kArchTailCallCodeObject ||
            arch_opcode() == ArchOpcode::kArchTailCallCodeObjectFromJSFunction ||
-           arch_opcode() == ArchOpcode::kArchTailCallJSFunction ||
            arch_opcode() == ArchOpcode::kArchTailCallJSFunctionFromJSFunction ||
            arch_opcode() == ArchOpcode::kArchTailCallAddress;
   }
@@ -1019,8 +1024,7 @@
 
 std::ostream& operator<<(std::ostream&, const RpoNumber&);
 
-
-class Constant final {
+class V8_EXPORT_PRIVATE Constant final {
  public:
   enum Type {
     kInt32,
@@ -1211,7 +1215,8 @@
 
 typedef ZoneVector<DeoptimizationEntry> DeoptimizationVector;
 
-class PhiInstruction final : public ZoneObject {
+class V8_EXPORT_PRIVATE PhiInstruction final
+    : public NON_EXPORTED_BASE(ZoneObject) {
  public:
   typedef ZoneVector<InstructionOperand> Inputs;
 
@@ -1236,7 +1241,8 @@
 
 
 // Analogue of BasicBlock for Instructions instead of Nodes.
-class InstructionBlock final : public ZoneObject {
+class V8_EXPORT_PRIVATE InstructionBlock final
+    : public NON_EXPORTED_BASE(ZoneObject) {
  public:
   InstructionBlock(Zone* zone, RpoNumber rpo_number, RpoNumber loop_header,
                    RpoNumber loop_end, bool deferred, bool handler);
@@ -1300,9 +1306,6 @@
   bool must_deconstruct_frame() const { return must_deconstruct_frame_; }
   void mark_must_deconstruct_frame() { must_deconstruct_frame_ = true; }
 
-  void set_last_deferred(RpoNumber last) { last_deferred_ = last; }
-  RpoNumber last_deferred() const { return last_deferred_; }
-
  private:
   Successors successors_;
   Predecessors predecessors_;
@@ -1318,7 +1321,6 @@
   bool needs_frame_;
   bool must_construct_frame_;
   bool must_deconstruct_frame_;
-  RpoNumber last_deferred_;
 };
 
 class InstructionSequence;
@@ -1347,7 +1349,8 @@
 
 // Represents architecture-specific generated code before, during, and after
 // register allocation.
-class InstructionSequence final : public ZoneObject {
+class V8_EXPORT_PRIVATE InstructionSequence final
+    : public NON_EXPORTED_BASE(ZoneObject) {
  public:
   static InstructionBlocks* InstructionBlocksFor(Zone* zone,
                                                  const Schedule* schedule);
@@ -1388,6 +1391,7 @@
   }
   MachineRepresentation GetRepresentation(int virtual_register) const;
   void MarkAsRepresentation(MachineRepresentation rep, int virtual_register);
+  int representation_mask() const { return representation_mask_; }
 
   bool IsReference(int virtual_register) const {
     return CanBeTaggedPointer(GetRepresentation(virtual_register));
@@ -1395,14 +1399,6 @@
   bool IsFP(int virtual_register) const {
     return IsFloatingPoint(GetRepresentation(virtual_register));
   }
-  bool IsFloat(int virtual_register) const {
-    return GetRepresentation(virtual_register) ==
-           MachineRepresentation::kFloat32;
-  }
-  bool IsDouble(int virtual_register) const {
-    return GetRepresentation(virtual_register) ==
-           MachineRepresentation::kFloat64;
-  }
 
   Instruction* GetBlockStart(RpoNumber rpo) const;
 
@@ -1504,9 +1500,11 @@
   void ValidateDeferredBlockEntryPaths() const;
   void ValidateSSA() const;
 
+  const RegisterConfiguration* GetRegisterConfigurationForTesting();
+
  private:
-  friend std::ostream& operator<<(std::ostream& os,
-                                  const PrintableInstructionSequence& code);
+  friend V8_EXPORT_PRIVATE std::ostream& operator<<(
+      std::ostream& os, const PrintableInstructionSequence& code);
 
   typedef ZoneMap<const Instruction*, SourcePosition> SourcePositionMap;
 
@@ -1520,6 +1518,7 @@
   int next_virtual_register_;
   ReferenceMapDeque reference_maps_;
   ZoneVector<MachineRepresentation> representations_;
+  int representation_mask_;
   DeoptimizationVector deoptimization_entries_;
 
   // Used at construction time
@@ -1534,9 +1533,8 @@
   const InstructionSequence* sequence_;
 };
 
-
-std::ostream& operator<<(std::ostream& os,
-                         const PrintableInstructionSequence& code);
+V8_EXPORT_PRIVATE std::ostream& operator<<(
+    std::ostream& os, const PrintableInstructionSequence& code);
 
 }  // namespace compiler
 }  // namespace internal
diff --git a/src/compiler/int64-lowering.cc b/src/compiler/int64-lowering.cc
index 539a372..62523ca 100644
--- a/src/compiler/int64-lowering.cc
+++ b/src/compiler/int64-lowering.cc
@@ -61,6 +61,8 @@
           // that they are processed after all other nodes.
           PreparePhiReplacement(input);
           stack_.push_front({input, 0});
+        } else if (input->opcode() == IrOpcode::kEffectPhi) {
+          stack_.push_front({input, 0});
         } else {
           stack_.push_back({input, 0});
         }
@@ -240,7 +242,7 @@
     case IrOpcode::kStart: {
       int parameter_count = GetParameterCountAfterLowering(signature());
       // Only exchange the node if the parameter count actually changed.
-      if (parameter_count != signature()->parameter_count()) {
+      if (parameter_count != static_cast<int>(signature()->parameter_count())) {
         int delta =
             parameter_count - static_cast<int>(signature()->parameter_count());
         int new_output_count = node->op()->ValueOutputCount() + delta;
@@ -255,7 +257,7 @@
       // the only input of a parameter node, only changes if the parameter count
       // changes.
       if (GetParameterCountAfterLowering(signature()) !=
-          signature()->parameter_count()) {
+          static_cast<int>(signature()->parameter_count())) {
         int old_index = ParameterIndexOf(node->op());
         int new_index = GetParameterIndexAfterLowering(signature(), old_index);
         NodeProperties::ChangeOp(node, common()->Parameter(new_index));
@@ -273,7 +275,7 @@
     case IrOpcode::kReturn: {
       DefaultLowering(node);
       int new_return_count = GetReturnCountAfterLowering(signature());
-      if (signature()->return_count() != new_return_count) {
+      if (static_cast<int>(signature()->return_count()) != new_return_count) {
         NodeProperties::ChangeOp(node, common()->Return(new_return_count));
       }
       break;
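
The static_cast<int>(...) additions above silence signed/unsigned comparison warnings: parameter_count() and friends return an unsigned size type, and mixing that with a signed int makes the int convert to unsigned, which misbehaves for negative values. A two-line demonstration in generic C++, not V8 code:

#include <cstddef>
#include <iostream>

int main() {
  std::size_t count = 2;  // what an unsigned parameter_count() would return
  int delta = -1;
  // Mixed comparison: delta converts to size_t (becoming SIZE_MAX), so this
  // prints 0 even though -1 < 2 mathematically. Compilers flag it with
  // -Wsign-compare.
  std::cout << (delta < count) << '\n';
  // Casting the unsigned side restores ordinary signed comparison: prints 1.
  std::cout << (delta < static_cast<int>(count)) << '\n';
  return 0;
}
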
diff --git a/src/compiler/int64-lowering.h b/src/compiler/int64-lowering.h
index 084c07a..66a54e9 100644
--- a/src/compiler/int64-lowering.h
+++ b/src/compiler/int64-lowering.h
@@ -9,13 +9,14 @@
 #include "src/compiler/graph.h"
 #include "src/compiler/machine-operator.h"
 #include "src/compiler/node-marker.h"
+#include "src/globals.h"
 #include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-class Int64Lowering {
+class V8_EXPORT_PRIVATE Int64Lowering {
  public:
   Int64Lowering(Graph* graph, MachineOperatorBuilder* machine,
                 CommonOperatorBuilder* common, Zone* zone,
diff --git a/src/compiler/js-builtin-reducer.cc b/src/compiler/js-builtin-reducer.cc
index 41d4a00..2962e24 100644
--- a/src/compiler/js-builtin-reducer.cc
+++ b/src/compiler/js-builtin-reducer.cc
@@ -96,15 +96,30 @@
 
 JSBuiltinReducer::JSBuiltinReducer(Editor* editor, JSGraph* jsgraph,
                                    Flags flags,
-                                   CompilationDependencies* dependencies)
+                                   CompilationDependencies* dependencies,
+                                   Handle<Context> native_context)
     : AdvancedReducer(editor),
       dependencies_(dependencies),
       flags_(flags),
       jsgraph_(jsgraph),
+      native_context_(native_context),
       type_cache_(TypeCache::Get()) {}
 
 namespace {
 
+// TODO(turbofan): Shall we move this to the NodeProperties? Or some (untyped)
+// alias analyzer?
+bool IsSame(Node* a, Node* b) {
+  if (a == b) {
+    return true;
+  } else if (a->opcode() == IrOpcode::kCheckHeapObject) {
+    return IsSame(a->InputAt(0), b);
+  } else if (b->opcode() == IrOpcode::kCheckHeapObject) {
+    return IsSame(a, b->InputAt(0));
+  }
+  return false;
+}
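
The IsSame helper above treats a node and its CheckHeapObject-wrapped version as the same value, so a dominating CheckMaps still counts as a map witness for a checked receiver. Its unwrap-then-compare behavior in isolation, on a toy node model rather than V8's real Node:

#include <cassert>

enum class Op { kCheckHeapObject, kOther };
struct Node {
  Op opcode;
  Node* input = nullptr;  // a single input suffices for this sketch
};

bool IsSame(Node* a, Node* b) {
  if (a == b) return true;
  if (a->opcode == Op::kCheckHeapObject) return IsSame(a->input, b);
  if (b->opcode == Op::kCheckHeapObject) return IsSame(a, b->input);
  return false;
}

int main() {
  Node receiver{Op::kOther};
  Node checked{Op::kCheckHeapObject, &receiver};
  assert(IsSame(&checked, &receiver));  // unwraps the check on either side
  assert(IsSame(&receiver, &checked));
  Node other{Op::kOther};
  assert(!IsSame(&checked, &other));
  return 0;
}
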
+
 MaybeHandle<Map> GetMapWitness(Node* node) {
   Node* receiver = NodeProperties::GetValueInput(node, 1);
   Node* effect = NodeProperties::GetEffectInput(node);
@@ -112,7 +127,7 @@
   // for the {receiver}, and if so use that map for the lowering below.
   for (Node* dominator = effect;;) {
     if (dominator->opcode() == IrOpcode::kCheckMaps &&
-        dominator->InputAt(0) == receiver) {
+        IsSame(dominator->InputAt(0), receiver)) {
       if (dominator->op()->ValueInputCount() == 2) {
         HeapObjectMatcher m(dominator->InputAt(1));
         if (m.HasValue()) return Handle<Map>::cast(m.Value());
@@ -160,8 +175,554 @@
          !IsReadOnlyLengthDescriptor(receiver_map);
 }
 
+bool CanInlineJSArrayIteration(Handle<Map> receiver_map) {
+  Isolate* const isolate = receiver_map->GetIsolate();
+  // Ensure that the [[Prototype]] is actually an exotic Array
+  if (!receiver_map->prototype()->IsJSArray()) return false;
+
+  // Don't inline JSArrays with slow elements of any kind
+  if (!IsFastElementsKind(receiver_map->elements_kind())) return false;
+
+  // If the receiver map has packed elements, no need to check the prototype.
+  // This requires a MapCheck where this is used.
+  if (!IsFastHoleyElementsKind(receiver_map->elements_kind())) return true;
+
+  Handle<JSArray> receiver_prototype(JSArray::cast(receiver_map->prototype()),
+                                     isolate);
+  // Ensure all prototypes of the {receiver} are stable.
+  for (PrototypeIterator it(isolate, receiver_prototype, kStartAtReceiver);
+       !it.IsAtEnd(); it.Advance()) {
+    Handle<JSReceiver> current = PrototypeIterator::GetCurrent<JSReceiver>(it);
+    if (!current->map()->is_stable()) return false;
+  }
+
+  // For holey Arrays, ensure that the array_protector cell is valid (must be
+  // a CompilationDependency), and the JSArray prototype has not been altered.
+  return receiver_map->instance_type() == JS_ARRAY_TYPE &&
+         (!receiver_map->is_dictionary_map() || receiver_map->is_stable()) &&
+         isolate->IsFastArrayConstructorPrototypeChainIntact() &&
+         isolate->IsAnyInitialArrayPrototype(receiver_prototype);
+}
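
CanInlineJSArrayIteration above only admits holey receivers when every prototype in the chain is stable; otherwise a later prototype mutation could make the hole-skipping observable. The shape of that walk, on a toy object model:

#include <cassert>

struct Map { bool is_stable; };
struct JSObject {
  Map map;
  const JSObject* prototype;  // nullptr terminates the chain
};

// Mirrors the PrototypeIterator loop above: inlining is only safe when every
// map on the chain is stable (cannot change shape without a deopt trigger).
bool AllPrototypesStable(const JSObject* receiver_prototype) {
  for (const JSObject* cur = receiver_prototype; cur != nullptr;
       cur = cur->prototype) {
    if (!cur->map.is_stable) return false;
  }
  return true;
}

int main() {
  JSObject object_proto{{true}, nullptr};
  JSObject array_proto{{true}, &object_proto};
  assert(AllPrototypesStable(&array_proto));

  JSObject mutated{{false}, &object_proto};
  assert(!AllPrototypesStable(&mutated));
  return 0;
}
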
+
 }  // namespace
 
+Reduction JSBuiltinReducer::ReduceArrayIterator(Node* node,
+                                                IterationKind kind) {
+  Handle<Map> receiver_map;
+  if (GetMapWitness(node).ToHandle(&receiver_map)) {
+    return ReduceArrayIterator(receiver_map, node, kind,
+                               ArrayIteratorKind::kArray);
+  }
+  return NoChange();
+}
+
+Reduction JSBuiltinReducer::ReduceTypedArrayIterator(Node* node,
+                                                     IterationKind kind) {
+  Handle<Map> receiver_map;
+  if (GetMapWitness(node).ToHandle(&receiver_map) &&
+      receiver_map->instance_type() == JS_TYPED_ARRAY_TYPE) {
+    return ReduceArrayIterator(receiver_map, node, kind,
+                               ArrayIteratorKind::kTypedArray);
+  }
+  return NoChange();
+}
+
+Reduction JSBuiltinReducer::ReduceArrayIterator(Handle<Map> receiver_map,
+                                                Node* node, IterationKind kind,
+                                                ArrayIteratorKind iter_kind) {
+  Node* receiver = NodeProperties::GetValueInput(node, 1);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+
+  if (iter_kind == ArrayIteratorKind::kTypedArray) {
+    // For JSTypedArray iterator methods, deopt if the buffer is neutered. This
+    // is potentially a deopt loop, but should be extremely unlikely.
+    DCHECK_EQ(JS_TYPED_ARRAY_TYPE, receiver_map->instance_type());
+    Node* buffer = effect = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+        receiver, effect, control);
+
+    Node* check = effect = graph()->NewNode(
+        simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
+    check = graph()->NewNode(simplified()->BooleanNot(), check);
+    effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+  }
+
+  int map_index = -1;
+  Node* object_map = jsgraph()->UndefinedConstant();
+  switch (receiver_map->instance_type()) {
+    case JS_ARRAY_TYPE:
+      if (kind == IterationKind::kKeys) {
+        map_index = Context::FAST_ARRAY_KEY_ITERATOR_MAP_INDEX;
+      } else {
+        map_index = kind == IterationKind::kValues
+                        ? Context::FAST_SMI_ARRAY_VALUE_ITERATOR_MAP_INDEX
+                        : Context::FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX;
+
+        if (CanInlineJSArrayIteration(receiver_map)) {
+          // Use `generic` elements for holey arrays if there may be elements
+          // on the prototype chain.
+          map_index += static_cast<int>(receiver_map->elements_kind());
+          object_map = jsgraph()->Constant(receiver_map);
+          if (IsFastHoleyElementsKind(receiver_map->elements_kind())) {
+            Handle<JSObject> initial_array_prototype(
+                native_context()->initial_array_prototype(), isolate());
+            dependencies()->AssumePrototypeMapsStable(receiver_map,
+                                                      initial_array_prototype);
+          }
+        } else {
+          map_index += (Context::GENERIC_ARRAY_VALUE_ITERATOR_MAP_INDEX -
+                        Context::FAST_SMI_ARRAY_VALUE_ITERATOR_MAP_INDEX);
+        }
+      }
+      break;
+    case JS_TYPED_ARRAY_TYPE:
+      if (kind == IterationKind::kKeys) {
+        map_index = Context::TYPED_ARRAY_KEY_ITERATOR_MAP_INDEX;
+      } else {
+        DCHECK_GE(receiver_map->elements_kind(), UINT8_ELEMENTS);
+        DCHECK_LE(receiver_map->elements_kind(), UINT8_CLAMPED_ELEMENTS);
+        map_index = (kind == IterationKind::kValues
+                         ? Context::UINT8_ARRAY_VALUE_ITERATOR_MAP_INDEX
+                         : Context::UINT8_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX) +
+                    (receiver_map->elements_kind() - UINT8_ELEMENTS);
+      }
+      break;
+    default:
+      if (kind == IterationKind::kKeys) {
+        map_index = Context::GENERIC_ARRAY_KEY_ITERATOR_MAP_INDEX;
+      } else if (kind == IterationKind::kValues) {
+        map_index = Context::GENERIC_ARRAY_VALUE_ITERATOR_MAP_INDEX;
+      } else {
+        map_index = Context::GENERIC_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX;
+      }
+      break;
+  }
+
+  DCHECK_GE(map_index, Context::TYPED_ARRAY_KEY_ITERATOR_MAP_INDEX);
+  DCHECK_LE(map_index, Context::GENERIC_ARRAY_VALUE_ITERATOR_MAP_INDEX);
+
+  Handle<Map> map(Map::cast(native_context()->get(map_index)), isolate());
+
+  // Allocate the new JSArrayIterator instance.
+  effect = graph()->NewNode(
+      common()->BeginRegion(RegionObservability::kNotObservable), effect);
+  Node* value = effect = graph()->NewNode(
+      simplified()->Allocate(NOT_TENURED),
+      jsgraph()->Constant(JSArrayIterator::kSize), effect, control);
+  effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
+                            value, jsgraph()->Constant(map), effect, control);
+  effect = graph()->NewNode(
+      simplified()->StoreField(AccessBuilder::ForJSObjectProperties()), value,
+      jsgraph()->EmptyFixedArrayConstant(), effect, control);
+  effect = graph()->NewNode(
+      simplified()->StoreField(AccessBuilder::ForJSObjectElements()), value,
+      jsgraph()->EmptyFixedArrayConstant(), effect, control);
+
+  // Attach the iterator to the object being iterated.
+  effect = graph()->NewNode(
+      simplified()->StoreField(AccessBuilder::ForJSArrayIteratorObject()),
+      value, receiver, effect, control);
+  effect = graph()->NewNode(
+      simplified()->StoreField(AccessBuilder::ForJSArrayIteratorIndex()), value,
+      jsgraph()->ZeroConstant(), effect, control);
+  effect = graph()->NewNode(
+      simplified()->StoreField(AccessBuilder::ForJSArrayIteratorObjectMap()),
+      value, object_map, effect, control);
+
+  value = effect = graph()->NewNode(common()->FinishRegion(), value, effect);
+
+  // Replace the original node with the freshly built iterator.
+  ReplaceWithValue(node, value, effect, control);
+  return Replace(value);
+}
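
The map_index updates above rely on the native context storing the fast value/entry iterator maps in contiguous slots, one per fast ElementsKind, so the receiver's kind can simply be added to a base index. A sketch with made-up slot numbers:

#include <cassert>

// Mirrors V8's fast ElementsKind ordering; values 0..5.
enum ElementsKind {
  FAST_SMI_ELEMENTS = 0,
  FAST_HOLEY_SMI_ELEMENTS,
  FAST_ELEMENTS,
  FAST_HOLEY_ELEMENTS,
  FAST_DOUBLE_ELEMENTS,
  FAST_HOLEY_DOUBLE_ELEMENTS
};

// Hypothetical base slot; the real one is a Context::*_INDEX constant.
constexpr int kFastSmiArrayValueIteratorMapIndex = 100;

int ValueIteratorMapIndex(ElementsKind kind) {
  return kFastSmiArrayValueIteratorMapIndex + static_cast<int>(kind);
}

int main() {
  assert(ValueIteratorMapIndex(FAST_SMI_ELEMENTS) == 100);
  assert(ValueIteratorMapIndex(FAST_HOLEY_DOUBLE_ELEMENTS) == 105);
  return 0;
}
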
+
+Reduction JSBuiltinReducer::ReduceFastArrayIteratorNext(
+    Handle<Map> iterator_map, Node* node, IterationKind kind) {
+  Node* iterator = NodeProperties::GetValueInput(node, 1);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+  Node* context = NodeProperties::GetContextInput(node);
+
+  if (kind != IterationKind::kKeys &&
+      !isolate()->IsFastArrayIterationIntact()) {
+    // Avoid deopt loops for non-key iteration if the
+    // fast_array_iteration_protector cell has been invalidated.
+    return NoChange();
+  }
+
+  ElementsKind elements_kind = JSArrayIterator::ElementsKindForInstanceType(
+      iterator_map->instance_type());
+
+  if (IsFastHoleyElementsKind(elements_kind)) {
+    if (!isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
+      return NoChange();
+    } else {
+      Handle<JSObject> initial_array_prototype(
+          native_context()->initial_array_prototype(), isolate());
+      dependencies()->AssumePropertyCell(factory()->array_protector());
+    }
+  }
+
+  Node* array = effect = graph()->NewNode(
+      simplified()->LoadField(AccessBuilder::ForJSArrayIteratorObject()),
+      iterator, effect, control);
+  Node* check0 = graph()->NewNode(simplified()->ReferenceEqual(), array,
+                                  jsgraph()->UndefinedConstant());
+  Node* branch0 =
+      graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
+
+  Node* vdone_false0;
+  Node* vfalse0;
+  Node* efalse0 = effect;
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  {
+    // iterator.[[IteratedObject]] !== undefined, continue iterating.
+    Node* index = efalse0 = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForJSArrayIteratorIndex(
+            JS_ARRAY_TYPE, elements_kind)),
+        iterator, efalse0, if_false0);
+
+    Node* length = efalse0 = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForJSArrayLength(elements_kind)),
+        array, efalse0, if_false0);
+    Node* check1 =
+        graph()->NewNode(simplified()->NumberLessThan(), index, length);
+    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+                                     check1, if_false0);
+
+    Node* vdone_true1;
+    Node* vtrue1;
+    Node* etrue1 = efalse0;
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    {
+      // iterator.[[NextIndex]] < array.length, continue iterating
+      vdone_true1 = jsgraph()->FalseConstant();
+      if (kind == IterationKind::kKeys) {
+        vtrue1 = index;
+      } else {
+        // For value/entry iteration, first step is a mapcheck to ensure
+        // inlining is still valid.
+        Node* orig_map = etrue1 =
+            graph()->NewNode(simplified()->LoadField(
+                                 AccessBuilder::ForJSArrayIteratorObjectMap()),
+                             iterator, etrue1, if_true1);
+        etrue1 = graph()->NewNode(simplified()->CheckMaps(1), array, orig_map,
+                                  etrue1, if_true1);
+      }
+
+      if (kind != IterationKind::kKeys) {
+        Node* elements = etrue1 = graph()->NewNode(
+            simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+            array, etrue1, if_true1);
+        Node* value = etrue1 = graph()->NewNode(
+            simplified()->LoadElement(
+                AccessBuilder::ForFixedArrayElement(elements_kind)),
+            elements, index, etrue1, if_true1);
+
+        // Convert hole to undefined if needed.
+        if (elements_kind == FAST_HOLEY_ELEMENTS ||
+            elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
+          value = graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(),
+                                   value);
+        } else if (elements_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
+          // TODO(bmeurer): avoid deopt if not all uses of value are truncated.
+          CheckFloat64HoleMode mode = CheckFloat64HoleMode::kAllowReturnHole;
+          value = etrue1 = graph()->NewNode(
+              simplified()->CheckFloat64Hole(mode), value, etrue1, if_true1);
+        }
+
+        if (kind == IterationKind::kEntries) {
+          // Allocate elements for key/value pair
+          vtrue1 = etrue1 =
+              graph()->NewNode(javascript()->CreateKeyValueArray(), index,
+                               value, context, etrue1);
+        } else {
+          DCHECK_EQ(kind, IterationKind::kValues);
+          vtrue1 = value;
+        }
+      }
+
+      Node* next_index = graph()->NewNode(simplified()->NumberAdd(), index,
+                                          jsgraph()->OneConstant());
+      next_index = graph()->NewNode(simplified()->NumberToUint32(), next_index);
+
+      etrue1 = graph()->NewNode(
+          simplified()->StoreField(AccessBuilder::ForJSArrayIteratorIndex(
+              JS_ARRAY_TYPE, elements_kind)),
+          iterator, next_index, etrue1, if_true1);
+    }
+
+    Node* vdone_false1;
+    Node* vfalse1;
+    Node* efalse1 = efalse0;
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    {
+      // iterator.[[NextIndex]] >= array.length, stop iterating.
+      vdone_false1 = jsgraph()->TrueConstant();
+      vfalse1 = jsgraph()->UndefinedConstant();
+      efalse1 = graph()->NewNode(
+          simplified()->StoreField(AccessBuilder::ForJSArrayIteratorObject()),
+          iterator, vfalse1, efalse1, if_false1);
+    }
+
+    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    efalse0 =
+        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+    vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                               vtrue1, vfalse1, if_false0);
+    vdone_false0 =
+        graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                         vdone_true1, vdone_false1, if_false0);
+  }
+
+  Node* vdone_true0;
+  Node* vtrue0;
+  Node* etrue0 = effect;
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  {
+    // iterator.[[IteratedObject]] === undefined, the iterator is done.
+    vdone_true0 = jsgraph()->TrueConstant();
+    vtrue0 = jsgraph()->UndefinedConstant();
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_false0, if_true0);
+  effect = graph()->NewNode(common()->EffectPhi(2), efalse0, etrue0, control);
+  Node* value =
+      graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                       vfalse0, vtrue0, control);
+  Node* done =
+      graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                       vdone_false0, vdone_true0, control);
+
+  // Create IteratorResult object.
+  value = effect = graph()->NewNode(javascript()->CreateIterResultObject(),
+                                    value, done, context, effect);
+  ReplaceWithValue(node, value, effect, control);
+  return Replace(value);
+}
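
The branch-and-phi construction above is easier to follow against a direct implementation of the same next() protocol; the lowering is roughly the graph form of this sketch, with the map re-check and hole-to-undefined conversion elided:

#include <cassert>
#include <cstddef>
#include <vector>

// -1 stands in for the JS `undefined` result value in this toy.
struct IterResult { int value; bool done; };

struct FastArrayIterator {
  std::vector<int>* array;  // nullptr once exhausted ([[IteratedObject]] cleared)
  std::size_t index = 0;    // [[NextIndex]]

  IterResult next() {
    if (array == nullptr) return {-1, true};  // detached: stays done forever
    if (index < array->size()) {
      int value = (*array)[index];
      ++index;                                // bump [[NextIndex]]
      return {value, false};
    }
    array = nullptr;                          // clear [[IteratedObject]]
    return {-1, true};
  }
};

int main() {
  std::vector<int> elements{10, 20};
  FastArrayIterator it{&elements};
  assert(it.next().value == 10);
  assert(it.next().value == 20);
  assert(it.next().done);
  assert(it.next().done);
  return 0;
}
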
+
+Reduction JSBuiltinReducer::ReduceTypedArrayIteratorNext(
+    Handle<Map> iterator_map, Node* node, IterationKind kind) {
+  Node* iterator = NodeProperties::GetValueInput(node, 1);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+  Node* context = NodeProperties::GetContextInput(node);
+
+  ElementsKind elements_kind = JSArrayIterator::ElementsKindForInstanceType(
+      iterator_map->instance_type());
+
+  Node* array = effect = graph()->NewNode(
+      simplified()->LoadField(AccessBuilder::ForJSArrayIteratorObject()),
+      iterator, effect, control);
+  Node* check0 = graph()->NewNode(simplified()->ReferenceEqual(), array,
+                                  jsgraph()->UndefinedConstant());
+  Node* branch0 =
+      graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
+
+  Node* vdone_false0;
+  Node* vfalse0;
+  Node* efalse0 = effect;
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  {
+    // iterator.[[IteratedObject]] !== undefined, continue iterating.
+    Node* index = efalse0 = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForJSArrayIteratorIndex(
+            JS_TYPED_ARRAY_TYPE, elements_kind)),
+        iterator, efalse0, if_false0);
+
+    // typedarray.[[ViewedArrayBuffer]]
+    Node* buffer = efalse0 = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+        array, efalse0, if_false0);
+
+    Node* check1 = efalse0 = graph()->NewNode(
+        simplified()->ArrayBufferWasNeutered(), buffer, efalse0, if_false0);
+    check1 = graph()->NewNode(simplified()->BooleanNot(), check1);
+    efalse0 =
+        graph()->NewNode(simplified()->CheckIf(), check1, efalse0, if_false0);
+
+    Node* length = efalse0 = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForJSTypedArrayLength()), array,
+        efalse0, if_false0);
+
+    Node* check2 =
+        graph()->NewNode(simplified()->NumberLessThan(), index, length);
+    Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+                                     check2, if_false0);
+
+    Node* vdone_true2;
+    Node* vtrue2;
+    Node* etrue2 = efalse0;
+    Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+    {
+      // iterator.[[NextIndex]] < array.length, continue iterating
+      vdone_true2 = jsgraph()->FalseConstant();
+      if (kind == IterationKind::kKeys) {
+        vtrue2 = index;
+      }
+
+      Node* next_index = graph()->NewNode(simplified()->NumberAdd(), index,
+                                          jsgraph()->OneConstant());
+      next_index = graph()->NewNode(simplified()->NumberToUint32(), next_index);
+
+      etrue2 = graph()->NewNode(
+          simplified()->StoreField(AccessBuilder::ForJSArrayIteratorIndex(
+              JS_TYPED_ARRAY_TYPE, elements_kind)),
+          iterator, next_index, etrue2, if_true2);
+
+      if (kind != IterationKind::kKeys) {
+        Node* elements = etrue2 = graph()->NewNode(
+            simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+            array, etrue2, if_true2);
+        Node* base_ptr = etrue2 = graph()->NewNode(
+            simplified()->LoadField(
+                AccessBuilder::ForFixedTypedArrayBaseBasePointer()),
+            elements, etrue2, if_true2);
+        Node* external_ptr = etrue2 = graph()->NewNode(
+            simplified()->LoadField(
+                AccessBuilder::ForFixedTypedArrayBaseExternalPointer()),
+            elements, etrue2, if_true2);
+
+        ExternalArrayType array_type = kExternalInt8Array;
+        switch (elements_kind) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+  case TYPE##_ELEMENTS:                                 \
+    array_type = kExternal##Type##Array;                \
+    break;
+          TYPED_ARRAYS(TYPED_ARRAY_CASE)
+          default:
+            UNREACHABLE();
+#undef TYPED_ARRAY_CASE
+        }
+
+        Node* value = etrue2 =
+            graph()->NewNode(simplified()->LoadTypedElement(array_type), buffer,
+                             base_ptr, external_ptr, index, etrue2, if_true2);
+
+        if (kind == IterationKind::kEntries) {
+          // Allocate elements for key/value pair
+          vtrue2 = etrue2 =
+              graph()->NewNode(javascript()->CreateKeyValueArray(), index,
+                               value, context, etrue2);
+        } else {
+          DCHECK(kind == IterationKind::kValues);
+          vtrue2 = value;
+        }
+      }
+    }
+
+    Node* vdone_false2;
+    Node* vfalse2;
+    Node* efalse2 = efalse0;
+    Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+    {
+      // iterator.[[NextIndex]] >= array.length, stop iterating.
+      vdone_false2 = jsgraph()->TrueConstant();
+      vfalse2 = jsgraph()->UndefinedConstant();
+      efalse2 = graph()->NewNode(
+          simplified()->StoreField(AccessBuilder::ForJSArrayIteratorObject()),
+          iterator, vfalse2, efalse2, if_false2);
+    }
+
+    if_false0 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+    efalse0 =
+        graph()->NewNode(common()->EffectPhi(2), etrue2, efalse2, if_false0);
+    vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                               vtrue2, vfalse2, if_false0);
+    vdone_false0 =
+        graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                         vdone_true2, vdone_false2, if_false0);
+  }
+
+  Node* vdone_true0;
+  Node* vtrue0;
+  Node* etrue0 = effect;
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  {
+    // iterator.[[IteratedObject]] === undefined, the iterator is done.
+    vdone_true0 = jsgraph()->TrueConstant();
+    vtrue0 = jsgraph()->UndefinedConstant();
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_false0, if_true0);
+  effect = graph()->NewNode(common()->EffectPhi(2), efalse0, etrue0, control);
+  Node* value =
+      graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                       vfalse0, vtrue0, control);
+  Node* done =
+      graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                       vdone_false0, vdone_true0, control);
+
+  // Create IteratorResult object.
+  value = effect = graph()->NewNode(javascript()->CreateIterResultObject(),
+                                    value, done, context, effect);
+  ReplaceWithValue(node, value, effect, control);
+  return Replace(value);
+}
+
+Reduction JSBuiltinReducer::ReduceArrayIteratorNext(Node* node) {
+  Handle<Map> receiver_map;
+  if (GetMapWitness(node).ToHandle(&receiver_map)) {
+    switch (receiver_map->instance_type()) {
+      case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
+        return ReduceTypedArrayIteratorNext(receiver_map, node,
+                                            IterationKind::kKeys);
+
+      case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
+        return ReduceFastArrayIteratorNext(receiver_map, node,
+                                           IterationKind::kKeys);
+
+      case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+      case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+      case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+      case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+      case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+      case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+      case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+      case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+      case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+        return ReduceTypedArrayIteratorNext(receiver_map, node,
+                                            IterationKind::kEntries);
+
+      case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+      case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+      case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+      case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+      case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+      case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+        return ReduceFastArrayIteratorNext(receiver_map, node,
+                                           IterationKind::kEntries);
+
+      case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
+      case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
+      case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
+      case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
+      case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
+      case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
+      case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
+      case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
+      case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
+        return ReduceTypedArrayIteratorNext(receiver_map, node,
+                                            IterationKind::kValues);
+
+      case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+      case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+      case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
+      case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
+      case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+      case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+        return ReduceFastArrayIteratorNext(receiver_map, node,
+                                           IterationKind::kValues);
+
+      default:
+        // Slow array iterators are not reduced.
+        return NoChange();
+    }
+  }
+  return NoChange();
+}
+
 // ES6 section 22.1.3.17 Array.prototype.pop ( )
 Reduction JSBuiltinReducer::ReduceArrayPop(Node* node) {
   Handle<Map> receiver_map;
@@ -329,14 +890,14 @@
                             InstanceType instance_type) {
   for (Node* dominator = effect;;) {
     if (dominator->opcode() == IrOpcode::kCheckMaps &&
-        dominator->InputAt(0) == receiver) {
+        IsSame(dominator->InputAt(0), receiver)) {
       // Check if all maps have the given {instance_type}.
       for (int i = 1; i < dominator->op()->ValueInputCount(); ++i) {
         Node* const map = NodeProperties::GetValueInput(dominator, i);
         Type* const map_type = NodeProperties::GetType(map);
-        if (!map_type->IsConstant()) return false;
+        if (!map_type->IsHeapConstant()) return false;
         Handle<Map> const map_value =
-            Handle<Map>::cast(map_type->AsConstant()->Value());
+            Handle<Map>::cast(map_type->AsHeapConstant()->Value());
         if (map_value->instance_type() != instance_type) return false;
       }
       return true;
@@ -915,11 +1476,10 @@
       r.InputsMatchTwo(type_cache_.kSafeInteger,
                        type_cache_.kZeroOrUndefined) ||
       r.InputsMatchTwo(type_cache_.kSafeInteger, type_cache_.kTenOrUndefined)) {
-    // Number.parseInt(a:safe-integer) -> NumberToInt32(a)
-    // Number.parseInt(a:safe-integer,b:#0\/undefined) -> NumberToInt32(a)
-    // Number.parseInt(a:safe-integer,b:#10\/undefined) -> NumberToInt32(a)
-    Node* input = r.GetJSCallInput(0);
-    Node* value = graph()->NewNode(simplified()->NumberToInt32(), input);
+    // Number.parseInt(a:safe-integer) -> a
+    // Number.parseInt(a:safe-integer,b:#0\/undefined) -> a
+    // Number.parseInt(a:safe-integer,b:#10\/undefined) -> a
+    Node* value = r.GetJSCallInput(0);
     return Replace(value);
   }
   return NoChange();
@@ -949,7 +1509,7 @@
   // the lowering below.
   for (Node* dominator = effect;;) {
     if (dominator->opcode() == IrOpcode::kCheckString &&
-        dominator->InputAt(0) == receiver) {
+        IsSame(dominator->InputAt(0), receiver)) {
       return dominator;
     }
     if (dominator->op()->EffectInputCount() != 1) {
@@ -1058,6 +1618,46 @@
   return NoChange();
 }
 
+Reduction JSBuiltinReducer::ReduceStringIterator(Node* node) {
+  if (Node* receiver = GetStringWitness(node)) {
+    Node* effect = NodeProperties::GetEffectInput(node);
+    Node* control = NodeProperties::GetControlInput(node);
+
+    Node* map = jsgraph()->HeapConstant(
+        handle(native_context()->string_iterator_map(), isolate()));
+
+    // Allocate the new JSStringIterator instance.
+    effect = graph()->NewNode(
+        common()->BeginRegion(RegionObservability::kNotObservable), effect);
+    Node* value = effect = graph()->NewNode(
+        simplified()->Allocate(NOT_TENURED),
+        jsgraph()->Constant(JSStringIterator::kSize), effect, control);
+    effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
+                              value, map, effect, control);
+    effect = graph()->NewNode(
+        simplified()->StoreField(AccessBuilder::ForJSObjectProperties()), value,
+        jsgraph()->EmptyFixedArrayConstant(), effect, control);
+    effect = graph()->NewNode(
+        simplified()->StoreField(AccessBuilder::ForJSObjectElements()), value,
+        jsgraph()->EmptyFixedArrayConstant(), effect, control);
+
+    // Attach the iterator to the string being iterated.
+    effect = graph()->NewNode(
+        simplified()->StoreField(AccessBuilder::ForJSStringIteratorString()),
+        value, receiver, effect, control);
+    effect = graph()->NewNode(
+        simplified()->StoreField(AccessBuilder::ForJSStringIteratorIndex()),
+        value, jsgraph()->SmiConstant(0), effect, control);
+
+    value = effect = graph()->NewNode(common()->FinishRegion(), value, effect);
+
+    // Replace the original node with the freshly built iterator.
+    ReplaceWithValue(node, value, effect, control);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
 Reduction JSBuiltinReducer::ReduceStringIteratorNext(Node* node) {
   Node* receiver = NodeProperties::GetValueInput(node, 1);
   Node* effect = NodeProperties::GetEffectInput(node);
@@ -1090,11 +1690,11 @@
                                     index, if_true0);
 
       // branch1: if ((lead & 0xFC00) === 0xD800)
-      Node* check1 = graph()->NewNode(
-          simplified()->NumberEqual(),
-          graph()->NewNode(simplified()->NumberBitwiseAnd(), lead,
-                           jsgraph()->Int32Constant(0xFC00)),
-          jsgraph()->Int32Constant(0xD800));
+      Node* check1 =
+          graph()->NewNode(simplified()->NumberEqual(),
+                           graph()->NewNode(simplified()->NumberBitwiseAnd(),
+                                            lead, jsgraph()->Constant(0xFC00)),
+                           jsgraph()->Constant(0xD800));
       Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
                                        check1, if_true0);
       Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
@@ -1116,8 +1716,8 @@
           Node* check3 = graph()->NewNode(
               simplified()->NumberEqual(),
               graph()->NewNode(simplified()->NumberBitwiseAnd(), trail,
-                               jsgraph()->Int32Constant(0xFC00)),
-              jsgraph()->Int32Constant(0xDC00));
+                               jsgraph()->Constant(0xFC00)),
+              jsgraph()->Constant(0xDC00));
           Node* branch3 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
                                            check3, if_true2);
           Node* if_true3 = graph()->NewNode(common()->IfTrue(), branch3);
@@ -1128,11 +1728,11 @@
 // Need to swap the order for big-endian platforms
 #if V8_TARGET_BIG_ENDIAN
                 graph()->NewNode(simplified()->NumberShiftLeft(), lead,
-                                 jsgraph()->Int32Constant(16)),
+                                 jsgraph()->Constant(16)),
                 trail);
 #else
                 graph()->NewNode(simplified()->NumberShiftLeft(), trail,
-                                 jsgraph()->Int32Constant(16)),
+                                 jsgraph()->Constant(16)),
                 lead);
 #endif
           }
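
The masks in the StringIteratorNext lowering above implement standard UTF-16 surrogate detection, and the endian split packs the pair into one 32-bit value (trail unit in the high half on little-endian targets). The arithmetic in isolation:

#include <cassert>
#include <cstdint>

bool IsLeadSurrogate(uint16_t unit) { return (unit & 0xFC00) == 0xD800; }
bool IsTrailSurrogate(uint16_t unit) { return (unit & 0xFC00) == 0xDC00; }

// Little-endian packing used above: trail unit in the high 16 bits.
uint32_t PackPairLittleEndian(uint16_t lead, uint16_t trail) {
  return (static_cast<uint32_t>(trail) << 16) | lead;
}

int main() {
  // U+1F600 (a non-BMP code point) is the surrogate pair D83D DE00.
  assert(IsLeadSurrogate(0xD83D) && IsTrailSurrogate(0xDE00));
  assert(!IsLeadSurrogate(0x0041));  // 'A' is an ordinary BMP code unit
  assert(PackPairLittleEndian(0xD83D, 0xDE00) == 0xDE00D83Du);
  return 0;
}
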
@@ -1234,6 +1834,14 @@
   // Dispatch according to the BuiltinFunctionId if present.
   if (!r.HasBuiltinFunctionId()) return NoChange();
   switch (r.GetBuiltinFunctionId()) {
+    case kArrayEntries:
+      return ReduceArrayIterator(node, IterationKind::kEntries);
+    case kArrayKeys:
+      return ReduceArrayIterator(node, IterationKind::kKeys);
+    case kArrayValues:
+      return ReduceArrayIterator(node, IterationKind::kValues);
+    case kArrayIteratorNext:
+      return ReduceArrayIteratorNext(node);
     case kArrayPop:
       return ReduceArrayPop(node);
     case kArrayPush:
@@ -1370,6 +1978,8 @@
       return ReduceStringCharAt(node);
     case kStringCharCodeAt:
       return ReduceStringCharCodeAt(node);
+    case kStringIterator:
+      return ReduceStringIterator(node);
     case kStringIteratorNext:
       return ReduceStringIteratorNext(node);
     case kDataViewByteLength:
@@ -1391,6 +2001,12 @@
     case kTypedArrayLength:
       return ReduceArrayBufferViewAccessor(
           node, JS_TYPED_ARRAY_TYPE, AccessBuilder::ForJSTypedArrayLength());
+    case kTypedArrayEntries:
+      return ReduceTypedArrayIterator(node, IterationKind::kEntries);
+    case kTypedArrayKeys:
+      return ReduceTypedArrayIterator(node, IterationKind::kKeys);
+    case kTypedArrayValues:
+      return ReduceTypedArrayIterator(node, IterationKind::kValues);
     default:
       break;
   }
diff --git a/src/compiler/js-builtin-reducer.h b/src/compiler/js-builtin-reducer.h
index 524d006..4af3084 100644
--- a/src/compiler/js-builtin-reducer.h
+++ b/src/compiler/js-builtin-reducer.h
@@ -5,8 +5,10 @@
 #ifndef V8_COMPILER_JS_BUILTIN_REDUCER_H_
 #define V8_COMPILER_JS_BUILTIN_REDUCER_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/base/flags.h"
 #include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -25,7 +27,8 @@
 class SimplifiedOperatorBuilder;
 class TypeCache;
 
-class JSBuiltinReducer final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE JSBuiltinReducer final
+    : public NON_EXPORTED_BASE(AdvancedReducer) {
  public:
   // Flags that control the mode of operation.
   enum Flag {
@@ -35,12 +38,23 @@
   typedef base::Flags<Flag> Flags;
 
   JSBuiltinReducer(Editor* editor, JSGraph* jsgraph, Flags flags,
-                   CompilationDependencies* dependencies);
+                   CompilationDependencies* dependencies,
+                   Handle<Context> native_context);
   ~JSBuiltinReducer() final {}
 
   Reduction Reduce(Node* node) final;
 
  private:
+  Reduction ReduceArrayIterator(Node* node, IterationKind kind);
+  Reduction ReduceTypedArrayIterator(Node* node, IterationKind kind);
+  Reduction ReduceArrayIterator(Handle<Map> receiver_map, Node* node,
+                                IterationKind kind,
+                                ArrayIteratorKind iter_kind);
+  Reduction ReduceArrayIteratorNext(Node* node);
+  Reduction ReduceFastArrayIteratorNext(Handle<Map> iterator_map, Node* node,
+                                        IterationKind kind);
+  Reduction ReduceTypedArrayIteratorNext(Handle<Map> iterator_map, Node* node,
+                                         IterationKind kind);
   Reduction ReduceArrayPop(Node* node);
   Reduction ReduceArrayPush(Node* node);
   Reduction ReduceDateGetTime(Node* node);
@@ -88,6 +102,7 @@
   Reduction ReduceStringCharAt(Node* node);
   Reduction ReduceStringCharCodeAt(Node* node);
   Reduction ReduceStringFromCharCode(Node* node);
+  Reduction ReduceStringIterator(Node* node);
   Reduction ReduceStringIteratorNext(Node* node);
   Reduction ReduceArrayBufferViewAccessor(Node* node,
                                           InstanceType instance_type,
@@ -101,6 +116,7 @@
   Factory* factory() const;
   JSGraph* jsgraph() const { return jsgraph_; }
   Isolate* isolate() const;
+  Handle<Context> native_context() const { return native_context_; }
   CommonOperatorBuilder* common() const;
   SimplifiedOperatorBuilder* simplified() const;
   JSOperatorBuilder* javascript() const;
@@ -109,6 +125,7 @@
   CompilationDependencies* const dependencies_;
   Flags const flags_;
   JSGraph* const jsgraph_;
+  Handle<Context> const native_context_;
   TypeCache const& type_cache_;
 };
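
The header now exports the reducer for component builds. The two annotations follow the usual Chromium pattern: V8_EXPORT_PRIVATE marks the class for export from the shared library, and NON_EXPORTED_BASE silences the MSVC warning about deriving an exported class from a non-exported base. A sketch of how such macros are commonly defined; the exact definitions live in src/base/compiler-specific.h and src/globals.h and may differ:

// Hypothetical definitions illustrating the usual shape of these macros,
// not V8's verbatim ones.
#if defined(_WIN32) && defined(BUILDING_V8_SHARED)
#define V8_EXPORT_PRIVATE __declspec(dllexport)
// Suppresses C4275 ("non dll-interface class used as base") at the base
// specifier without disabling the warning globally.
#define NON_EXPORTED_BASE(code) __pragma(warning(suppress : 4275)) code
#elif defined(__GNUC__)
#define V8_EXPORT_PRIVATE __attribute__((visibility("default")))
#define NON_EXPORTED_BASE(code) code
#else
#define V8_EXPORT_PRIVATE
#define NON_EXPORTED_BASE(code) code
#endif
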
 
diff --git a/src/compiler/js-call-reducer.cc b/src/compiler/js-call-reducer.cc
index dd8f064..e48fce9 100644
--- a/src/compiler/js-call-reducer.cc
+++ b/src/compiler/js-call-reducer.cc
@@ -189,12 +189,72 @@
   return reduction.Changed() ? reduction : Changed(node);
 }
 
+namespace {
+
+// TODO(turbofan): Shall we move this to the NodeProperties? Or some (untyped)
+// alias analyzer?
+bool IsSame(Node* a, Node* b) {
+  if (a == b) {
+    return true;
+  } else if (a->opcode() == IrOpcode::kCheckHeapObject) {
+    return IsSame(a->InputAt(0), b);
+  } else if (b->opcode() == IrOpcode::kCheckHeapObject) {
+    return IsSame(a, b->InputAt(0));
+  }
+  return false;
+}
+
+// TODO(turbofan): Share with similar functionality in JSInliningHeuristic
+// and JSNativeContextSpecialization, i.e. move to NodeProperties helper?!
+MaybeHandle<Map> InferReceiverMap(Node* node) {
+  Node* receiver = NodeProperties::GetValueInput(node, 1);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  // Check if the {node} is dominated by a CheckMaps with a single map
+  // for the {receiver}, and if so use that map for the lowering below.
+  for (Node* dominator = effect;;) {
+    if (dominator->opcode() == IrOpcode::kCheckMaps &&
+        IsSame(dominator->InputAt(0), receiver)) {
+      if (dominator->op()->ValueInputCount() == 2) {
+        HeapObjectMatcher m(dominator->InputAt(1));
+        if (m.HasValue()) return Handle<Map>::cast(m.Value());
+      }
+      return MaybeHandle<Map>();
+    }
+    if (dominator->op()->EffectInputCount() != 1) {
+      // Didn't find any appropriate CheckMaps node.
+      return MaybeHandle<Map>();
+    }
+    dominator = NodeProperties::GetEffectInput(dominator);
+  }
+}
+
+}  // namespace
+
+// ES6 section B.2.2.1.1 get Object.prototype.__proto__
+Reduction JSCallReducer::ReduceObjectPrototypeGetProto(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+
+  // Try to determine the {receiver} map.
+  Handle<Map> receiver_map;
+  if (InferReceiverMap(node).ToHandle(&receiver_map)) {
+    // Check if we can constant-fold the {receiver} map.
+    if (!receiver_map->IsJSProxyMap() &&
+        !receiver_map->has_hidden_prototype() &&
+        !receiver_map->is_access_check_needed()) {
+      Handle<Object> receiver_prototype(receiver_map->prototype(), isolate());
+      Node* value = jsgraph()->Constant(receiver_prototype);
+      ReplaceWithValue(node, value);
+      return Replace(value);
+    }
+  }
+
+  return NoChange();
+}
 
 Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
   DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
   CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
   Node* target = NodeProperties::GetValueInput(node, 0);
-  Node* context = NodeProperties::GetContextInput(node);
   Node* control = NodeProperties::GetControlInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
 
@@ -215,26 +275,23 @@
       }
 
       // Check for known builtin functions.
-      if (shared->HasBuiltinFunctionId()) {
-        switch (shared->builtin_function_id()) {
-          case kFunctionApply:
-            return ReduceFunctionPrototypeApply(node);
-          case kFunctionCall:
-            return ReduceFunctionPrototypeCall(node);
-          default:
-            break;
-        }
+      switch (shared->code()->builtin_index()) {
+        case Builtins::kFunctionPrototypeApply:
+          return ReduceFunctionPrototypeApply(node);
+        case Builtins::kFunctionPrototypeCall:
+          return ReduceFunctionPrototypeCall(node);
+        case Builtins::kNumberConstructor:
+          return ReduceNumberConstructor(node);
+        case Builtins::kObjectPrototypeGetProto:
+          return ReduceObjectPrototypeGetProto(node);
+        default:
+          break;
       }
 
       // Check for the Array constructor.
       if (*function == function->native_context()->array_function()) {
         return ReduceArrayConstructor(node);
       }
-
-      // Check for the Number constructor.
-      if (*function == function->native_context()->number_function()) {
-        return ReduceNumberConstructor(node);
-      }
     } else if (m.Value()->IsJSBoundFunction()) {
       Handle<JSBoundFunction> function =
           Handle<JSBoundFunction>::cast(m.Value());
@@ -298,19 +355,8 @@
   Handle<Object> feedback(nexus.GetFeedback(), isolate());
   if (feedback->IsAllocationSite()) {
     // Retrieve the Array function from the {node}.
-    Node* array_function;
-    Handle<Context> native_context;
-    if (GetNativeContext(node).ToHandle(&native_context)) {
-      array_function = jsgraph()->HeapConstant(
-          handle(native_context->array_function(), isolate()));
-    } else {
-      Node* native_context = effect = graph()->NewNode(
-          javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-          context, context, effect);
-      array_function = effect = graph()->NewNode(
-          javascript()->LoadContext(0, Context::ARRAY_FUNCTION_INDEX, true),
-          native_context, native_context, effect);
-    }
+    Node* array_function = jsgraph()->HeapConstant(
+        handle(native_context()->array_function(), isolate()));
 
     // Check that the {target} is still the {array_function}.
     Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
@@ -353,7 +399,6 @@
   int const arity = static_cast<int>(p.arity() - 2);
   Node* target = NodeProperties::GetValueInput(node, 0);
   Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
-  Node* context = NodeProperties::GetContextInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
 
@@ -413,19 +458,8 @@
     Handle<AllocationSite> site = Handle<AllocationSite>::cast(feedback);
 
     // Retrieve the Array function from the {node}.
-    Node* array_function;
-    Handle<Context> native_context;
-    if (GetNativeContext(node).ToHandle(&native_context)) {
-      array_function = jsgraph()->HeapConstant(
-          handle(native_context->array_function(), isolate()));
-    } else {
-      Node* native_context = effect = graph()->NewNode(
-          javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-          context, context, effect);
-      array_function = effect = graph()->NewNode(
-          javascript()->LoadContext(0, Context::ARRAY_FUNCTION_INDEX, true),
-          native_context, native_context, effect);
-    }
+    Node* array_function = jsgraph()->HeapConstant(
+        handle(native_context()->array_function(), isolate()));
 
     // Check that the {target} is still the {array_function}.
     Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
@@ -469,25 +503,14 @@
   return NoChange();
 }
 
-
-MaybeHandle<Context> JSCallReducer::GetNativeContext(Node* node) {
-  Node* const context = NodeProperties::GetContextInput(node);
-  return NodeProperties::GetSpecializationNativeContext(context,
-                                                        native_context());
-}
-
-
 Graph* JSCallReducer::graph() const { return jsgraph()->graph(); }
 
-
 Isolate* JSCallReducer::isolate() const { return jsgraph()->isolate(); }
 
-
 CommonOperatorBuilder* JSCallReducer::common() const {
   return jsgraph()->common();
 }
 
-
 JSOperatorBuilder* JSCallReducer::javascript() const {
   return jsgraph()->javascript();
 }
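
The heart of the new __proto__ folding above is InferReceiverMap: starting at the call's effect input, it walks the effect chain upward until it meets a CheckMaps guarding the same receiver (IsSame looks through intervening CheckHeapObject nodes); a single-map hit makes receiver_map->prototype() a compile-time constant. A self-contained toy model of that walk, with hypothetical Node/Op types rather than V8's:

#include <cstdio>

// Toy effect-chain walk in the spirit of InferReceiverMap: follow the
// single effect input upward until a CheckMaps on the receiver is found.
enum class Op { kStart, kCheckHeapObject, kCheckMaps, kOther };

struct Node {
  Op op;
  Node* value_input = nullptr;   // checked value (for check nodes)
  Node* effect_input = nullptr;  // previous node in the effect chain
  int map_id = -1;               // map guarded by kCheckMaps
};

// Look through CheckHeapObject when comparing values, as IsSame does.
bool IsSame(Node* a, Node* b) {
  if (a == b) return true;
  if (a->op == Op::kCheckHeapObject) return IsSame(a->value_input, b);
  if (b->op == Op::kCheckHeapObject) return IsSame(a, b->value_input);
  return false;
}

// Returns the map id of a dominating CheckMaps for {receiver}, or -1.
int InferReceiverMap(Node* effect, Node* receiver) {
  for (Node* n = effect; n != nullptr; n = n->effect_input) {
    if (n->op == Op::kCheckMaps && IsSame(n->value_input, receiver))
      return n->map_id;
    if (n->op == Op::kStart) break;  // ran out of effect inputs
  }
  return -1;
}

int main() {
  Node start{Op::kStart};
  Node receiver{Op::kOther};
  Node check{Op::kCheckHeapObject, &receiver, &start};
  Node maps{Op::kCheckMaps, &check, &check, /*map_id=*/42};
  std::printf("%d\n", InferReceiverMap(&maps, &receiver));  // 42
}
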
diff --git a/src/compiler/js-call-reducer.h b/src/compiler/js-call-reducer.h
index 0c3835c..81153f9 100644
--- a/src/compiler/js-call-reducer.h
+++ b/src/compiler/js-call-reducer.h
@@ -31,7 +31,7 @@
   typedef base::Flags<Flag> Flags;
 
   JSCallReducer(Editor* editor, JSGraph* jsgraph, Flags flags,
-                MaybeHandle<Context> native_context)
+                Handle<Context> native_context)
       : AdvancedReducer(editor),
         jsgraph_(jsgraph),
         flags_(flags),
@@ -44,23 +44,22 @@
   Reduction ReduceNumberConstructor(Node* node);
   Reduction ReduceFunctionPrototypeApply(Node* node);
   Reduction ReduceFunctionPrototypeCall(Node* node);
+  Reduction ReduceObjectPrototypeGetProto(Node* node);
   Reduction ReduceJSCallConstruct(Node* node);
   Reduction ReduceJSCallFunction(Node* node);
 
-  MaybeHandle<Context> GetNativeContext(Node* node);
-
   Graph* graph() const;
   Flags flags() const { return flags_; }
   JSGraph* jsgraph() const { return jsgraph_; }
   Isolate* isolate() const;
-  MaybeHandle<Context> native_context() const { return native_context_; }
+  Handle<Context> native_context() const { return native_context_; }
   CommonOperatorBuilder* common() const;
   JSOperatorBuilder* javascript() const;
   SimplifiedOperatorBuilder* simplified() const;
 
   JSGraph* const jsgraph_;
   Flags const flags_;
-  MaybeHandle<Context> const native_context_;
+  Handle<Context> const native_context_;
 };
 
 DEFINE_OPERATORS_FOR_FLAGS(JSCallReducer::Flags)
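
Replacing MaybeHandle<Context> with Handle<Context> encodes the invariant that the reducer now always runs with a known native context, which is why the GetNativeContext fallback above could be deleted. For readers unfamiliar with the distinction, a minimal toy model (not V8's handle implementation):

#include <cassert>

// Toy model of the Handle/MaybeHandle distinction: a MaybeHandle may be
// empty and forces an explicit ToHandle() check before use, while a
// Handle is always valid. Passing Handle<Context> directly removes the
// "maybe empty" branch from every reducer method.
template <typename T>
struct Handle {
  T* location;
  T* operator->() const { return location; }
};

template <typename T>
struct MaybeHandle {
  T* location = nullptr;
  bool ToHandle(Handle<T>* out) const {
    if (location == nullptr) return false;
    out->location = location;
    return true;
  }
};

struct Context { int id; };

int main() {
  Context native{1};
  MaybeHandle<Context> maybe{&native};
  Handle<Context> handle;
  assert(maybe.ToHandle(&handle) && handle->id == 1);
}
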
diff --git a/src/compiler/js-create-lowering.cc b/src/compiler/js-create-lowering.cc
index b68bb70..c54b76b 100644
--- a/src/compiler/js-create-lowering.cc
+++ b/src/compiler/js-create-lowering.cc
@@ -36,12 +36,16 @@
         control_(control) {}
 
   // Primitive allocation of static size.
-  void Allocate(int size, PretenureFlag pretenure = NOT_TENURED) {
+  void Allocate(int size, PretenureFlag pretenure = NOT_TENURED,
+                Type* type = Type::Any()) {
     effect_ = graph()->NewNode(
         common()->BeginRegion(RegionObservability::kNotObservable), effect_);
     allocation_ =
         graph()->NewNode(simplified()->Allocate(pretenure),
                          jsgraph()->Constant(size), effect_, control_);
+    // TODO(turbofan): Maybe we should put the Type* onto the Allocate operator
+    // at some point, or maybe we should have a completely different story.
+    NodeProperties::SetType(allocation_, type);
     effect_ = allocation_;
   }
 
@@ -65,7 +69,7 @@
     int size = (map->instance_type() == FIXED_ARRAY_TYPE)
                    ? FixedArray::SizeFor(length)
                    : FixedDoubleArray::SizeFor(length);
-    Allocate(size, pretenure);
+    Allocate(size, pretenure, Type::OtherInternal());
     Store(AccessBuilder::ForMap(), map);
     Store(AccessBuilder::ForFixedArrayLength(), jsgraph()->Constant(length));
   }
@@ -206,6 +210,8 @@
       return ReduceJSCreateClosure(node);
     case IrOpcode::kJSCreateIterResultObject:
       return ReduceJSCreateIterResultObject(node);
+    case IrOpcode::kJSCreateKeyValueArray:
+      return ReduceJSCreateKeyValueArray(node);
     case IrOpcode::kJSCreateLiteralArray:
     case IrOpcode::kJSCreateLiteralObject:
       return ReduceJSCreateLiteral(node);
@@ -231,13 +237,12 @@
   Type* const new_target_type = NodeProperties::GetType(new_target);
   Node* const effect = NodeProperties::GetEffectInput(node);
   // Extract constructor and original constructor function.
-  if (target_type->IsConstant() &&
-      new_target_type->IsConstant() &&
-      new_target_type->AsConstant()->Value()->IsJSFunction()) {
+  if (target_type->IsHeapConstant() && new_target_type->IsHeapConstant() &&
+      new_target_type->AsHeapConstant()->Value()->IsJSFunction()) {
     Handle<JSFunction> constructor =
-        Handle<JSFunction>::cast(target_type->AsConstant()->Value());
+        Handle<JSFunction>::cast(target_type->AsHeapConstant()->Value());
     Handle<JSFunction> original_constructor =
-        Handle<JSFunction>::cast(new_target_type->AsConstant()->Value());
+        Handle<JSFunction>::cast(new_target_type->AsHeapConstant()->Value());
     DCHECK(constructor->IsConstructor());
     DCHECK(original_constructor->IsConstructor());
 
@@ -354,22 +359,18 @@
       Node* const elements = AllocateAliasedArguments(
           effect, control, args_state, context, shared, &has_aliased_arguments);
       effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
-      // Load the arguments object map from the current native context.
-      Node* const load_native_context = effect = graph()->NewNode(
-          javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-          context, context, effect);
-      Node* const load_arguments_map = effect = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForContextSlot(
-              has_aliased_arguments ? Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX
-                                    : Context::SLOPPY_ARGUMENTS_MAP_INDEX)),
-          load_native_context, effect, control);
+      // Load the arguments object map.
+      Node* const arguments_map = jsgraph()->HeapConstant(handle(
+          has_aliased_arguments ? native_context()->fast_aliased_arguments_map()
+                                : native_context()->sloppy_arguments_map(),
+          isolate()));
       // Actually allocate and initialize the arguments object.
       AllocationBuilder a(jsgraph(), effect, control);
       Node* properties = jsgraph()->EmptyFixedArrayConstant();
       int length = args_state_info.parameter_count() - 1;  // Minus receiver.
       STATIC_ASSERT(JSSloppyArgumentsObject::kSize == 5 * kPointerSize);
       a.Allocate(JSSloppyArgumentsObject::kSize);
-      a.Store(AccessBuilder::ForMap(), load_arguments_map);
+      a.Store(AccessBuilder::ForMap(), arguments_map);
       a.Store(AccessBuilder::ForJSObjectProperties(), properties);
       a.Store(AccessBuilder::ForJSObjectElements(), elements);
       a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
@@ -380,7 +381,6 @@
     } else if (type == CreateArgumentsType::kUnmappedArguments) {
       // Use inline allocation for all unmapped arguments objects within inlined
       // (i.e. non-outermost) frames, independent of the object size.
-      Node* const context = NodeProperties::GetContextInput(node);
       Node* effect = NodeProperties::GetEffectInput(node);
       // Choose the correct frame state and frame state info depending on
       // whether there conceptually is an arguments adaptor frame in the call
@@ -390,21 +390,16 @@
       // Prepare element backing store to be used by arguments object.
       Node* const elements = AllocateArguments(effect, control, args_state);
       effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
-      // Load the arguments object map from the current native context.
-      Node* const load_native_context = effect = graph()->NewNode(
-          javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-          context, context, effect);
-      Node* const load_arguments_map = effect = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForContextSlot(
-              Context::STRICT_ARGUMENTS_MAP_INDEX)),
-          load_native_context, effect, control);
+      // Load the arguments object map.
+      Node* const arguments_map = jsgraph()->HeapConstant(
+          handle(native_context()->strict_arguments_map(), isolate()));
       // Actually allocate and initialize the arguments object.
       AllocationBuilder a(jsgraph(), effect, control);
       Node* properties = jsgraph()->EmptyFixedArrayConstant();
       int length = args_state_info.parameter_count() - 1;  // Minus receiver.
       STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
       a.Allocate(JSStrictArgumentsObject::kSize);
-      a.Store(AccessBuilder::ForMap(), load_arguments_map);
+      a.Store(AccessBuilder::ForMap(), arguments_map);
       a.Store(AccessBuilder::ForJSObjectProperties(), properties);
       a.Store(AccessBuilder::ForJSObjectElements(), elements);
       a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
@@ -417,7 +412,6 @@
       int start_index = shared->internal_formal_parameter_count();
       // Use inline allocation for all unmapped arguments objects within inlined
       // (i.e. non-outermost) frames, independent of the object size.
-      Node* const context = NodeProperties::GetContextInput(node);
       Node* effect = NodeProperties::GetEffectInput(node);
       // Choose the correct frame state and frame state info depending on
       // whether there conceptually is an arguments adaptor frame in the call
@@ -428,14 +422,9 @@
       Node* const elements =
           AllocateRestArguments(effect, control, args_state, start_index);
       effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
-      // Load the JSArray object map from the current native context.
-      Node* const load_native_context = effect = graph()->NewNode(
-          javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-          context, context, effect);
-      Node* const load_jsarray_map = effect = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForContextSlot(
-              Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX)),
-          load_native_context, effect, control);
+      // Load the JSArray object map.
+      Node* const jsarray_map = jsgraph()->HeapConstant(handle(
+          native_context()->js_array_fast_elements_map_index(), isolate()));
       // Actually allocate and initialize the jsarray.
       AllocationBuilder a(jsgraph(), effect, control);
       Node* properties = jsgraph()->EmptyFixedArrayConstant();
@@ -445,7 +434,7 @@
       int length = std::max(0, argument_count - start_index);
       STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
       a.Allocate(JSArray::kSize);
-      a.Store(AccessBuilder::ForMap(), load_jsarray_map);
+      a.Store(AccessBuilder::ForMap(), jsarray_map);
       a.Store(AccessBuilder::ForJSObjectProperties(), properties);
       a.Store(AccessBuilder::ForJSObjectElements(), elements);
       a.Store(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS),
@@ -463,7 +452,6 @@
                                            int capacity,
                                            Handle<AllocationSite> site) {
   DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
-  Node* context = NodeProperties::GetContextInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
 
@@ -479,13 +467,10 @@
   dependencies()->AssumeTenuringDecision(site);
   dependencies()->AssumeTransitionStable(site);
 
-  // Retrieve the initial map for the array from the appropriate native context.
-  Node* native_context = effect = graph()->NewNode(
-      javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-      context, context, effect);
-  Node* js_array_map = effect = graph()->NewNode(
-      javascript()->LoadContext(0, Context::ArrayMapIndex(elements_kind), true),
-      native_context, native_context, effect);
+  // Retrieve the initial map for the array.
+  int const array_map_index = Context::ArrayMapIndex(elements_kind);
+  Node* js_array_map = jsgraph()->HeapConstant(
+      handle(Map::cast(native_context()->get(array_map_index)), isolate()));
 
   // Setup elements and properties.
   Node* elements;
@@ -528,7 +513,7 @@
         CallDescriptor::kNeedsFrameState);
     node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
     node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
-    node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(0));
+    node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(0));
     node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
     NodeProperties::ChangeOp(node, common()->Call(desc));
     return Changed(node);
@@ -546,7 +531,7 @@
           CallDescriptor::kNeedsFrameState);
       node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
       node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
-      node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(1));
+      node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(1));
       node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
       NodeProperties::ChangeOp(node, common()->Call(desc));
       return Changed(node);
@@ -577,7 +562,7 @@
       Node* inputs[] = {jsgraph()->HeapConstant(stub.GetCode()),
                         node->InputAt(1),
                         jsgraph()->HeapConstant(site),
-                        jsgraph()->Int32Constant(1),
+                        jsgraph()->Constant(1),
                         jsgraph()->UndefinedConstant(),
                         length,
                         context,
@@ -601,7 +586,7 @@
       Node* inputs[] = {jsgraph()->HeapConstant(stub.GetCode()),
                         node->InputAt(1),
                         jsgraph()->HeapConstant(site),
-                        jsgraph()->Int32Constant(1),
+                        jsgraph()->Constant(1),
                         jsgraph()->UndefinedConstant(),
                         length,
                         context,
@@ -632,7 +617,7 @@
       CallDescriptor::kNeedsFrameState);
   node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
   node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
-  node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
+  node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
   node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
   NodeProperties::ChangeOp(node, common()->Call(desc));
   return Changed(node);
@@ -685,18 +670,14 @@
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
   Node* context = NodeProperties::GetContextInput(node);
-  Node* native_context = effect = graph()->NewNode(
-      javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-      context, context, effect);
-  int function_map_index =
+  int const function_map_index =
       Context::FunctionMapIndex(shared->language_mode(), shared->kind());
-  Node* function_map = effect =
-      graph()->NewNode(javascript()->LoadContext(0, function_map_index, true),
-                       native_context, native_context, effect);
+  Node* function_map = jsgraph()->HeapConstant(
+      handle(Map::cast(native_context()->get(function_map_index)), isolate()));
   // Note that it is only safe to embed the raw entry point of the compile
   // lazy stub into the code, because that stub is immortal and immovable.
-  Node* compile_entry = jsgraph()->IntPtrConstant(reinterpret_cast<intptr_t>(
-      jsgraph()->isolate()->builtins()->CompileLazy()->entry()));
+  Node* compile_entry = jsgraph()->PointerConstant(
+      jsgraph()->isolate()->builtins()->CompileLazy()->entry());
   Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
   Node* empty_literals_array = jsgraph()->EmptyLiteralsArrayConstant();
   Node* the_hole = jsgraph()->TheHoleConstant();
@@ -724,23 +705,8 @@
   Node* done = NodeProperties::GetValueInput(node, 1);
   Node* effect = NodeProperties::GetEffectInput(node);
 
-  Node* iterator_result_map;
-  Handle<Context> native_context;
-  if (GetSpecializationNativeContext(node).ToHandle(&native_context)) {
-    // Specialize to the constant JSIteratorResult map to enable map check
-    // elimination to eliminate subsequent checks in case of inlining.
-    iterator_result_map = jsgraph()->HeapConstant(
-        handle(native_context->iterator_result_map(), isolate()));
-  } else {
-    // Load the JSIteratorResult map for the {context}.
-    Node* context = NodeProperties::GetContextInput(node);
-    Node* native_context = effect = graph()->NewNode(
-        javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-        context, context, effect);
-    iterator_result_map = effect = graph()->NewNode(
-        javascript()->LoadContext(0, Context::ITERATOR_RESULT_MAP_INDEX, true),
-        native_context, native_context, effect);
-  }
+  Node* iterator_result_map = jsgraph()->HeapConstant(
+      handle(native_context()->iterator_result_map(), isolate()));
 
   // Emit code to allocate the JSIteratorResult instance.
   AllocationBuilder a(jsgraph(), effect, graph()->start());
@@ -757,6 +723,36 @@
   return Changed(node);
 }
 
+Reduction JSCreateLowering::ReduceJSCreateKeyValueArray(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCreateKeyValueArray, node->opcode());
+  Node* key = NodeProperties::GetValueInput(node, 0);
+  Node* value = NodeProperties::GetValueInput(node, 1);
+  Node* effect = NodeProperties::GetEffectInput(node);
+
+  Node* array_map = jsgraph()->HeapConstant(
+      handle(native_context()->js_array_fast_elements_map_index()));
+  Node* properties = jsgraph()->EmptyFixedArrayConstant();
+  Node* length = jsgraph()->Constant(2);
+
+  AllocationBuilder aa(jsgraph(), effect, graph()->start());
+  aa.AllocateArray(2, factory()->fixed_array_map());
+  aa.Store(AccessBuilder::ForFixedArrayElement(FAST_ELEMENTS),
+           jsgraph()->Constant(0), key);
+  aa.Store(AccessBuilder::ForFixedArrayElement(FAST_ELEMENTS),
+           jsgraph()->Constant(1), value);
+  Node* elements = aa.Finish();
+
+  AllocationBuilder a(jsgraph(), elements, graph()->start());
+  a.Allocate(JSArray::kSize);
+  a.Store(AccessBuilder::ForMap(), array_map);
+  a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+  a.Store(AccessBuilder::ForJSObjectElements(), elements);
+  a.Store(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS), length);
+  STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+  a.FinishAndChange(node);
+  return Changed(node);
+}
+
 Reduction JSCreateLowering::ReduceJSCreateLiteral(Node* node) {
   DCHECK(node->opcode() == IrOpcode::kJSCreateLiteralArray ||
          node->opcode() == IrOpcode::kJSCreateLiteralObject);
@@ -799,9 +795,6 @@
     Node* control = NodeProperties::GetControlInput(node);
     Node* context = NodeProperties::GetContextInput(node);
     Node* extension = jsgraph()->TheHoleConstant();
-    Node* native_context = effect = graph()->NewNode(
-        javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-        context, context, effect);
     AllocationBuilder a(jsgraph(), effect, control);
     STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);  // Ensure fully covered.
     int context_length = slot_count + Context::MIN_CONTEXT_SLOTS;
@@ -810,7 +803,7 @@
     a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
     a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
     a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
-            native_context);
+            jsgraph()->HeapConstant(native_context()));
     for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; ++i) {
       a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->UndefinedConstant());
     }
@@ -830,9 +823,6 @@
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
   Node* context = NodeProperties::GetContextInput(node);
-  Node* native_context = effect = graph()->NewNode(
-      javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-      context, context, effect);
 
   AllocationBuilder aa(jsgraph(), effect, control);
   aa.Allocate(ContextExtension::kSize);
@@ -848,7 +838,7 @@
   a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
   a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
   a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
-          native_context);
+          jsgraph()->HeapConstant(native_context()));
   RelaxControls(node);
   a.FinishAndChange(node);
   return Changed(node);
@@ -863,9 +853,6 @@
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
   Node* context = NodeProperties::GetContextInput(node);
-  Node* native_context = effect = graph()->NewNode(
-      javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-      context, context, effect);
 
   AllocationBuilder aa(jsgraph(), effect, control);
   aa.Allocate(ContextExtension::kSize);
@@ -884,7 +871,7 @@
   a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
   a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
   a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
-          native_context);
+          jsgraph()->HeapConstant(native_context()));
   a.Store(AccessBuilder::ForContextSlot(Context::THROWN_OBJECT_INDEX),
           exception);
   RelaxControls(node);
@@ -905,9 +892,7 @@
     Node* control = NodeProperties::GetControlInput(node);
     Node* context = NodeProperties::GetContextInput(node);
     Node* extension = jsgraph()->Constant(scope_info);
-    Node* native_context = effect = graph()->NewNode(
-        javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-        context, context, effect);
+
     AllocationBuilder a(jsgraph(), effect, control);
     STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);  // Ensure fully covered.
     a.AllocateArray(context_length, factory()->block_context_map());
@@ -915,7 +900,7 @@
     a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
     a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
     a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
-            native_context);
+            jsgraph()->HeapConstant(native_context()));
     for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; ++i) {
       a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->UndefinedConstant());
     }
@@ -1160,7 +1145,8 @@
 
   // Actually allocate and initialize the object.
   AllocationBuilder builder(jsgraph(), effect, control);
-  builder.Allocate(boilerplate_map->instance_size(), pretenure);
+  builder.Allocate(boilerplate_map->instance_size(), pretenure,
+                   Type::OtherObject());
   builder.Store(AccessBuilder::ForMap(), boilerplate_map);
   builder.Store(AccessBuilder::ForJSObjectProperties(), properties);
   builder.Store(AccessBuilder::ForJSObjectElements(), elements);
@@ -1225,7 +1211,7 @@
     Handle<FixedArray> elements =
         Handle<FixedArray>::cast(boilerplate_elements);
     for (int i = 0; i < elements_length; ++i) {
-      if (elements->is_the_hole(i)) {
+      if (elements->is_the_hole(isolate(), i)) {
         elements_values[i] = jsgraph()->TheHoleConstant();
       } else {
         Handle<Object> element_value(elements->get(i), isolate());
@@ -1280,13 +1266,6 @@
   return MaybeHandle<LiteralsArray>();
 }
 
-MaybeHandle<Context> JSCreateLowering::GetSpecializationNativeContext(
-    Node* node) {
-  Node* const context = NodeProperties::GetContextInput(node);
-  return NodeProperties::GetSpecializationNativeContext(context,
-                                                        native_context_);
-}
-
 Factory* JSCreateLowering::factory() const { return isolate()->factory(); }
 
 Graph* JSCreateLowering::graph() const { return jsgraph()->graph(); }
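
Two themes run through this file: native-context values are now embedded as HeapConstants instead of being reloaded through LoadContext chains, and inline allocations (including the new JSCreateKeyValueArray lowering) follow the begin-region / allocate / store-fields / finish discipline of AllocationBuilder. A toy, log-based sketch of that discipline mirroring the key-value array case; types and sizes are illustrative only:

#include <cstdio>
#include <string>
#include <vector>

// Toy version of the inline-allocation pattern: open a region, reserve
// the object, initialize every field, then close the region. While the
// region is open, the stores belong to the freshly allocated object.
struct AllocationBuilder {
  std::vector<std::string> log;
  void Allocate(int size, const char* type) {
    log.push_back("BeginRegion");
    log.push_back("Allocate " + std::to_string(size) + " as " + type);
  }
  void Store(const char* field, const std::string& value) {
    log.push_back(std::string("Store ") + field + " = " + value);
  }
  void Finish() { log.push_back("FinishRegion"); }
};

int main() {
  // Mirrors ReduceJSCreateKeyValueArray: first a FixedArray of length 2...
  AllocationBuilder elements;
  elements.Allocate(/*toy size=*/4 * 8, "FixedArray");
  elements.Store("map", "fixed_array_map");
  elements.Store("length", "2");
  elements.Store("element[0]", "key");
  elements.Store("element[1]", "value");
  elements.Finish();
  // ...then the JSArray wrapper pointing at it.
  AllocationBuilder array;
  array.Allocate(/*toy size=*/4 * 8, "JSArray");
  array.Store("map", "js_array_fast_elements_map");
  array.Store("properties", "empty_fixed_array");
  array.Store("elements", "<FixedArray above>");
  array.Store("length", "2");
  array.Finish();
  for (const auto& line : array.log) std::printf("%s\n", line.c_str());
}
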
diff --git a/src/compiler/js-create-lowering.h b/src/compiler/js-create-lowering.h
index 6248ca2..b5390f1 100644
--- a/src/compiler/js-create-lowering.h
+++ b/src/compiler/js-create-lowering.h
@@ -5,7 +5,9 @@
 #ifndef V8_COMPILER_JS_CREATE_LOWERING_H_
 #define V8_COMPILER_JS_CREATE_LOWERING_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -27,11 +29,12 @@
 
 
 // Lowers JSCreate-level operators to fast (inline) allocations.
-class JSCreateLowering final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE JSCreateLowering final
+    : public NON_EXPORTED_BASE(AdvancedReducer) {
  public:
   JSCreateLowering(Editor* editor, CompilationDependencies* dependencies,
                    JSGraph* jsgraph, MaybeHandle<LiteralsArray> literals_array,
-                   MaybeHandle<Context> native_context, Zone* zone)
+                   Handle<Context> native_context, Zone* zone)
       : AdvancedReducer(editor),
         dependencies_(dependencies),
         jsgraph_(jsgraph),
@@ -48,6 +51,7 @@
   Reduction ReduceJSCreateArray(Node* node);
   Reduction ReduceJSCreateClosure(Node* node);
   Reduction ReduceJSCreateIterResultObject(Node* node);
+  Reduction ReduceJSCreateKeyValueArray(Node* node);
   Reduction ReduceJSCreateLiteral(Node* node);
   Reduction ReduceJSCreateFunctionContext(Node* node);
   Reduction ReduceJSCreateWithContext(Node* node);
@@ -77,13 +81,12 @@
 
   // Infers the LiteralsArray to use for a given {node}.
   MaybeHandle<LiteralsArray> GetSpecializationLiterals(Node* node);
-  // Infers the native context to use for a given {node}.
-  MaybeHandle<Context> GetSpecializationNativeContext(Node* node);
 
   Factory* factory() const;
   Graph* graph() const;
   JSGraph* jsgraph() const { return jsgraph_; }
   Isolate* isolate() const;
+  Handle<Context> native_context() const { return native_context_; }
   JSOperatorBuilder* javascript() const;
   CommonOperatorBuilder* common() const;
   SimplifiedOperatorBuilder* simplified() const;
@@ -94,7 +97,7 @@
   CompilationDependencies* const dependencies_;
   JSGraph* const jsgraph_;
   MaybeHandle<LiteralsArray> const literals_array_;
-  MaybeHandle<Context> const native_context_;
+  Handle<Context> const native_context_;
   Zone* const zone_;
 };
 
diff --git a/src/compiler/js-frame-specialization.cc b/src/compiler/js-frame-specialization.cc
index 769d615..55ec1bf 100644
--- a/src/compiler/js-frame-specialization.cc
+++ b/src/compiler/js-frame-specialization.cc
@@ -16,6 +16,8 @@
   switch (node->opcode()) {
     case IrOpcode::kOsrValue:
       return ReduceOsrValue(node);
+    case IrOpcode::kOsrGuard:
+      return ReduceOsrGuard(node);
     case IrOpcode::kParameter:
       return ReduceParameter(node);
     default:
@@ -24,11 +26,10 @@
   return NoChange();
 }
 
-
 Reduction JSFrameSpecialization::ReduceOsrValue(Node* node) {
   DCHECK_EQ(IrOpcode::kOsrValue, node->opcode());
   Handle<Object> value;
-  int const index = OpParameter<int>(node);
+  int index = OsrValueIndexOf(node->op());
   int const parameters_count = frame()->ComputeParametersCount() + 1;
   if (index == Linkage::kOsrContextSpillSlotIndex) {
     value = handle(frame()->context(), isolate());
@@ -43,6 +44,12 @@
   return Replace(jsgraph()->Constant(value));
 }
 
+Reduction JSFrameSpecialization::ReduceOsrGuard(Node* node) {
+  DCHECK_EQ(IrOpcode::kOsrGuard, node->opcode());
+  ReplaceWithValue(node, node->InputAt(0),
+                   NodeProperties::GetEffectInput(node));
+  return Changed(node);
+}
 
 Reduction JSFrameSpecialization::ReduceParameter(Node* node) {
   DCHECK_EQ(IrOpcode::kParameter, node->opcode());
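
Frame specialization compiles against one concrete JavaScript frame, so OsrValue nodes become constants read out of that frame, and the new ReduceOsrGuard simply forwards the guard's input, since the guarded value is already known. A toy model of both reductions (hypothetical Node/Frame types, not V8's):

#include <cstdio>
#include <map>

// Toy frame specialization: OsrValue(index) nodes are replaced by the
// concrete values read out of the physical frame being OSR'd, and
// OsrGuard nodes collapse to their input because the value is known.
struct Frame { std::map<int, int> slots; };

struct Node {
  enum Kind { kOsrValue, kOsrGuard, kConstant } kind;
  int payload;  // slot index for kOsrValue, value for kConstant
  Node* input = nullptr;
};

Node Reduce(const Node& node, const Frame& frame) {
  switch (node.kind) {
    case Node::kOsrValue:
      return {Node::kConstant, frame.slots.at(node.payload)};
    case Node::kOsrGuard:
      return *node.input;  // forward the guarded value unchanged
    default:
      return node;
  }
}

int main() {
  Frame frame{{{0, 123}}};
  Node osr_value{Node::kOsrValue, /*slot index=*/0};
  Node guard{Node::kOsrGuard, 0, &osr_value};
  Node reduced = Reduce(Reduce(guard, frame), frame);
  std::printf("%d\n", reduced.payload);  // 123
}
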
diff --git a/src/compiler/js-frame-specialization.h b/src/compiler/js-frame-specialization.h
index 90b3ca5..daf6992 100644
--- a/src/compiler/js-frame-specialization.h
+++ b/src/compiler/js-frame-specialization.h
@@ -18,17 +18,18 @@
 // Forward declarations.
 class JSGraph;
 
-
-class JSFrameSpecialization final : public Reducer {
+class JSFrameSpecialization final : public AdvancedReducer {
  public:
-  JSFrameSpecialization(JavaScriptFrame const* frame, JSGraph* jsgraph)
-      : frame_(frame), jsgraph_(jsgraph) {}
+  JSFrameSpecialization(Editor* editor, JavaScriptFrame const* frame,
+                        JSGraph* jsgraph)
+      : AdvancedReducer(editor), frame_(frame), jsgraph_(jsgraph) {}
   ~JSFrameSpecialization() final {}
 
   Reduction Reduce(Node* node) final;
 
  private:
   Reduction ReduceOsrValue(Node* node);
+  Reduction ReduceOsrGuard(Node* node);
   Reduction ReduceParameter(Node* node);
 
   Isolate* isolate() const;
diff --git a/src/compiler/js-generic-lowering.cc b/src/compiler/js-generic-lowering.cc
index 22d6c86..250a9c2 100644
--- a/src/compiler/js-generic-lowering.cc
+++ b/src/compiler/js-generic-lowering.cc
@@ -116,6 +116,8 @@
 }
 
 void JSGenericLowering::LowerJSStrictEqual(Node* node) {
+  // The === operator doesn't need the current context.
+  NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
   Callable callable = CodeFactory::StrictEqual(isolate());
   node->RemoveInput(4);  // control
   ReplaceWithStubCall(node, callable, CallDescriptor::kNoFlags,
@@ -123,6 +125,8 @@
 }
 
 void JSGenericLowering::LowerJSStrictNotEqual(Node* node) {
+  // The !== operator doesn't need the current context.
+  NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
   Callable callable = CodeFactory::StrictNotEqual(isolate());
   node->RemoveInput(4);  // control
   ReplaceWithStubCall(node, callable, CallDescriptor::kNoFlags,
@@ -130,6 +134,8 @@
 }
 
 void JSGenericLowering::LowerJSToBoolean(Node* node) {
+  // The ToBoolean conversion doesn't need the current context.
+  NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
   Callable callable = CodeFactory::ToBoolean(isolate());
   node->AppendInput(zone(), graph()->start());
   ReplaceWithStubCall(node, callable, CallDescriptor::kNoAllocate,
@@ -137,6 +143,8 @@
 }
 
 void JSGenericLowering::LowerJSTypeOf(Node* node) {
+  // The typeof operator doesn't need the current context.
+  NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
   Callable callable = CodeFactory::Typeof(isolate());
   node->AppendInput(zone(), graph()->start());
   ReplaceWithStubCall(node, callable, CallDescriptor::kNoAllocate,
@@ -460,6 +468,9 @@
   ReplaceWithRuntimeCall(node, Runtime::kCreateIterResultObject);
 }
 
+void JSGenericLowering::LowerJSCreateKeyValueArray(Node* node) {
+  ReplaceWithRuntimeCall(node, Runtime::kCreateKeyValueArray);
+}
 
 void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
   CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
@@ -622,6 +633,14 @@
   NodeProperties::ChangeOp(node, machine()->Store(representation));
 }
 
+void JSGenericLowering::LowerJSLoadModule(Node* node) {
+  UNREACHABLE();  // Eliminated in typed lowering.
+}
+
+void JSGenericLowering::LowerJSStoreModule(Node* node) {
+  UNREACHABLE();  // Eliminated in typed lowering.
+}
+
 void JSGenericLowering::LowerJSGeneratorStore(Node* node) {
   UNREACHABLE();  // Eliminated in typed lowering.
 }
diff --git a/src/compiler/js-global-object-specialization.cc b/src/compiler/js-global-object-specialization.cc
index 10130f4..e9ff060 100644
--- a/src/compiler/js-global-object-specialization.cc
+++ b/src/compiler/js-global-object-specialization.cc
@@ -25,17 +25,15 @@
   int index;
 };
 
-
 JSGlobalObjectSpecialization::JSGlobalObjectSpecialization(
-    Editor* editor, JSGraph* jsgraph,
-    MaybeHandle<Context> native_context, CompilationDependencies* dependencies)
+    Editor* editor, JSGraph* jsgraph, Handle<JSGlobalObject> global_object,
+    CompilationDependencies* dependencies)
     : AdvancedReducer(editor),
       jsgraph_(jsgraph),
-      native_context_(native_context),
+      global_object_(global_object),
       dependencies_(dependencies),
       type_cache_(TypeCache::Get()) {}
 
-
 Reduction JSGlobalObjectSpecialization::Reduce(Node* node) {
   switch (node->opcode()) {
     case IrOpcode::kJSLoadGlobal:
@@ -71,14 +69,10 @@
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
 
-  // Retrieve the global object from the given {node}.
-  Handle<JSGlobalObject> global_object;
-  if (!GetGlobalObject(node).ToHandle(&global_object)) return NoChange();
-
   // Try to lookup the name on the script context table first (lexical scoping).
   ScriptContextTableLookupResult result;
-  if (LookupInScriptContextTable(global_object, name, &result)) {
-    if (result.context->is_the_hole(result.index)) return NoChange();
+  if (LookupInScriptContextTable(name, &result)) {
+    if (result.context->is_the_hole(isolate(), result.index)) return NoChange();
     Node* context = jsgraph()->HeapConstant(result.context);
     Node* value = effect = graph()->NewNode(
         javascript()->LoadContext(0, result.index, result.immutable), context,
@@ -89,7 +83,7 @@
 
   // Lookup on the global object instead.  We only deal with own data
   // properties of the global object here (represented as PropertyCell).
-  LookupIterator it(global_object, name, LookupIterator::OWN);
+  LookupIterator it(global_object(), name, LookupIterator::OWN);
   if (it.state() != LookupIterator::DATA) return NoChange();
   if (!it.GetHolder<JSObject>()->IsJSGlobalObject()) return NoChange();
   Handle<PropertyCell> property_cell = it.GetPropertyCell();
@@ -126,12 +120,10 @@
   if (property_details.cell_type() == PropertyCellType::kConstantType) {
     // Compute proper type based on the current value in the cell.
     if (property_cell_value->IsSmi()) {
-      property_cell_value_type = type_cache_.kSmi;
+      property_cell_value_type = Type::SignedSmall();
       representation = MachineRepresentation::kTaggedSigned;
     } else if (property_cell_value->IsNumber()) {
-      // TODO(mvstanton): Remove kHeapNumber from type cache, it's just
-      // Type::Number().
-      property_cell_value_type = type_cache_.kHeapNumber;
+      property_cell_value_type = Type::Number();
       representation = MachineRepresentation::kTaggedPointer;
     } else {
       // TODO(turbofan): Track the property_cell_value_map on the FieldAccess
@@ -158,14 +150,10 @@
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
 
-  // Retrieve the global object from the given {node}.
-  Handle<JSGlobalObject> global_object;
-  if (!GetGlobalObject(node).ToHandle(&global_object)) return NoChange();
-
   // Try to lookup the name on the script context table first (lexical scoping).
   ScriptContextTableLookupResult result;
-  if (LookupInScriptContextTable(global_object, name, &result)) {
-    if (result.context->is_the_hole(result.index)) return NoChange();
+  if (LookupInScriptContextTable(name, &result)) {
+    if (result.context->is_the_hole(isolate(), result.index)) return NoChange();
     if (result.immutable) return NoChange();
     Node* context = jsgraph()->HeapConstant(result.context);
     effect = graph()->NewNode(javascript()->StoreContext(0, result.index),
@@ -176,7 +164,7 @@
 
   // Lookup on the global object instead.  We only deal with own data
   // properties of the global object here (represented as PropertyCell).
-  LookupIterator it(global_object, name, LookupIterator::OWN);
+  LookupIterator it(global_object(), name, LookupIterator::OWN);
   if (it.state() != LookupIterator::DATA) return NoChange();
   if (!it.GetHolder<JSObject>()->IsJSGlobalObject()) return NoChange();
   Handle<PropertyCell> property_cell = it.GetPropertyCell();
@@ -256,21 +244,11 @@
   return Replace(value);
 }
 
-
-MaybeHandle<JSGlobalObject> JSGlobalObjectSpecialization::GetGlobalObject(
-    Node* node) {
-  Node* const context = NodeProperties::GetContextInput(node);
-  return NodeProperties::GetSpecializationGlobalObject(context,
-                                                       native_context());
-}
-
-
 bool JSGlobalObjectSpecialization::LookupInScriptContextTable(
-    Handle<JSGlobalObject> global_object, Handle<Name> name,
-    ScriptContextTableLookupResult* result) {
+    Handle<Name> name, ScriptContextTableLookupResult* result) {
   if (!name->IsString()) return false;
   Handle<ScriptContextTable> script_context_table(
-      global_object->native_context()->script_context_table(), isolate());
+      global_object()->native_context()->script_context_table(), isolate());
   ScriptContextTable::LookupResult lookup_result;
   if (!ScriptContextTable::Lookup(script_context_table,
                                   Handle<String>::cast(name), &lookup_result)) {
@@ -284,27 +262,22 @@
   return true;
 }
 
-
 Graph* JSGlobalObjectSpecialization::graph() const {
   return jsgraph()->graph();
 }
 
-
 Isolate* JSGlobalObjectSpecialization::isolate() const {
   return jsgraph()->isolate();
 }
 
-
 CommonOperatorBuilder* JSGlobalObjectSpecialization::common() const {
   return jsgraph()->common();
 }
 
-
 JSOperatorBuilder* JSGlobalObjectSpecialization::javascript() const {
   return jsgraph()->javascript();
 }
 
-
 SimplifiedOperatorBuilder* JSGlobalObjectSpecialization::simplified() const {
   return jsgraph()->simplified();
 }
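
The reducer now takes the global object directly: names resolve first against the script context table (lexical bindings), then as own data properties backed by PropertyCells, and kConstantType cells derive a machine representation from the value currently in the cell. A standalone sketch of that representation choice (toy enums, not V8's MachineRepresentation):

#include <cstdio>

// Toy version of the representation choice for kConstantType property
// cells: Smi-valued cells can be loaded as tagged-signed words, other
// numbers as tagged pointers (boxed heap numbers), and everything else
// stays fully tagged.
enum class Rep { kTaggedSigned, kTaggedPointer, kTagged };

struct Value { bool is_smi; bool is_number; };

Rep RepresentationFor(const Value& v) {
  if (v.is_smi) return Rep::kTaggedSigned;     // Type::SignedSmall()
  if (v.is_number) return Rep::kTaggedPointer; // Type::Number(), boxed
  return Rep::kTagged;
}

int main() {
  std::printf("%d\n", static_cast<int>(RepresentationFor({true, true})));
}
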
diff --git a/src/compiler/js-global-object-specialization.h b/src/compiler/js-global-object-specialization.h
index a6c511e..50bdd80 100644
--- a/src/compiler/js-global-object-specialization.h
+++ b/src/compiler/js-global-object-specialization.h
@@ -28,7 +28,7 @@
 class JSGlobalObjectSpecialization final : public AdvancedReducer {
  public:
   JSGlobalObjectSpecialization(Editor* editor, JSGraph* jsgraph,
-                               MaybeHandle<Context> native_context,
+                               Handle<JSGlobalObject> global_object,
                                CompilationDependencies* dependencies);
 
   Reduction Reduce(Node* node) final;
@@ -37,12 +37,8 @@
   Reduction ReduceJSLoadGlobal(Node* node);
   Reduction ReduceJSStoreGlobal(Node* node);
 
-  // Retrieve the global object from the given {node} if known.
-  MaybeHandle<JSGlobalObject> GetGlobalObject(Node* node);
-
   struct ScriptContextTableLookupResult;
-  bool LookupInScriptContextTable(Handle<JSGlobalObject> global_object,
-                                  Handle<Name> name,
+  bool LookupInScriptContextTable(Handle<Name> name,
                                   ScriptContextTableLookupResult* result);
 
   Graph* graph() const;
@@ -51,11 +47,11 @@
   CommonOperatorBuilder* common() const;
   JSOperatorBuilder* javascript() const;
   SimplifiedOperatorBuilder* simplified() const;
-  MaybeHandle<Context> native_context() const { return native_context_; }
+  Handle<JSGlobalObject> global_object() const { return global_object_; }
   CompilationDependencies* dependencies() const { return dependencies_; }
 
   JSGraph* const jsgraph_;
-  MaybeHandle<Context> native_context_;
+  Handle<JSGlobalObject> const global_object_;
   CompilationDependencies* const dependencies_;
   TypeCache const& type_cache_;
 
diff --git a/src/compiler/js-graph.cc b/src/compiler/js-graph.cc
index cafd047..8626cd1 100644
--- a/src/compiler/js-graph.cc
+++ b/src/compiler/js-graph.cc
@@ -242,6 +242,13 @@
   return *loc;
 }
 
+Node* JSGraph::PointerConstant(intptr_t value) {
+  Node** loc = cache_.FindPointerConstant(value);
+  if (*loc == nullptr) {
+    *loc = graph()->NewNode(common()->PointerConstant(value));
+  }
+  return *loc;
+}
 
 Node* JSGraph::ExternalConstant(ExternalReference reference) {
   Node** loc = cache_.FindExternalConstant(reference);
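
PointerConstant joins the other canonicalized constants: the cache lookup returns a slot, and the node is built only on a miss, so equal pointer values share one node. The same slot-returning pattern in a self-contained toy (hypothetical NodeCache, not V8's CommonNodeCache):

#include <cstdint>
#include <cstdio>
#include <unordered_map>

// Toy canonicalizing constant cache in the style of JSGraph: the lookup
// returns a slot (Node**); the caller fills it exactly once on a miss,
// so every later request for the same value yields the same node.
struct Node { intptr_t value; };

struct NodeCache {
  std::unordered_map<intptr_t, Node*> map;
  Node** Find(intptr_t value) { return &map[value]; }  // nullptr on miss
};

Node* PointerConstant(NodeCache* cache, intptr_t value) {
  Node** loc = cache->Find(value);
  if (*loc == nullptr) *loc = new Node{value};
  return *loc;
}

int main() {
  NodeCache cache;
  Node* a = PointerConstant(&cache, 0x1234);
  Node* b = PointerConstant(&cache, 0x1234);
  std::printf("%s\n", a == b ? "shared" : "distinct");  // shared
}
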
diff --git a/src/compiler/js-graph.h b/src/compiler/js-graph.h
index 9d6f27d..c2c0c77 100644
--- a/src/compiler/js-graph.h
+++ b/src/compiler/js-graph.h
@@ -5,12 +5,14 @@
 #ifndef V8_COMPILER_JS_GRAPH_H_
 #define V8_COMPILER_JS_GRAPH_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/compiler/common-node-cache.h"
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/js-operator.h"
 #include "src/compiler/machine-operator.h"
 #include "src/compiler/node-properties.h"
+#include "src/globals.h"
 #include "src/isolate.h"
 
 namespace v8 {
@@ -23,7 +25,7 @@
 // Implements a facade on a Graph, enhancing the graph with JS-specific
 // notions, including various builders for operators, canonicalized global
 // constants, and various helper methods.
-class JSGraph : public ZoneObject {
+class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
  public:
   JSGraph(Isolate* isolate, Graph* graph, CommonOperatorBuilder* common,
           JSOperatorBuilder* javascript, SimplifiedOperatorBuilder* simplified,
@@ -106,10 +108,6 @@
     return machine()->Is32() ? Int32Constant(static_cast<int32_t>(value))
                              : Int64Constant(static_cast<int64_t>(value));
   }
-  template <typename T>
-  Node* PointerConstant(T* value) {
-    return IntPtrConstant(bit_cast<intptr_t>(value));
-  }
 
   Node* RelocatableInt32Constant(int32_t value, RelocInfo::Mode rmode);
   Node* RelocatableInt64Constant(int64_t value, RelocInfo::Mode rmode);
@@ -121,6 +119,13 @@
   // Creates a Float64Constant node, usually canonicalized.
   Node* Float64Constant(double value);
 
+  // Creates a PointerConstant node (asm.js only).
+  Node* PointerConstant(intptr_t value);
+  template <typename T>
+  Node* PointerConstant(T* value) {
+    return PointerConstant(bit_cast<intptr_t>(value));
+  }
+
   // Creates an ExternalConstant node, usually canonicalized.
   Node* ExternalConstant(ExternalReference ref);
   Node* ExternalConstant(Runtime::FunctionId function_id);
diff --git a/src/compiler/js-inlining-heuristic.cc b/src/compiler/js-inlining-heuristic.cc
index 5c626d1..d6229c2 100644
--- a/src/compiler/js-inlining-heuristic.cc
+++ b/src/compiler/js-inlining-heuristic.cc
@@ -23,7 +23,7 @@
 
 int CollectFunctions(Node* node, Handle<JSFunction>* functions,
                      int functions_size) {
-  DCHECK_NE(0u, functions_size);
+  DCHECK_NE(0, functions_size);
   HeapObjectMatcher m(node);
   if (m.HasValue() && m.Value()->IsJSFunction()) {
     functions[0] = Handle<JSFunction>::cast(m.Value());
diff --git a/src/compiler/js-inlining-heuristic.h b/src/compiler/js-inlining-heuristic.h
index 367e35a..aca8011 100644
--- a/src/compiler/js-inlining-heuristic.h
+++ b/src/compiler/js-inlining-heuristic.h
@@ -15,10 +15,11 @@
  public:
   enum Mode { kGeneralInlining, kRestrictedInlining, kStressInlining };
   JSInliningHeuristic(Editor* editor, Mode mode, Zone* local_zone,
-                      CompilationInfo* info, JSGraph* jsgraph)
+                      CompilationInfo* info, JSGraph* jsgraph,
+                      SourcePositionTable* source_positions)
       : AdvancedReducer(editor),
         mode_(mode),
-        inliner_(editor, local_zone, info, jsgraph),
+        inliner_(editor, local_zone, info, jsgraph, source_positions),
         candidates_(local_zone),
         seen_(local_zone),
         jsgraph_(jsgraph) {}
diff --git a/src/compiler/js-inlining.cc b/src/compiler/js-inlining.cc
index 58e5a27..0e122a6 100644
--- a/src/compiler/js-inlining.cc
+++ b/src/compiler/js-inlining.cc
@@ -116,7 +116,7 @@
           Replace(use, new_target);
         } else if (index == inlinee_arity_index) {
           // The projection is requesting the number of arguments.
-          Replace(use, jsgraph()->Int32Constant(inliner_inputs - 2));
+          Replace(use, jsgraph()->Constant(inliner_inputs - 2));
         } else if (index == inlinee_context_index) {
           // The projection is requesting the inlinee function context.
           Replace(use, context);
@@ -184,7 +184,7 @@
   for (Node* const input : end->inputs()) {
     switch (input->opcode()) {
       case IrOpcode::kReturn:
-        values.push_back(NodeProperties::GetValueInput(input, 0));
+        values.push_back(NodeProperties::GetValueInput(input, 1));
         effects.push_back(NodeProperties::GetEffectInput(input));
         controls.push_back(NodeProperties::GetControlInput(input));
         break;
@@ -282,6 +282,19 @@
 
 namespace {
 
+// TODO(turbofan): Shall we move this to the NodeProperties? Or some (untyped)
+// alias analyzer?
+bool IsSame(Node* a, Node* b) {
+  if (a == b) {
+    return true;
+  } else if (a->opcode() == IrOpcode::kCheckHeapObject) {
+    return IsSame(a->InputAt(0), b);
+  } else if (b->opcode() == IrOpcode::kCheckHeapObject) {
+    return IsSame(a, b->InputAt(0));
+  }
+  return false;
+}
+
 // TODO(bmeurer): Unify this with the witness helper functions in the
 // js-builtin-reducer.cc once we have a better understanding of the
 // map tracking we want to do, and eventually changed the CheckMaps
@@ -296,7 +309,7 @@
 bool NeedsConvertReceiver(Node* receiver, Node* effect) {
   for (Node* dominator = effect;;) {
     if (dominator->opcode() == IrOpcode::kCheckMaps &&
-        dominator->InputAt(0) == receiver) {
+        IsSame(dominator->InputAt(0), receiver)) {
       // Check if all maps have the given {instance_type}.
       for (int i = 1; i < dominator->op()->ValueInputCount(); ++i) {
         HeapObjectMatcher m(NodeProperties::GetValueInput(dominator, i));
@@ -471,8 +484,8 @@
     }
   }
 
-  Zone zone(info_->isolate()->allocator());
-  ParseInfo parse_info(&zone, function);
+  Zone zone(info_->isolate()->allocator(), ZONE_NAME);
+  ParseInfo parse_info(&zone, shared_info);
   CompilationInfo info(&parse_info, function);
   if (info_->is_deoptimization_enabled()) info.MarkAsDeoptimizationEnabled();
   if (info_->is_type_feedback_enabled()) info.MarkAsTypeFeedbackEnabled();
@@ -510,7 +523,8 @@
   // Remember that we inlined this function. This needs to be called right
   // after we ensure deoptimization support so that the code flusher
   // does not remove the code with the deoptimization support.
-  info_->AddInlinedFunction(shared_info);
+  int inlining_id = info_->AddInlinedFunction(
+      shared_info, source_positions_->GetSourcePosition(node));
 
   // ----------------------------------------------------------------
   // After this point, we've made a decision to inline this function.
@@ -530,8 +544,9 @@
     // Run the BytecodeGraphBuilder to create the subgraph.
     Graph::SubgraphScope scope(graph());
     BytecodeGraphBuilder graph_builder(&zone, &info, jsgraph(),
-                                       call.frequency());
-    graph_builder.CreateGraph();
+                                       call.frequency(), source_positions_,
+                                       inlining_id);
+    graph_builder.CreateGraph(false);
 
     // Extract the inlinee start/end nodes.
     start = graph()->start();
@@ -549,8 +564,9 @@
 
     // Run the AstGraphBuilder to create the subgraph.
     Graph::SubgraphScope scope(graph());
-    AstGraphBuilder graph_builder(&zone, &info, jsgraph(), call.frequency(),
-                                  loop_assignment, type_hint_analysis);
+    AstGraphBuilderWithPositions graph_builder(
+        &zone, &info, jsgraph(), call.frequency(), loop_assignment,
+        type_hint_analysis, source_positions_, inlining_id);
     graph_builder.CreateGraph(false);
 
     // Extract the inlinee start/end nodes.
@@ -590,7 +606,7 @@
     // constructor dispatch (allocate implicit receiver and check return value).
     // This models the behavior usually accomplished by our {JSConstructStub}.
     // Note that the context has to be the callers context (input to call node).
-    Node* receiver = jsgraph()->UndefinedConstant();  // Implicit receiver.
+    Node* receiver = jsgraph()->TheHoleConstant();  // Implicit receiver.
     if (NeedsImplicitReceiver(shared_info)) {
       Node* frame_state_before = NodeProperties::FindFrameStateBefore(node);
       Node* effect = NodeProperties::GetEffectInput(node);
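
Besides switching Return's value input to index 1 and routing receiver checks through IsSame, the inliner now threads source positions: AddInlinedFunction records the call site's position and returns an inlining id that the graph builders attach to the nodes of the inlined body, so deoptimization and profiling can unwind through inlined frames. A toy model of that bookkeeping (hypothetical CompilationInfo, positions simplified to ints):

#include <cstdio>
#include <string>
#include <vector>

// Toy model of the inlining-id bookkeeping: every inlined function
// registers itself with the call site's source position and receives an
// id that tags the nodes created for its body.
struct InlinedFunction { std::string name; int call_position; };

struct CompilationInfo {
  std::vector<InlinedFunction> inlined;
  int AddInlinedFunction(const std::string& name, int call_position) {
    inlined.push_back({name, call_position});
    return static_cast<int>(inlined.size()) - 1;
  }
};

int main() {
  CompilationInfo info;
  int id = info.AddInlinedFunction("f", /*call_position=*/42);
  std::printf("inlining_id=%d at position %d\n", id,
              info.inlined[id].call_position);
}
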
diff --git a/src/compiler/js-inlining.h b/src/compiler/js-inlining.h
index 323c3ae..9bb8ec4 100644
--- a/src/compiler/js-inlining.h
+++ b/src/compiler/js-inlining.h
@@ -5,8 +5,8 @@
 #ifndef V8_COMPILER_JS_INLINING_H_
 #define V8_COMPILER_JS_INLINING_H_
 
-#include "src/compiler/js-graph.h"
 #include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
 
 namespace v8 {
 namespace internal {
@@ -16,17 +16,20 @@
 
 namespace compiler {
 
+class SourcePositionTable;
+
 // The JSInliner provides the core graph inlining machinery. Note that this
 // class only deals with the mechanics of how to inline one graph into another,
 // heuristics that decide what and how much to inline are beyond its scope.
 class JSInliner final : public AdvancedReducer {
  public:
   JSInliner(Editor* editor, Zone* local_zone, CompilationInfo* info,
-            JSGraph* jsgraph)
+            JSGraph* jsgraph, SourcePositionTable* source_positions)
       : AdvancedReducer(editor),
         local_zone_(local_zone),
         info_(info),
-        jsgraph_(jsgraph) {}
+        jsgraph_(jsgraph),
+        source_positions_(source_positions) {}
 
   // Reducer interface, eagerly inlines everything.
   Reduction Reduce(Node* node) final;
@@ -45,6 +48,7 @@
   Zone* const local_zone_;
   CompilationInfo* info_;
   JSGraph* const jsgraph_;
+  SourcePositionTable* const source_positions_;
 
   Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
                                    int parameter_count,
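
The thread of the js-inlining change is easiest to see outside the compiler: AddInlinedFunction now records the source position of the inlining call site and returns an id, and both graph builders stamp that id onto every node of the inlinee subgraph. A minimal stand-alone sketch of that bookkeeping, with plain strings and ints standing in for Handle<SharedFunctionInfo> and SourcePosition (the names here are illustrative, not the V8 API):

// Hypothetical model of the new inlining bookkeeping; the real
// CompilationInfo and SourcePositionTable are much richer than this.
#include <cstdio>
#include <string>
#include <utility>
#include <vector>

struct InlinedFunction {
  std::string shared_info;  // stand-in for Handle<SharedFunctionInfo>
  int call_site_position;   // stand-in for the SourcePosition of the call
};

class CompilationInfoSketch {
 public:
  // Mirrors the new contract: record the inlinee together with the
  // position of the call that inlined it, and hand back an id that the
  // graph builders thread through as {inlining_id}.
  int AddInlinedFunction(std::string shared, int position) {
    inlined_.push_back({std::move(shared), position});
    return static_cast<int>(inlined_.size()) - 1;
  }
  const InlinedFunction& at(int id) const { return inlined_[id]; }

 private:
  std::vector<InlinedFunction> inlined_;
};

int main() {
  CompilationInfoSketch info;
  int inlining_id = info.AddInlinedFunction("callee", /*call position=*/42);
  // Every node created for the inlinee would carry {inlining_id}, letting
  // deoptimization and profiling attribute it back to position 42.
  std::printf("inlining_id=%d position=%d\n", inlining_id,
              info.at(inlining_id).call_site_position);
  return 0;
}
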
diff --git a/src/compiler/js-intrinsic-lowering.cc b/src/compiler/js-intrinsic-lowering.cc
index 7fc50e5..5290323 100644
--- a/src/compiler/js-intrinsic-lowering.cc
+++ b/src/compiler/js-intrinsic-lowering.cc
@@ -54,14 +54,8 @@
       return ReduceFixedArrayGet(node);
     case Runtime::kInlineFixedArraySet:
       return ReduceFixedArraySet(node);
-    case Runtime::kInlineRegExpConstructResult:
-      return ReduceRegExpConstructResult(node);
     case Runtime::kInlineRegExpExec:
       return ReduceRegExpExec(node);
-    case Runtime::kInlineRegExpFlags:
-      return ReduceRegExpFlags(node);
-    case Runtime::kInlineRegExpSource:
-      return ReduceRegExpSource(node);
     case Runtime::kInlineSubString:
       return ReduceSubString(node);
     case Runtime::kInlineToInteger:
@@ -234,37 +228,11 @@
 }
 
 
-Reduction JSIntrinsicLowering::ReduceRegExpConstructResult(Node* node) {
-  // TODO(bmeurer): Introduce JSCreateRegExpResult?
-  return Change(node, CodeFactory::RegExpConstructResult(isolate()), 0);
-}
-
-
 Reduction JSIntrinsicLowering::ReduceRegExpExec(Node* node) {
   return Change(node, CodeFactory::RegExpExec(isolate()), 4);
 }
 
 
-Reduction JSIntrinsicLowering::ReduceRegExpFlags(Node* node) {
-  Node* const receiver = NodeProperties::GetValueInput(node, 0);
-  Node* const effect = NodeProperties::GetEffectInput(node);
-  Node* const control = NodeProperties::GetControlInput(node);
-  Operator const* const op =
-      simplified()->LoadField(AccessBuilder::ForJSRegExpFlags());
-  return Change(node, op, receiver, effect, control);
-}
-
-
-Reduction JSIntrinsicLowering::ReduceRegExpSource(Node* node) {
-  Node* const receiver = NodeProperties::GetValueInput(node, 0);
-  Node* const effect = NodeProperties::GetEffectInput(node);
-  Node* const control = NodeProperties::GetControlInput(node);
-  Operator const* const op =
-      simplified()->LoadField(AccessBuilder::ForJSRegExpSource());
-  return Change(node, op, receiver, effect, control);
-}
-
-
 Reduction JSIntrinsicLowering::ReduceSubString(Node* node) {
   return Change(node, CodeFactory::SubString(isolate()), 3);
 }
diff --git a/src/compiler/js-intrinsic-lowering.h b/src/compiler/js-intrinsic-lowering.h
index 6835a52..6e984ff 100644
--- a/src/compiler/js-intrinsic-lowering.h
+++ b/src/compiler/js-intrinsic-lowering.h
@@ -5,8 +5,10 @@
 #ifndef V8_COMPILER_JS_INTRINSIC_LOWERING_H_
 #define V8_COMPILER_JS_INTRINSIC_LOWERING_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -25,7 +27,8 @@
 
 
 // Lowers certain JS-level runtime calls.
-class JSIntrinsicLowering final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE JSIntrinsicLowering final
+    : public NON_EXPORTED_BASE(AdvancedReducer) {
  public:
   enum DeoptimizationMode { kDeoptimizationEnabled, kDeoptimizationDisabled };
 
@@ -46,10 +49,7 @@
   Reduction ReduceIsSmi(Node* node);
   Reduction ReduceFixedArrayGet(Node* node);
   Reduction ReduceFixedArraySet(Node* node);
-  Reduction ReduceRegExpConstructResult(Node* node);
   Reduction ReduceRegExpExec(Node* node);
-  Reduction ReduceRegExpFlags(Node* node);
-  Reduction ReduceRegExpSource(Node* node);
   Reduction ReduceSubString(Node* node);
   Reduction ReduceToInteger(Node* node);
   Reduction ReduceToLength(Node* node);
diff --git a/src/compiler/js-native-context-specialization.cc b/src/compiler/js-native-context-specialization.cc
index ab20d93..a849fec 100644
--- a/src/compiler/js-native-context-specialization.cc
+++ b/src/compiler/js-native-context-specialization.cc
@@ -57,7 +57,7 @@
 
 JSNativeContextSpecialization::JSNativeContextSpecialization(
     Editor* editor, JSGraph* jsgraph, Flags flags,
-    MaybeHandle<Context> native_context, CompilationDependencies* dependencies,
+    Handle<Context> native_context, CompilationDependencies* dependencies,
     Zone* zone)
     : AdvancedReducer(editor),
       jsgraph_(jsgraph),
@@ -67,7 +67,6 @@
       zone_(zone),
       type_cache_(TypeCache::Get()) {}
 
-
 Reduction JSNativeContextSpecialization::Reduce(Node* node) {
   switch (node->opcode()) {
     case IrOpcode::kJSInstanceOf:
@@ -96,10 +95,6 @@
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
 
-  // Retrieve the native context from the given {node}.
-  Handle<Context> native_context;
-  if (!GetNativeContext(node).ToHandle(&native_context)) return NoChange();
-
   // If deoptimization is disabled, we cannot optimize.
   if (!(flags() & kDeoptimizationEnabled)) return NoChange();
 
@@ -111,7 +106,7 @@
 
   // Compute property access info for @@hasInstance on {receiver}.
   PropertyAccessInfo access_info;
-  AccessInfoFactory access_info_factory(dependencies(), native_context,
+  AccessInfoFactory access_info_factory(dependencies(), native_context(),
                                         graph()->zone());
   if (!access_info_factory.ComputePropertyAccessInfo(
           receiver_map, factory()->has_instance_symbol(), AccessMode::kLoad,
@@ -126,8 +121,7 @@
       // Determine actual holder and perform prototype chain checks.
       Handle<JSObject> holder;
       if (access_info.holder().ToHandle(&holder)) {
-        AssumePrototypesStable(access_info.receiver_maps(), native_context,
-                               holder);
+        AssumePrototypesStable(access_info.receiver_maps(), holder);
       }
 
       // Monomorphic property access.
@@ -147,8 +141,7 @@
     // Determine actual holder and perform prototype chain checks.
     Handle<JSObject> holder;
     if (access_info.holder().ToHandle(&holder)) {
-      AssumePrototypesStable(access_info.receiver_maps(), native_context,
-                             holder);
+      AssumePrototypesStable(access_info.receiver_maps(), holder);
     }
 
     // Monomorphic property access.
@@ -184,13 +177,11 @@
 Reduction JSNativeContextSpecialization::ReduceJSLoadContext(Node* node) {
   DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
   ContextAccess const& access = ContextAccessOf(node->op());
-  Handle<Context> native_context;
   // Specialize JSLoadContext(NATIVE_CONTEXT_INDEX) to the known native
   // context (if any), so we can constant-fold those fields, which is
   // safe, since the NATIVE_CONTEXT_INDEX slot is always immutable.
-  if (access.index() == Context::NATIVE_CONTEXT_INDEX &&
-      GetNativeContext(node).ToHandle(&native_context)) {
-    Node* value = jsgraph()->HeapConstant(native_context);
+  if (access.index() == Context::NATIVE_CONTEXT_INDEX) {
+    Node* value = jsgraph()->HeapConstant(native_context());
     ReplaceWithValue(node, value);
     return Replace(value);
   }
@@ -200,7 +191,7 @@
 Reduction JSNativeContextSpecialization::ReduceNamedAccess(
     Node* node, Node* value, MapHandleList const& receiver_maps,
     Handle<Name> name, AccessMode access_mode, LanguageMode language_mode,
-    Node* index) {
+    Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot, Node* index) {
   DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
          node->opcode() == IrOpcode::kJSStoreNamed ||
          node->opcode() == IrOpcode::kJSLoadProperty ||
@@ -215,12 +206,8 @@
   // Not much we can do if deoptimization support is disabled.
   if (!(flags() & kDeoptimizationEnabled)) return NoChange();
 
-  // Retrieve the native context from the given {node}.
-  Handle<Context> native_context;
-  if (!GetNativeContext(node).ToHandle(&native_context)) return NoChange();
-
   // Compute property access infos for the receiver maps.
-  AccessInfoFactory access_info_factory(dependencies(), native_context,
+  AccessInfoFactory access_info_factory(dependencies(), native_context(),
                                         graph()->zone());
   ZoneVector<PropertyAccessInfo> access_infos(zone());
   if (!access_info_factory.ComputePropertyAccessInfos(
@@ -229,10 +216,20 @@
   }
 
   // TODO(turbofan): Add support for inlining into try blocks.
-  if (NodeProperties::IsExceptionalCall(node) ||
-      !(flags() & kAccessorInliningEnabled)) {
-    for (auto access_info : access_infos) {
-      if (access_info.IsAccessorConstant()) return NoChange();
+  bool is_exceptional = NodeProperties::IsExceptionalCall(node);
+  for (auto access_info : access_infos) {
+    if (access_info.IsAccessorConstant()) {
+      // Accessors in try blocks are not supported yet.
+      if (is_exceptional || !(flags() & kAccessorInliningEnabled)) {
+        return NoChange();
+      }
+    } else if (access_info.IsGeneric()) {
+      // We do not handle generic calls in try blocks.
+      if (is_exceptional) return NoChange();
+      // We only handle the generic store IC case.
+      if (vector->GetKind(slot) != FeedbackVectorSlotKind::STORE_IC) {
+        return NoChange();
+      }
     }
   }
 
@@ -263,7 +260,8 @@
                                            receiver, effect, control);
     } else {
       // Monomorphic property access.
-      effect = BuildCheckHeapObject(receiver, effect, control);
+      receiver = effect = graph()->NewNode(simplified()->CheckHeapObject(),
+                                           receiver, effect, control);
       effect = BuildCheckMaps(receiver, effect, control,
                               access_info.receiver_maps());
     }
@@ -271,7 +269,7 @@
     // Generate the actual property access.
     ValueEffectControl continuation = BuildPropertyAccess(
         receiver, value, context, frame_state_lazy, effect, control, name,
-        native_context, access_info, access_mode);
+        access_info, access_mode, language_mode, vector, slot);
     value = continuation.value();
     effect = continuation.effect();
     control = continuation.control();
@@ -301,7 +299,8 @@
       receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
       receiverissmi_effect = effect;
     } else {
-      effect = BuildCheckHeapObject(receiver, effect, control);
+      receiver = effect = graph()->NewNode(simplified()->CheckHeapObject(),
+                                           receiver, effect, control);
     }
 
     // Load the {receiver} map. The resulting effect is the dominating effect
@@ -317,7 +316,7 @@
       Node* this_value = value;
       Node* this_receiver = receiver;
       Node* this_effect = effect;
-      Node* this_control;
+      Node* this_control = fallthrough_control;
 
       // Perform map check on {receiver}.
       MapList const& receiver_maps = access_info.receiver_maps();
@@ -325,19 +324,19 @@
         // Emit a (sequence of) map checks for other {receiver}s.
         ZoneVector<Node*> this_controls(zone());
         ZoneVector<Node*> this_effects(zone());
-        size_t num_classes = receiver_maps.size();
-        for (auto map : receiver_maps) {
-          DCHECK_LT(0u, num_classes);
-          Node* check =
-              graph()->NewNode(simplified()->ReferenceEqual(), receiver_map,
-                               jsgraph()->Constant(map));
-          if (--num_classes == 0 && j == access_infos.size() - 1) {
-            check = graph()->NewNode(simplified()->CheckIf(), check,
-                                     this_effect, fallthrough_control);
-            this_controls.push_back(fallthrough_control);
-            this_effects.push_back(check);
-            fallthrough_control = nullptr;
-          } else {
+        if (j == access_infos.size() - 1) {
+          // Last map check on the fallthrough control path, do a
+          // conditional eager deoptimization exit here.
+          this_effect = BuildCheckMaps(receiver, this_effect, this_control,
+                                       receiver_maps);
+          this_effects.push_back(this_effect);
+          this_controls.push_back(fallthrough_control);
+          fallthrough_control = nullptr;
+        } else {
+          for (auto map : receiver_maps) {
+            Node* check =
+                graph()->NewNode(simplified()->ReferenceEqual(), receiver_map,
+                                 jsgraph()->Constant(map));
             Node* branch = graph()->NewNode(common()->Branch(), check,
                                             fallthrough_control);
             fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
@@ -382,7 +381,8 @@
       // Generate the actual property access.
       ValueEffectControl continuation = BuildPropertyAccess(
           this_receiver, this_value, context, frame_state_lazy, this_effect,
-          this_control, name, native_context, access_info, access_mode);
+          this_control, name, access_info, access_mode, language_mode, vector,
+          slot);
       values.push_back(continuation.value());
       effects.push_back(continuation.effect());
       controls.push_back(continuation.control());
@@ -449,7 +449,7 @@
 
   // Try to lower the named access based on the {receiver_maps}.
   return ReduceNamedAccess(node, value, receiver_maps, name, access_mode,
-                           language_mode);
+                           language_mode, nexus.vector_handle(), nexus.slot());
 }
 
 
@@ -462,9 +462,9 @@
   // Check if we have a constant receiver.
   HeapObjectMatcher m(receiver);
   if (m.HasValue()) {
-    // Optimize "prototype" property of functions.
     if (m.Value()->IsJSFunction() &&
         p.name().is_identical_to(factory()->prototype_string())) {
+      // Optimize "prototype" property of functions.
       Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
       if (function->has_initial_map()) {
         // We need to add a code dependency on the initial map of the
@@ -480,6 +480,13 @@
           return Replace(value);
         }
       }
+    } else if (m.Value()->IsString() &&
+               p.name().is_identical_to(factory()->length_string())) {
+      // Constant-fold "length" property on constant strings.
+      Handle<String> string = Handle<String>::cast(m.Value());
+      Node* value = jsgraph()->Constant(string->length());
+      ReplaceWithValue(node, value);
+      return Replace(value);
     }
   }
 
@@ -548,11 +555,8 @@
     value = graph()->NewNode(simplified()->StringFromCharCode(), value);
   } else {
-    // Retrieve the native context from the given {node}.
-    Handle<Context> native_context;
-    if (!GetNativeContext(node).ToHandle(&native_context)) return NoChange();
-
     // Compute element access infos for the receiver maps.
-    AccessInfoFactory access_info_factory(dependencies(), native_context,
+    AccessInfoFactory access_info_factory(dependencies(), native_context(),
                                           graph()->zone());
     ZoneVector<ElementAccessInfo> access_infos(zone());
     if (!access_info_factory.ComputeElementAccessInfos(
@@ -605,7 +609,8 @@
     }
 
     // Ensure that {receiver} is a heap object.
-    effect = BuildCheckHeapObject(receiver, effect, control);
+    receiver = effect = graph()->NewNode(simplified()->CheckHeapObject(),
+                                         receiver, effect, control);
 
     // Check for the monomorphic case.
     if (access_infos.size() == 1) {
@@ -638,9 +643,9 @@
                               access_info.receiver_maps());
 
       // Access the actual element.
-      ValueEffectControl continuation = BuildElementAccess(
-          receiver, index, value, effect, control, native_context, access_info,
-          access_mode, store_mode);
+      ValueEffectControl continuation =
+          BuildElementAccess(receiver, index, value, effect, control,
+                             access_info, access_mode, store_mode);
       value = continuation.value();
       effect = continuation.effect();
       control = continuation.control();
@@ -684,35 +689,25 @@
 
         // Perform map check(s) on {receiver}.
         MapList const& receiver_maps = access_info.receiver_maps();
-        {
+        if (j == access_infos.size() - 1) {
+          // Last map check on the fallthrough control path, do a
+          // conditional eager deoptimization exit here.
+          this_effect = BuildCheckMaps(receiver, this_effect, this_control,
+                                       receiver_maps);
+          fallthrough_control = nullptr;
+        } else {
           ZoneVector<Node*> this_controls(zone());
           ZoneVector<Node*> this_effects(zone());
-          size_t num_classes = receiver_maps.size();
           for (Handle<Map> map : receiver_maps) {
-            DCHECK_LT(0u, num_classes);
             Node* check =
                 graph()->NewNode(simplified()->ReferenceEqual(), receiver_map,
                                  jsgraph()->Constant(map));
-            if (--num_classes == 0 && j == access_infos.size() - 1) {
-              // Last map check on the fallthrough control path, do a
-              // conditional eager deoptimization exit here.
-              // TODO(turbofan): This is ugly as hell! We should probably
-              // introduce macro-ish operators for property access that
-              // encapsulate this whole mess.
-              check = graph()->NewNode(simplified()->CheckIf(), check,
-                                       this_effect, this_control);
-              this_controls.push_back(this_control);
-              this_effects.push_back(check);
-              fallthrough_control = nullptr;
-            } else {
-              Node* branch = graph()->NewNode(common()->Branch(), check,
-                                              fallthrough_control);
-              this_controls.push_back(
-                  graph()->NewNode(common()->IfTrue(), branch));
-              this_effects.push_back(this_effect);
-              fallthrough_control =
-                  graph()->NewNode(common()->IfFalse(), branch);
-            }
+            Node* branch = graph()->NewNode(common()->Branch(), check,
+                                            fallthrough_control);
+            this_controls.push_back(
+                graph()->NewNode(common()->IfTrue(), branch));
+            this_effects.push_back(this_effect);
+            fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
           }
 
           // Create single chokepoint for the control.
@@ -739,7 +734,7 @@
         // Access the actual element.
         ValueEffectControl continuation = BuildElementAccess(
             this_receiver, this_index, this_value, this_effect, this_control,
-            native_context, access_info, access_mode, store_mode);
+            access_info, access_mode, store_mode);
         values.push_back(continuation.value());
         effects.push_back(continuation.effect());
         controls.push_back(continuation.control());
@@ -780,8 +775,48 @@
     KeyedAccessStoreMode store_mode) {
   DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
          node->opcode() == IrOpcode::kJSStoreProperty);
-  Node* const receiver = NodeProperties::GetValueInput(node, 0);
-  Node* const effect = NodeProperties::GetEffectInput(node);
+  Node* receiver = NodeProperties::GetValueInput(node, 0);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+
+  // Optimize access for constant {receiver}.
+  HeapObjectMatcher mreceiver(receiver);
+  if (mreceiver.HasValue() && mreceiver.Value()->IsString()) {
+    Handle<String> string = Handle<String>::cast(mreceiver.Value());
+
+    // We can only assume that the {index} is a valid array index if the IC
+    // is in element access mode and not MEGAMORPHIC, otherwise there's no
+    // guard for the bounds check below.
+    if (nexus.ic_state() != MEGAMORPHIC && nexus.GetKeyType() == ELEMENT) {
+      // Strings are immutable in JavaScript.
+      if (access_mode == AccessMode::kStore) return NoChange();
+
+      // Properly deal with constant {index}.
+      NumberMatcher mindex(index);
+      if (mindex.IsInteger() && mindex.IsInRange(0.0, string->length() - 1)) {
+        // Constant-fold the {index} access to {string}.
+        Node* value = jsgraph()->HeapConstant(
+            factory()->LookupSingleCharacterStringFromCode(
+                string->Get(static_cast<int>(mindex.Value()))));
+        ReplaceWithValue(node, value, effect, control);
+        return Replace(value);
+      } else if (flags() & kDeoptimizationEnabled) {
+        // Ensure that {index} is less than {receiver} length.
+        Node* length = jsgraph()->Constant(string->length());
+        index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
+                                          length, effect, control);
+
+        // Load the character from the {receiver}.
+        value = graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
+                                 index, control);
+
+        // Return it as a single character string.
+        value = graph()->NewNode(simplified()->StringFromCharCode(), value);
+        ReplaceWithValue(node, value, effect, control);
+        return Replace(value);
+      }
+    }
+  }
 
   // Check if the {nexus} reports type feedback for the IC.
   if (nexus.IsUninitialized()) {
@@ -824,21 +859,28 @@
       } else {
         name = factory()->InternalizeName(name);
         return ReduceNamedAccess(node, value, receiver_maps, name, access_mode,
-                                 language_mode);
+                                 language_mode, nexus.vector_handle(),
+                                 nexus.slot());
       }
     }
   }
 
   // Check if we have feedback for a named access.
   if (Name* name = nexus.FindFirstName()) {
-    return ReduceNamedAccess(node, value, receiver_maps,
-                             handle(name, isolate()), access_mode,
-                             language_mode, index);
+    return ReduceNamedAccess(
+        node, value, receiver_maps, handle(name, isolate()), access_mode,
+        language_mode, nexus.vector_handle(), nexus.slot(), index);
   } else if (nexus.GetKeyType() != ELEMENT) {
     // The KeyedLoad/StoreIC has seen non-element accesses, so we cannot assume
     // that the {index} is a valid array index, thus we just let the IC continue
     // to deal with this load/store.
     return NoChange();
+  } else if (nexus.ic_state() == MEGAMORPHIC) {
+    // The KeyedLoad/StoreIC uses the MEGAMORPHIC state to guard the assumption
+    // that a numeric {index} is within the valid bounds for {receiver}, i.e.
+    // it transitions to MEGAMORPHIC once it sees an out-of-bounds access. Thus
+    // we cannot continue here if the IC state is MEGAMORPHIC.
+    return NoChange();
   }
 
   // Try to lower the element access based on the {receiver_maps}.
@@ -900,12 +942,13 @@
 JSNativeContextSpecialization::ValueEffectControl
 JSNativeContextSpecialization::BuildPropertyAccess(
     Node* receiver, Node* value, Node* context, Node* frame_state, Node* effect,
-    Node* control, Handle<Name> name, Handle<Context> native_context,
-    PropertyAccessInfo const& access_info, AccessMode access_mode) {
+    Node* control, Handle<Name> name, PropertyAccessInfo const& access_info,
+    AccessMode access_mode, LanguageMode language_mode,
+    Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot) {
   // Determine actual holder and perform prototype chain checks.
   Handle<JSObject> holder;
   if (access_info.holder().ToHandle(&holder)) {
-    AssumePrototypesStable(access_info.receiver_maps(), native_context, holder);
+    AssumePrototypesStable(access_info.receiver_maps(), holder);
   }
 
   // Generate the actual property access.
@@ -943,12 +986,26 @@
             context, target, frame_state);
 
         // Introduce the call to the getter function.
-        value = effect = graph()->NewNode(
-            javascript()->CallFunction(
-                2, 0.0f, VectorSlotPair(),
-                ConvertReceiverMode::kNotNullOrUndefined),
-            target, receiver, context, frame_state0, effect, control);
-        control = graph()->NewNode(common()->IfSuccess(), value);
+        if (access_info.constant()->IsJSFunction()) {
+          value = effect = graph()->NewNode(
+              javascript()->CallFunction(
+                  2, 0.0f, VectorSlotPair(),
+                  ConvertReceiverMode::kNotNullOrUndefined),
+              target, receiver, context, frame_state0, effect, control);
+          control = graph()->NewNode(common()->IfSuccess(), value);
+        } else {
+          DCHECK(access_info.constant()->IsFunctionTemplateInfo());
+          Handle<FunctionTemplateInfo> function_template_info(
+              Handle<FunctionTemplateInfo>::cast(access_info.constant()));
+          DCHECK(!function_template_info->call_code()->IsUndefined(isolate()));
+          ZoneVector<Node*> stack_parameters(graph()->zone());
+          ValueEffectControl value_effect_control = InlineApiCall(
+              receiver, context, target, frame_state0, &stack_parameters,
+              effect, control, shared_info, function_template_info);
+          value = value_effect_control.value();
+          effect = value_effect_control.effect();
+          control = value_effect_control.control();
+        }
         break;
       }
       case AccessMode::kStore: {
@@ -966,17 +1023,31 @@
             context, target, frame_state);
 
         // Introduce the call to the setter function.
-        effect = graph()->NewNode(javascript()->CallFunction(
-                                      3, 0.0f, VectorSlotPair(),
-                                      ConvertReceiverMode::kNotNullOrUndefined),
-                                  target, receiver, value, context,
-                                  frame_state0, effect, control);
-        control = graph()->NewNode(common()->IfSuccess(), effect);
+        if (access_info.constant()->IsJSFunction()) {
+          effect = graph()->NewNode(
+              javascript()->CallFunction(
+                  3, 0.0f, VectorSlotPair(),
+                  ConvertReceiverMode::kNotNullOrUndefined),
+              target, receiver, value, context, frame_state0, effect, control);
+          control = graph()->NewNode(common()->IfSuccess(), effect);
+        } else {
+          DCHECK(access_info.constant()->IsFunctionTemplateInfo());
+          Handle<FunctionTemplateInfo> function_template_info(
+              Handle<FunctionTemplateInfo>::cast(access_info.constant()));
+          DCHECK(!function_template_info->call_code()->IsUndefined(isolate()));
+          ZoneVector<Node*> stack_parameters(graph()->zone());
+          stack_parameters.push_back(value);
+          ValueEffectControl value_effect_control = InlineApiCall(
+              receiver, context, target, frame_state0, &stack_parameters,
+              effect, control, shared_info, function_template_info);
+          value = value_effect_control.value();
+          effect = value_effect_control.effect();
+          control = value_effect_control.control();
+        }
         break;
       }
     }
-  } else {
-    DCHECK(access_info.IsDataField());
+  } else if (access_info.IsDataField()) {
     FieldIndex const field_index = access_info.field_index();
     Type* const field_type = access_info.field_type();
     MachineRepresentation const field_representation =
@@ -1128,6 +1199,28 @@
                                   jsgraph()->UndefinedConstant(), effect);
       }
     }
+  } else {
+    DCHECK(access_info.IsGeneric());
+    DCHECK_EQ(AccessMode::kStore, access_mode);
+    DCHECK_EQ(FeedbackVectorSlotKind::STORE_IC, vector->GetKind(slot));
+    Callable callable =
+        CodeFactory::StoreICInOptimizedCode(isolate(), language_mode);
+    const CallInterfaceDescriptor& descriptor = callable.descriptor();
+    CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+        isolate(), graph()->zone(), descriptor,
+        descriptor.GetStackParameterCount(), CallDescriptor::kNeedsFrameState,
+        Operator::kNoProperties);
+    Node* stub_code = jsgraph()->HeapConstant(callable.code());
+    Node* name_node = jsgraph()->HeapConstant(name);
+    Node* slot_node = jsgraph()->Constant(vector->GetIndex(slot));
+    Node* vector_node = jsgraph()->HeapConstant(vector);
+
+    Node* inputs[] = {stub_code,   receiver, name_node,   value,  slot_node,
+                      vector_node, context,  frame_state, effect, control};
+
+    value = effect = control =
+        graph()->NewNode(common()->Call(desc), arraysize(inputs), inputs);
+    control = graph()->NewNode(common()->IfSuccess(), control);
   }
 
   return ValueEffectControl(value, effect, control);
@@ -1154,8 +1247,8 @@
 JSNativeContextSpecialization::ValueEffectControl
 JSNativeContextSpecialization::BuildElementAccess(
     Node* receiver, Node* index, Node* value, Node* effect, Node* control,
-    Handle<Context> native_context, ElementAccessInfo const& access_info,
-    AccessMode access_mode, KeyedAccessStoreMode store_mode) {
+    ElementAccessInfo const& access_info, AccessMode access_mode,
+    KeyedAccessStoreMode store_mode) {
   // TODO(bmeurer): We currently specialize based on elements kind. We should
   // also be able to properly support strings and other JSObjects here.
   ElementsKind elements_kind = access_info.elements_kind();
@@ -1232,6 +1325,14 @@
         value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
                                           effect, control);
 
+        // Introduce the appropriate truncation for {value}. Currently we
+        // only need to do this for Uint8ClampedArray {receiver}s, as the
+        // other truncations are implicit in the StoreTypedElement, but we
+        // might want to change that at some point.
+        if (external_array_type == kExternalUint8ClampedArray) {
+          value = graph()->NewNode(simplified()->NumberToUint8Clamped(), value);
+        }
+
         // Check if we can skip the out-of-bounds store.
         if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
           Node* check =
@@ -1304,7 +1405,7 @@
       element_type = Type::Number();
       element_machine_type = MachineType::Float64();
     } else if (IsFastSmiElementsKind(elements_kind)) {
-      element_type = type_cache_.kSmi;
+      element_type = Type::SignedSmall();
       element_machine_type = MachineType::TaggedSigned();
     }
     ElementAccess element_access = {kTaggedBase, FixedArray::kHeaderSize,
@@ -1330,7 +1431,7 @@
       if (elements_kind == FAST_HOLEY_ELEMENTS ||
           elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
         // Check if we are allowed to turn the hole into undefined.
-        if (CanTreatHoleAsUndefined(receiver_maps, native_context)) {
+        if (CanTreatHoleAsUndefined(receiver_maps)) {
           // Turn the hole into undefined.
           value = graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(),
                                    value);
@@ -1343,7 +1444,7 @@
         // Perform the hole check on the result.
         CheckFloat64HoleMode mode = CheckFloat64HoleMode::kNeverReturnHole;
         // Check if we are allowed to return the hole directly.
-        if (CanTreatHoleAsUndefined(receiver_maps, native_context)) {
+        if (CanTreatHoleAsUndefined(receiver_maps)) {
           // Return the signaling NaN hole directly if all uses are truncating.
           mode = CheckFloat64HoleMode::kAllowReturnHole;
         }
@@ -1397,6 +1498,65 @@
   return ValueEffectControl(value, effect, control);
 }
 
+JSNativeContextSpecialization::ValueEffectControl
+JSNativeContextSpecialization::InlineApiCall(
+    Node* receiver, Node* context, Node* target, Node* frame_state,
+    ZoneVector<Node*>* stack_parameters, Node* effect, Node* control,
+    Handle<SharedFunctionInfo> shared_info,
+    Handle<FunctionTemplateInfo> function_template_info) {
+  Handle<CallHandlerInfo> call_handler_info = handle(
+      CallHandlerInfo::cast(function_template_info->call_code()), isolate());
+  Handle<Object> call_data_object(call_handler_info->data(), isolate());
+
+  // The stub always expects the receiver as the first param on the stack.
+  CallApiCallbackStub stub(
+      isolate(), static_cast<int>(stack_parameters->size()),
+      call_data_object->IsUndefined(isolate()),
+      true /* TODO(epertoso): similar to CallOptimization */);
+  CallInterfaceDescriptor call_interface_descriptor =
+      stub.GetCallInterfaceDescriptor();
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), graph()->zone(), call_interface_descriptor,
+      call_interface_descriptor.GetStackParameterCount() +
+          static_cast<int>(stack_parameters->size()) + 1,
+      CallDescriptor::kNeedsFrameState, Operator::kNoProperties,
+      MachineType::AnyTagged(), 1);
+
+  Node* data = jsgraph()->Constant(call_data_object);
+  ApiFunction function(v8::ToCData<Address>(call_handler_info->callback()));
+  Node* function_reference =
+      graph()->NewNode(common()->ExternalConstant(ExternalReference(
+          &function, ExternalReference::DIRECT_API_CALL, isolate())));
+  Node* code = jsgraph()->HeapConstant(stub.GetCode());
+
+  ZoneVector<Node*> inputs(zone());
+  inputs.push_back(code);
+
+  // CallApiCallbackStub's register arguments.
+  inputs.push_back(target);
+  inputs.push_back(data);
+  inputs.push_back(receiver);
+  inputs.push_back(function_reference);
+
+  // Stack parameters: CallApiCallbackStub expects the first one to be the
+  // receiver.
+  inputs.push_back(receiver);
+  for (Node* node : *stack_parameters) {
+    inputs.push_back(node);
+  }
+  inputs.push_back(context);
+  inputs.push_back(frame_state);
+  inputs.push_back(effect);
+  inputs.push_back(control);
+
+  Node* effect0;
+  Node* value0 = effect0 =
+      graph()->NewNode(common()->Call(call_descriptor),
+                       static_cast<int>(inputs.size()), inputs.data());
+  Node* control0 = graph()->NewNode(common()->IfSuccess(), value0);
+  return ValueEffectControl(value0, effect0, control0);
+}
+
 Node* JSNativeContextSpecialization::BuildCheckMaps(
     Node* receiver, Node* effect, Node* control,
     std::vector<Handle<Map>> const& maps) {
@@ -1425,42 +1585,14 @@
                           inputs);
 }
 
-Node* JSNativeContextSpecialization::BuildCheckHeapObject(Node* receiver,
-                                                          Node* effect,
-                                                          Node* control) {
-  switch (receiver->opcode()) {
-    case IrOpcode::kHeapConstant:
-    case IrOpcode::kJSCreate:
-    case IrOpcode::kJSCreateArguments:
-    case IrOpcode::kJSCreateArray:
-    case IrOpcode::kJSCreateClosure:
-    case IrOpcode::kJSCreateIterResultObject:
-    case IrOpcode::kJSCreateLiteralArray:
-    case IrOpcode::kJSCreateLiteralObject:
-    case IrOpcode::kJSCreateLiteralRegExp:
-    case IrOpcode::kJSConvertReceiver:
-    case IrOpcode::kJSToName:
-    case IrOpcode::kJSToString:
-    case IrOpcode::kJSToObject:
-    case IrOpcode::kJSTypeOf: {
-      return effect;
-    }
-    default: {
-      return graph()->NewNode(simplified()->CheckHeapObject(), receiver, effect,
-                              control);
-    }
-  }
-}
-
 void JSNativeContextSpecialization::AssumePrototypesStable(
-    std::vector<Handle<Map>> const& receiver_maps,
-    Handle<Context> native_context, Handle<JSObject> holder) {
+    std::vector<Handle<Map>> const& receiver_maps, Handle<JSObject> holder) {
   // Determine actual holder and perform prototype chain checks.
   for (auto map : receiver_maps) {
     // Perform the implicit ToObject for primitives here.
     // Implemented according to ES6 section 7.3.2 GetV (V, P).
     Handle<JSFunction> constructor;
-    if (Map::GetConstructorFunction(map, native_context)
+    if (Map::GetConstructorFunction(map, native_context())
             .ToHandle(&constructor)) {
       map = handle(constructor->initial_map(), isolate());
     }
@@ -1469,16 +1601,15 @@
 }
 
 bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
-    std::vector<Handle<Map>> const& receiver_maps,
-    Handle<Context> native_context) {
+    std::vector<Handle<Map>> const& receiver_maps) {
   // Check if the array prototype chain is intact.
   if (!isolate()->IsFastArrayConstructorPrototypeChainIntact()) return false;
 
   // Make sure both the initial Array and Object prototypes are stable.
   Handle<JSObject> initial_array_prototype(
-      native_context->initial_array_prototype(), isolate());
+      native_context()->initial_array_prototype(), isolate());
   Handle<JSObject> initial_object_prototype(
-      native_context->initial_object_prototype(), isolate());
+      native_context()->initial_object_prototype(), isolate());
   if (!initial_array_prototype->map()->is_stable() ||
       !initial_object_prototype->map()->is_stable()) {
     return false;
@@ -1587,44 +1718,30 @@
   return MaybeHandle<Map>();
 }
 
-MaybeHandle<Context> JSNativeContextSpecialization::GetNativeContext(
-    Node* node) {
-  Node* const context = NodeProperties::GetContextInput(node);
-  return NodeProperties::GetSpecializationNativeContext(context,
-                                                        native_context());
-}
-
-
 Graph* JSNativeContextSpecialization::graph() const {
   return jsgraph()->graph();
 }
 
-
 Isolate* JSNativeContextSpecialization::isolate() const {
   return jsgraph()->isolate();
 }
 
-
 Factory* JSNativeContextSpecialization::factory() const {
   return isolate()->factory();
 }
 
-
 MachineOperatorBuilder* JSNativeContextSpecialization::machine() const {
   return jsgraph()->machine();
 }
 
-
 CommonOperatorBuilder* JSNativeContextSpecialization::common() const {
   return jsgraph()->common();
 }
 
-
 JSOperatorBuilder* JSNativeContextSpecialization::javascript() const {
   return jsgraph()->javascript();
 }
 
-
 SimplifiedOperatorBuilder* JSNativeContextSpecialization::simplified() const {
   return jsgraph()->simplified();
 }
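
Among the changes above, the new fast path for constant string receivers in ReduceKeyedAccess is the most self-contained: stores to strings bail out (strings are immutable), a known integer index inside the string folds to the character itself, and anything else is guarded by CheckBounds against the constant length. A stand-alone sketch of just the folding predicate, with std::string and std::optional standing in for the heap string and the matcher machinery (illustrative names, not the V8 API):

// Hypothetical model of the constant-fold guard in ReduceKeyedAccess.
#include <cassert>
#include <cmath>
#include <optional>
#include <string>

std::optional<char> FoldConstantStringIndex(const std::string& receiver,
                                            double index) {
  // Mirrors NumberMatcher::IsInteger() / IsInRange(0, length - 1): only a
  // known integer index within the string can be folded away entirely.
  if (receiver.empty()) return std::nullopt;
  if (std::floor(index) != index) return std::nullopt;
  if (index < 0.0 || index > static_cast<double>(receiver.length() - 1))
    return std::nullopt;
  return receiver[static_cast<size_t>(index)];
}

int main() {
  assert(FoldConstantStringIndex("abc", 1.0) == 'b');  // folds to 'b'
  assert(!FoldConstantStringIndex("abc", 3.0));  // out of range: no fold
  assert(!FoldConstantStringIndex("abc", 0.5));  // not an integer: no fold
  return 0;
}

An index that fails these checks is not rejected outright; it falls through to the CheckBounds + StringCharCodeAt path built right after the fold attempt.
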
diff --git a/src/compiler/js-native-context-specialization.h b/src/compiler/js-native-context-specialization.h
index c015de0..2d07061 100644
--- a/src/compiler/js-native-context-specialization.h
+++ b/src/compiler/js-native-context-specialization.h
@@ -8,6 +8,7 @@
 #include "src/base/flags.h"
 #include "src/compiler/graph-reducer.h"
 #include "src/deoptimize-reason.h"
+#include "src/type-feedback-vector.h"
 
 namespace v8 {
 namespace internal {
@@ -15,7 +16,6 @@
 // Forward declarations.
 class CompilationDependencies;
 class Factory;
-class FeedbackNexus;
 
 namespace compiler {
 
@@ -46,7 +46,7 @@
   typedef base::Flags<Flag> Flags;
 
   JSNativeContextSpecialization(Editor* editor, JSGraph* jsgraph, Flags flags,
-                                MaybeHandle<Context> native_context,
+                                Handle<Context> native_context,
                                 CompilationDependencies* dependencies,
                                 Zone* zone);
 
@@ -79,7 +79,8 @@
                               MapHandleList const& receiver_maps,
                               Handle<Name> name, AccessMode access_mode,
                               LanguageMode language_mode,
-                              Node* index = nullptr);
+                              Handle<TypeFeedbackVector> vector,
+                              FeedbackVectorSlot slot, Node* index = nullptr);
 
   Reduction ReduceSoftDeoptimize(Node* node, DeoptimizeReason reason);
 
@@ -100,38 +101,34 @@
   };
 
   // Construct the appropriate subgraph for property access.
-  ValueEffectControl BuildPropertyAccess(Node* receiver, Node* value,
-                                         Node* context, Node* frame_state,
-                                         Node* effect, Node* control,
-                                         Handle<Name> name,
-                                         Handle<Context> native_context,
-                                         PropertyAccessInfo const& access_info,
-                                         AccessMode access_mode);
+  ValueEffectControl BuildPropertyAccess(
+      Node* receiver, Node* value, Node* context, Node* frame_state,
+      Node* effect, Node* control, Handle<Name> name,
+      PropertyAccessInfo const& access_info, AccessMode access_mode,
+      LanguageMode language_mode, Handle<TypeFeedbackVector> vector,
+      FeedbackVectorSlot slot);
 
   // Construct the appropriate subgraph for element access.
-  ValueEffectControl BuildElementAccess(
-      Node* receiver, Node* index, Node* value, Node* effect, Node* control,
-      Handle<Context> native_context, ElementAccessInfo const& access_info,
-      AccessMode access_mode, KeyedAccessStoreMode store_mode);
+  ValueEffectControl BuildElementAccess(Node* receiver, Node* index,
+                                        Node* value, Node* effect,
+                                        Node* control,
+                                        ElementAccessInfo const& access_info,
+                                        AccessMode access_mode,
+                                        KeyedAccessStoreMode store_mode);
 
   // Construct an appropriate map check.
   Node* BuildCheckMaps(Node* receiver, Node* effect, Node* control,
                        std::vector<Handle<Map>> const& maps);
 
-  // Construct an appropriate heap object check.
-  Node* BuildCheckHeapObject(Node* receiver, Node* effect, Node* control);
-
   // Adds stability dependencies on all prototypes of every class in
   // {receiver_type} up to (and including) the {holder}.
   void AssumePrototypesStable(std::vector<Handle<Map>> const& receiver_maps,
-                              Handle<Context> native_context,
                               Handle<JSObject> holder);
 
   // Checks if we can turn the hole into undefined when loading an element
   // from an object with one of the {receiver_maps}; sets up appropriate
   // code dependencies and might use the array protector cell.
-  bool CanTreatHoleAsUndefined(std::vector<Handle<Map>> const& receiver_maps,
-                               Handle<Context> native_context);
+  bool CanTreatHoleAsUndefined(std::vector<Handle<Map>> const& receiver_maps);
 
   // Extract receiver maps from {nexus} and filter based on {receiver} if
   // possible.
@@ -147,8 +144,11 @@
   // program location.
   MaybeHandle<Map> InferReceiverRootMap(Node* receiver);
 
-  // Retrieve the native context from the given {node} if known.
-  MaybeHandle<Context> GetNativeContext(Node* node);
+  ValueEffectControl InlineApiCall(
+      Node* receiver, Node* context, Node* target, Node* frame_state,
+      ZoneVector<Node*>* stack_parameters, Node* effect, Node* control,
+      Handle<SharedFunctionInfo> shared_info,
+      Handle<FunctionTemplateInfo> function_template_info);
 
   Graph* graph() const;
   JSGraph* jsgraph() const { return jsgraph_; }
@@ -159,13 +159,13 @@
   SimplifiedOperatorBuilder* simplified() const;
   MachineOperatorBuilder* machine() const;
   Flags flags() const { return flags_; }
-  MaybeHandle<Context> native_context() const { return native_context_; }
+  Handle<Context> native_context() const { return native_context_; }
   CompilationDependencies* dependencies() const { return dependencies_; }
   Zone* zone() const { return zone_; }
 
   JSGraph* const jsgraph_;
   Flags const flags_;
-  MaybeHandle<Context> native_context_;
+  Handle<Context> native_context_;
   CompilationDependencies* const dependencies_;
   Zone* const zone_;
   TypeCache const& type_cache_;
diff --git a/src/compiler/js-operator.cc b/src/compiler/js-operator.cc
index 21e905a..f64630c 100644
--- a/src/compiler/js-operator.cc
+++ b/src/compiler/js-operator.cc
@@ -445,6 +445,7 @@
   V(ToString, Operator::kNoProperties, 1, 1)                \
   V(Create, Operator::kEliminatable, 2, 1)                  \
   V(CreateIterResultObject, Operator::kEliminatable, 2, 1)  \
+  V(CreateKeyValueArray, Operator::kEliminatable, 2, 1)     \
   V(HasProperty, Operator::kNoProperties, 2, 1)             \
   V(TypeOf, Operator::kPure, 1, 1)                          \
   V(InstanceOf, Operator::kNoProperties, 2, 1)              \
@@ -766,6 +767,23 @@
       access);                                   // parameter
 }
 
+const Operator* JSOperatorBuilder::LoadModule(int32_t cell_index) {
+  return new (zone()) Operator1<int32_t>(       // --
+      IrOpcode::kJSLoadModule,                  // opcode
+      Operator::kNoWrite | Operator::kNoThrow,  // flags
+      "JSLoadModule",                           // name
+      1, 1, 1, 1, 1, 0,                         // counts
+      cell_index);                              // parameter
+}
+
+const Operator* JSOperatorBuilder::StoreModule(int32_t cell_index) {
+  return new (zone()) Operator1<int32_t>(      // --
+      IrOpcode::kJSStoreModule,                // opcode
+      Operator::kNoRead | Operator::kNoThrow,  // flags
+      "JSStoreModule",                         // name
+      2, 1, 1, 0, 1, 0,                        // counts
+      cell_index);                             // parameter
+}
 
 const Operator* JSOperatorBuilder::CreateArguments(CreateArgumentsType type) {
   return new (zone()) Operator1<CreateArgumentsType>(         // --
diff --git a/src/compiler/js-operator.h b/src/compiler/js-operator.h
index 2374ae6..9cdd305 100644
--- a/src/compiler/js-operator.h
+++ b/src/compiler/js-operator.h
@@ -5,6 +5,8 @@
 #ifndef V8_COMPILER_JS_OPERATOR_H_
 #define V8_COMPILER_JS_OPERATOR_H_
 
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
 #include "src/runtime/runtime.h"
 #include "src/type-hints.h"
 
@@ -19,7 +21,7 @@
 
 // Defines a pair of {TypeFeedbackVector} and {TypeFeedbackVectorSlot}, which
 // is used to access the type feedback for a certain {Node}.
-class VectorSlotPair {
+class V8_EXPORT_PRIVATE VectorSlotPair {
  public:
   VectorSlotPair();
   VectorSlotPair(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
@@ -182,7 +184,7 @@
 
 size_t hash_value(ContextAccess const&);
 
-std::ostream& operator<<(std::ostream&, ContextAccess const&);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, ContextAccess const&);
 
 ContextAccess const& ContextAccessOf(Operator const*);
 
@@ -416,7 +418,8 @@
 // Interface for building JavaScript-level operators, e.g. directly from the
 // AST. Most operators have no parameters, thus can be globally shared for all
 // graphs.
-class JSOperatorBuilder final : public ZoneObject {
+class V8_EXPORT_PRIVATE JSOperatorBuilder final
+    : public NON_EXPORTED_BASE(ZoneObject) {
  public:
   explicit JSOperatorBuilder(Zone* zone);
 
@@ -455,6 +458,7 @@
   const Operator* CreateClosure(Handle<SharedFunctionInfo> shared_info,
                                 PretenureFlag pretenure);
   const Operator* CreateIterResultObject();
+  const Operator* CreateKeyValueArray();
   const Operator* CreateLiteralArray(Handle<FixedArray> constant_elements,
                                      int literal_flags, int literal_index,
                                      int number_of_elements);
@@ -499,6 +503,9 @@
   const Operator* LoadContext(size_t depth, size_t index, bool immutable);
   const Operator* StoreContext(size_t depth, size_t index);
 
+  const Operator* LoadModule(int32_t cell_index);
+  const Operator* StoreModule(int32_t cell_index);
+
   const Operator* TypeOf();
   const Operator* InstanceOf();
   const Operator* OrdinaryHasInstance();
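
The js-typed-lowering.cc hunk that follows decodes the int32_t cell_index parameter these operators carry, so the encoding is worth making explicit: exports are numbered 1, 2, 3, ... and imports -1, -2, -3, ..., each mapping to a zero-based slot of the module's regular-exports or regular-imports array. A small sketch of that decode, assuming a cell index of zero never occurs (type and function names here are stand-ins for ModuleDescriptor's):

// Stand-alone model of the cell-index decode used by ReduceJSLoadModule.
#include <cassert>
#include <cstdint>

enum class CellIndexKind { kExport, kImport };

CellIndexKind GetCellIndexKind(int32_t cell_index) {
  assert(cell_index != 0);  // assumption: 0 is never a valid cell index
  return cell_index > 0 ? CellIndexKind::kExport : CellIndexKind::kImport;
}

int32_t CellIndexToArraySlot(int32_t cell_index) {
  // Exports live in the regular-exports array at cell_index - 1; imports
  // live in the regular-imports array at -cell_index - 1.
  return GetCellIndexKind(cell_index) == CellIndexKind::kExport
             ? cell_index - 1
             : -cell_index - 1;
}

int main() {
  assert(CellIndexToArraySlot(1) == 0);   // first regular export
  assert(CellIndexToArraySlot(-2) == 1);  // second regular import
  return 0;
}
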
diff --git a/src/compiler/js-typed-lowering.cc b/src/compiler/js-typed-lowering.cc
index 82df4ed..dbbeca6 100644
--- a/src/compiler/js-typed-lowering.cc
+++ b/src/compiler/js-typed-lowering.cc
@@ -4,6 +4,7 @@
 
 #include "src/compiler/js-typed-lowering.h"
 
+#include "src/ast/modules.h"
 #include "src/builtins/builtins-utils.h"
 #include "src/code-factory.h"
 #include "src/compilation-dependencies.h"
@@ -82,16 +83,13 @@
     if (BothInputsAre(Type::String()) ||
         ((lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) &&
          BinaryOperationHintOf(node_->op()) == BinaryOperationHint::kString)) {
-      if (right_type()->IsConstant() &&
-          right_type()->AsConstant()->Value()->IsString()) {
-        Handle<String> right_string =
-            Handle<String>::cast(right_type()->AsConstant()->Value());
+      HeapObjectBinopMatcher m(node_);
+      if (m.right().HasValue() && m.right().Value()->IsString()) {
+        Handle<String> right_string = Handle<String>::cast(m.right().Value());
         if (right_string->length() >= ConsString::kMinLength) return true;
       }
-      if (left_type()->IsConstant() &&
-          left_type()->AsConstant()->Value()->IsString()) {
-        Handle<String> left_string =
-            Handle<String>::cast(left_type()->AsConstant()->Value());
+      if (m.left().HasValue() && m.left().Value()->IsString()) {
+        Handle<String> left_string = Handle<String>::cast(m.left().Value());
         if (left_string->length() >= ConsString::kMinLength) {
           // The invariant for ConsString requires the left hand side to be
           // a sequential or external string if the right hand side is the
@@ -454,7 +452,6 @@
 // - immediately put in type bounds for all new nodes
 // - relax effects from generic but not-side-effecting operations
 
-
 JSTypedLowering::JSTypedLowering(Editor* editor,
                                  CompilationDependencies* dependencies,
                                  Flags flags, JSGraph* jsgraph, Zone* zone)
@@ -463,7 +460,7 @@
       flags_(flags),
       jsgraph_(jsgraph),
       the_hole_type_(
-          Type::Constant(factory()->the_hole_value(), graph()->zone())),
+          Type::HeapConstant(factory()->the_hole_value(), graph()->zone())),
       type_cache_(TypeCache::Get()) {
   for (size_t k = 0; k < arraysize(shifted_int32_ranges_); ++k) {
     double min = kMinInt / (1 << k);
@@ -529,7 +526,7 @@
   NumberOperationHint hint;
   if (r.GetBinaryNumberOperationHint(&hint)) {
     if (hint == NumberOperationHint::kNumberOrOddball &&
-        r.BothInputsAre(Type::PlainPrimitive())) {
+        r.BothInputsAre(Type::NumberOrOddball())) {
       r.ConvertInputsToNumber();
       return r.ChangeToPureOperator(r.NumberOp(), Type::Number());
     }
@@ -604,21 +601,20 @@
   }
 
   // Determine the {first} length.
+  HeapObjectBinopMatcher m(node);
   Node* first_length =
-      first_type->IsConstant()
+      (m.left().HasValue() && m.left().Value()->IsString())
           ? jsgraph()->Constant(
-                Handle<String>::cast(first_type->AsConstant()->Value())
-                    ->length())
+                Handle<String>::cast(m.left().Value())->length())
           : effect = graph()->NewNode(
                 simplified()->LoadField(AccessBuilder::ForStringLength()),
                 first, effect, control);
 
   // Determine the {second} length.
   Node* second_length =
-      second_type->IsConstant()
+      (m.right().HasValue() && m.right().Value()->IsString())
           ? jsgraph()->Constant(
-                Handle<String>::cast(second_type->AsConstant()->Value())
-                    ->length())
+                Handle<String>::cast(m.right().Value())->length())
           : effect = graph()->NewNode(
                 simplified()->LoadField(AccessBuilder::ForStringLength()),
                 second, effect, control);
@@ -630,33 +626,44 @@
   // Check if we would overflow the allowed maximum string length.
   Node* check = graph()->NewNode(simplified()->NumberLessThanOrEqual(), length,
                                  jsgraph()->Constant(String::kMaxLength));
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* efalse = effect;
-  {
-    // Throw a RangeError in case of overflow.
-    Node* vfalse = efalse = graph()->NewNode(
-        javascript()->CallRuntime(Runtime::kThrowInvalidStringLength), context,
-        frame_state, efalse, if_false);
-    if_false = graph()->NewNode(common()->IfSuccess(), vfalse);
-    if_false = graph()->NewNode(common()->Throw(), vfalse, efalse, if_false);
-    // TODO(bmeurer): This should be on the AdvancedReducer somehow.
-    NodeProperties::MergeControlToEnd(graph(), common(), if_false);
-    Revisit(graph()->end());
+  if (isolate()->IsStringLengthOverflowIntact()) {
+    // Add a code dependency on the string length overflow protector.
+    dependencies()->AssumePropertyCell(factory()->string_length_protector());
 
-    // Update potential {IfException} uses of {node} to point to the
-    // %ThrowInvalidStringLength runtime call node instead.
-    for (Edge edge : node->use_edges()) {
-      if (edge.from()->opcode() == IrOpcode::kIfException) {
-        DCHECK(NodeProperties::IsControlEdge(edge) ||
-               NodeProperties::IsEffectEdge(edge));
-        edge.UpdateTo(vfalse);
-        Revisit(edge.from());
+    // We can just deoptimize if the {check} fails. Besides generating a
+    // shorter code sequence than the version below, this has the additional
+    // benefit of not holding on to the lazy {frame_state} and thus potentially
+    // reduces the number of live ranges and allows for more truncations.
+    effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+  } else {
+    Node* branch =
+        graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+    Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+    Node* efalse = effect;
+    {
+      // Throw a RangeError in case of overflow.
+      Node* vfalse = efalse = graph()->NewNode(
+          javascript()->CallRuntime(Runtime::kThrowInvalidStringLength),
+          context, frame_state, efalse, if_false);
+      if_false = graph()->NewNode(common()->IfSuccess(), vfalse);
+      if_false = graph()->NewNode(common()->Throw(), vfalse, efalse, if_false);
+      // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+      NodeProperties::MergeControlToEnd(graph(), common(), if_false);
+      Revisit(graph()->end());
+
+      // Update potential {IfException} uses of {node} to point to the
+      // %ThrowInvalidStringLength runtime call node instead.
+      for (Edge edge : node->use_edges()) {
+        if (edge.from()->opcode() == IrOpcode::kIfException) {
+          DCHECK(NodeProperties::IsControlEdge(edge) ||
+                 NodeProperties::IsEffectEdge(edge));
+          edge.UpdateTo(vfalse);
+          Revisit(edge.from());
+        }
       }
     }
+    control = graph()->NewNode(common()->IfTrue(), branch);
   }
-  control = graph()->NewNode(common()->IfTrue(), branch);
 
   // Figure out the map for the resulting ConsString.
   // TODO(turbofan): We currently just use the cons_string_map here for
@@ -676,7 +683,7 @@
                             value, value_map, effect, control);
   effect = graph()->NewNode(
       simplified()->StoreField(AccessBuilder::ForNameHashField()), value,
-      jsgraph()->Uint32Constant(Name::kEmptyHashField), effect, control);
+      jsgraph()->Constant(Name::kEmptyHashField), effect, control);
   effect = graph()->NewNode(
       simplified()->StoreField(AccessBuilder::ForStringLength()), value, length,
       effect, control);
@@ -768,6 +775,35 @@
   }
 }
 
+Reduction JSTypedLowering::ReduceJSTypeOf(Node* node) {
+  Node* const input = node->InputAt(0);
+  Type* type = NodeProperties::GetType(input);
+  Factory* const f = factory();
+  if (type->Is(Type::Boolean())) {
+    return Replace(jsgraph()->Constant(f->boolean_string()));
+  } else if (type->Is(Type::Number())) {
+    return Replace(jsgraph()->Constant(f->number_string()));
+  } else if (type->Is(Type::String())) {
+    return Replace(jsgraph()->Constant(f->string_string()));
+  } else if (type->Is(Type::Symbol())) {
+    return Replace(jsgraph()->Constant(f->symbol_string()));
+  } else if (type->Is(Type::Union(Type::Undefined(), Type::OtherUndetectable(),
+                                  graph()->zone()))) {
+    return Replace(jsgraph()->Constant(f->undefined_string()));
+  } else if (type->Is(Type::Null())) {
+    return Replace(jsgraph()->Constant(f->object_string()));
+  } else if (type->Is(Type::Function())) {
+    return Replace(jsgraph()->Constant(f->function_string()));
+  } else if (type->IsHeapConstant()) {
+    return Replace(jsgraph()->Constant(
+        Object::TypeOf(isolate(), type->AsHeapConstant()->Value())));
+  } else if (type->IsOtherNumberConstant()) {
+    return Replace(jsgraph()->Constant(f->number_string()));
+  }
+
+  return NoChange();
+}
+
 Reduction JSTypedLowering::ReduceJSEqualTypeOf(Node* node, bool invert) {
   HeapObjectBinopMatcher m(node);
   if (m.left().IsJSTypeOf() && m.right().HasValue() &&
@@ -949,6 +985,17 @@
   return NoChange();
 }
 
+Reduction JSTypedLowering::ReduceJSToName(Node* node) {
+  Node* const input = NodeProperties::GetValueInput(node, 0);
+  Type* const input_type = NodeProperties::GetType(input);
+  if (input_type->Is(Type::Name())) {
+    // JSToName(x:name) => x
+    ReplaceWithValue(node, input);
+    return Replace(input);
+  }
+  return NoChange();
+}
+
 Reduction JSTypedLowering::ReduceJSToLength(Node* node) {
   Node* input = NodeProperties::GetValueInput(node, 0);
   Type* input_type = NodeProperties::GetType(input);
@@ -976,12 +1023,17 @@
 Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
   // Try constant-folding of JSToNumber with constant inputs.
   Type* input_type = NodeProperties::GetType(input);
-  if (input_type->IsConstant()) {
-    Handle<Object> input_value = input_type->AsConstant()->Value();
-    if (input_value->IsString()) {
+  if (input_type->Is(Type::String())) {
+    HeapObjectMatcher m(input);
+    if (m.HasValue() && m.Value()->IsString()) {
+      Handle<Object> input_value = m.Value();
       return Replace(jsgraph()->Constant(
           String::ToNumber(Handle<String>::cast(input_value))));
-    } else if (input_value->IsOddball()) {
+    }
+  }
+  if (input_type->IsHeapConstant()) {
+    Handle<Object> input_value = input_type->AsHeapConstant()->Value();
+    if (input_value->IsOddball()) {
       return Replace(jsgraph()->Constant(
           Oddball::ToNumber(Handle<Oddball>::cast(input_value))));
     }
@@ -1270,12 +1322,12 @@
   Node* control = NodeProperties::GetControlInput(node);
 
   // Check if the {constructor} is a (known) JSFunction.
-  if (!constructor_type->IsConstant() ||
-      !constructor_type->AsConstant()->Value()->IsJSFunction()) {
+  if (!constructor_type->IsHeapConstant() ||
+      !constructor_type->AsHeapConstant()->Value()->IsJSFunction()) {
     return NoChange();
   }
   Handle<JSFunction> function =
-      Handle<JSFunction>::cast(constructor_type->AsConstant()->Value());
+      Handle<JSFunction>::cast(constructor_type->AsHeapConstant()->Value());
 
   // Check if the {function} already has an initial map (i.e. the
   // {function} has been used as a constructor at least once).
@@ -1457,6 +1509,81 @@
   return Changed(node);
 }
 
+Reduction JSTypedLowering::ReduceJSLoadModule(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSLoadModule, node->opcode());
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+
+  int32_t cell_index = OpParameter<int32_t>(node);
+  Node* module = NodeProperties::GetValueInput(node, 0);
+
+  Node* array;
+  int index;
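+  // Module cells use 1-based indices, positive for exports and negative for
+  // imports (see ModuleDescriptor::GetCellIndexKind); convert the cell index
+  // into a 0-based slot in the corresponding FixedArray.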
+  if (ModuleDescriptor::GetCellIndexKind(cell_index) ==
+      ModuleDescriptor::kExport) {
+    array = effect = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForModuleRegularExports()),
+        module, effect, control);
+    index = cell_index - 1;
+  } else {
+    DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
+              ModuleDescriptor::kImport);
+    array = effect = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForModuleRegularImports()),
+        module, effect, control);
+    index = -cell_index - 1;
+  }
+
+  Node* cell = effect = graph()->NewNode(
+      simplified()->LoadField(AccessBuilder::ForFixedArraySlot(index)), array,
+      effect, control);
+
+  Node* value = effect =
+      graph()->NewNode(simplified()->LoadField(AccessBuilder::ForCellValue()),
+                       cell, effect, control);
+
+  ReplaceWithValue(node, value, effect, control);
+  return Changed(value);
+}
+
+Reduction JSTypedLowering::ReduceJSStoreModule(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSStoreModule, node->opcode());
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+
+  int32_t cell_index = OpParameter<int32_t>(node);
+  Node* module = NodeProperties::GetValueInput(node, 0);
+  Node* value = NodeProperties::GetValueInput(node, 1);
+
+  Node* array;
+  int index;
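+  // Same cell-index decoding as in ReduceJSLoadModule above.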
+  if (ModuleDescriptor::GetCellIndexKind(cell_index) ==
+      ModuleDescriptor::kExport) {
+    array = effect = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForModuleRegularExports()),
+        module, effect, control);
+    index = cell_index - 1;
+  } else {
+    DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
+              ModuleDescriptor::kImport);
+    array = effect = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForModuleRegularImports()),
+        module, effect, control);
+    index = -cell_index - 1;
+  }
+
+  Node* cell = effect = graph()->NewNode(
+      simplified()->LoadField(AccessBuilder::ForFixedArraySlot(index)), array,
+      effect, control);
+
+  effect =
+      graph()->NewNode(simplified()->StoreField(AccessBuilder::ForCellValue()),
+                       cell, value, effect, control);
+
+  ReplaceWithValue(node, effect, effect, control);
+  return Changed(value);
+}
+
 Reduction JSTypedLowering::ReduceJSConvertReceiver(Node* node) {
   DCHECK_EQ(IrOpcode::kJSConvertReceiver, node->opcode());
   ConvertReceiverMode mode = ConvertReceiverModeOf(node->op());
@@ -1478,9 +1605,9 @@
   // with the global proxy unconditionally.
   if (receiver_type->Is(Type::NullOrUndefined()) ||
       mode == ConvertReceiverMode::kNullOrUndefined) {
-    if (context_type->IsConstant()) {
+    if (context_type->IsHeapConstant()) {
       Handle<JSObject> global_proxy(
-          Handle<Context>::cast(context_type->AsConstant()->Value())
+          Handle<Context>::cast(context_type->AsHeapConstant()->Value())
               ->global_proxy(),
           isolate());
       receiver = jsgraph()->Constant(global_proxy);
@@ -1583,9 +1710,9 @@
   Node* eglobal = effect;
   Node* rglobal;
   {
-    if (context_type->IsConstant()) {
+    if (context_type->IsHeapConstant()) {
       Handle<JSObject> global_proxy(
-          Handle<Context>::cast(context_type->AsConstant()->Value())
+          Handle<Context>::cast(context_type->AsHeapConstant()->Value())
               ->global_proxy(),
           isolate());
       rglobal = jsgraph()->Constant(global_proxy);
@@ -1640,6 +1767,7 @@
   const bool is_construct = (node->opcode() == IrOpcode::kJSCallConstruct);
 
   DCHECK(Builtins::HasCppImplementation(builtin_index));
+  DCHECK_EQ(0, flags & CallDescriptor::kSupportsTailCalls);
 
   Node* target = NodeProperties::GetValueInput(node, 0);
   Node* new_target = is_construct
@@ -1664,7 +1792,7 @@
   }
 
   const int argc = arity + BuiltinArguments::kNumExtraArgsWithReceiver;
-  Node* argc_node = jsgraph->Int32Constant(argc);
+  Node* argc_node = jsgraph->Constant(argc);
 
   static const int kStubAndReceiver = 2;
   int cursor = arity + kStubAndReceiver;
@@ -1708,10 +1836,10 @@
   Node* control = NodeProperties::GetControlInput(node);
 
   // Check if {target} is a known JSFunction.
-  if (target_type->IsConstant() &&
-      target_type->AsConstant()->Value()->IsJSFunction()) {
+  if (target_type->IsHeapConstant() &&
+      target_type->AsHeapConstant()->Value()->IsJSFunction()) {
     Handle<JSFunction> function =
-        Handle<JSFunction>::cast(target_type->AsConstant()->Value());
+        Handle<JSFunction>::cast(target_type->AsHeapConstant()->Value());
     Handle<SharedFunctionInfo> shared(function->shared(), isolate());
     const int builtin_index = shared->construct_stub()->builtin_index();
     const bool is_builtin = (builtin_index != -1);
@@ -1740,7 +1868,7 @@
       node->InsertInput(graph()->zone(), 0,
                         jsgraph()->HeapConstant(callable.code()));
       node->InsertInput(graph()->zone(), 2, new_target);
-      node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
+      node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
       node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
       node->InsertInput(graph()->zone(), 5, jsgraph()->UndefinedConstant());
       NodeProperties::ChangeOp(
@@ -1759,7 +1887,7 @@
     node->InsertInput(graph()->zone(), 0,
                       jsgraph()->HeapConstant(callable.code()));
     node->InsertInput(graph()->zone(), 2, new_target);
-    node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
+    node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
     node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
     NodeProperties::ChangeOp(
         node, common()->Call(Linkage::GetStubCallDescriptor(
@@ -1793,10 +1921,10 @@
   }
 
   // Check if {target} is a known JSFunction.
-  if (target_type->IsConstant() &&
-      target_type->AsConstant()->Value()->IsJSFunction()) {
+  if (target_type->IsHeapConstant() &&
+      target_type->AsHeapConstant()->Value()->IsJSFunction()) {
     Handle<JSFunction> function =
-        Handle<JSFunction>::cast(target_type->AsConstant()->Value());
+        Handle<JSFunction>::cast(target_type->AsHeapConstant()->Value());
     Handle<SharedFunctionInfo> shared(function->shared(), isolate());
     const int builtin_index = shared->code()->builtin_index();
     const bool is_builtin = (builtin_index != -1);
@@ -1830,7 +1958,7 @@
     }
 
     Node* new_target = jsgraph()->UndefinedConstant();
-    Node* argument_count = jsgraph()->Int32Constant(arity);
+    Node* argument_count = jsgraph()->Constant(arity);
     if (NeedsArgumentAdaptorFrame(shared, arity)) {
       // Patch {node} to an indirect call via the ArgumentsAdaptorTrampoline.
       Callable callable = CodeFactory::ArgumentAdaptor(isolate());
@@ -1840,12 +1968,13 @@
       node->InsertInput(graph()->zone(), 3, argument_count);
       node->InsertInput(
           graph()->zone(), 4,
-          jsgraph()->Int32Constant(shared->internal_formal_parameter_count()));
+          jsgraph()->Constant(shared->internal_formal_parameter_count()));
       NodeProperties::ChangeOp(
           node, common()->Call(Linkage::GetStubCallDescriptor(
                     isolate(), graph()->zone(), callable.descriptor(),
                     1 + arity, flags)));
-    } else if (is_builtin && Builtins::HasCppImplementation(builtin_index)) {
+    } else if (is_builtin && Builtins::HasCppImplementation(builtin_index) &&
+               ((flags & CallDescriptor::kSupportsTailCalls) == 0)) {
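+      // A direct CEntryStub call cannot be tail-called; see the corresponding
+      // DCHECK in ReduceBuiltin.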
       // Patch {node} to a direct CEntryStub call.
       ReduceBuiltin(isolate(), jsgraph(), node, builtin_index, arity, flags);
     } else {
@@ -1871,7 +2000,7 @@
     Callable callable = CodeFactory::CallFunction(isolate(), convert_mode);
     node->InsertInput(graph()->zone(), 0,
                       jsgraph()->HeapConstant(callable.code()));
-    node->InsertInput(graph()->zone(), 2, jsgraph()->Int32Constant(arity));
+    node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(arity));
     NodeProperties::ChangeOp(
         node, common()->Call(Linkage::GetStubCallDescriptor(
                   isolate(), graph()->zone(), callable.descriptor(), 1 + arity,
@@ -2074,12 +2203,16 @@
       return ReduceJSToInteger(node);
     case IrOpcode::kJSToLength:
       return ReduceJSToLength(node);
+    case IrOpcode::kJSToName:
+      return ReduceJSToName(node);
     case IrOpcode::kJSToNumber:
       return ReduceJSToNumber(node);
     case IrOpcode::kJSToString:
       return ReduceJSToString(node);
     case IrOpcode::kJSToObject:
       return ReduceJSToObject(node);
+    case IrOpcode::kJSTypeOf:
+      return ReduceJSTypeOf(node);
     case IrOpcode::kJSLoadNamed:
       return ReduceJSLoadNamed(node);
     case IrOpcode::kJSLoadProperty:
@@ -2090,6 +2223,10 @@
       return ReduceJSLoadContext(node);
     case IrOpcode::kJSStoreContext:
       return ReduceJSStoreContext(node);
+    case IrOpcode::kJSLoadModule:
+      return ReduceJSLoadModule(node);
+    case IrOpcode::kJSStoreModule:
+      return ReduceJSStoreModule(node);
     case IrOpcode::kJSConvertReceiver:
       return ReduceJSConvertReceiver(node);
     case IrOpcode::kJSCallConstruct:
diff --git a/src/compiler/js-typed-lowering.h b/src/compiler/js-typed-lowering.h
index b0cf1f4..3e71022 100644
--- a/src/compiler/js-typed-lowering.h
+++ b/src/compiler/js-typed-lowering.h
@@ -5,9 +5,11 @@
 #ifndef V8_COMPILER_JS_TYPED_LOWERING_H_
 #define V8_COMPILER_JS_TYPED_LOWERING_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/base/flags.h"
 #include "src/compiler/graph-reducer.h"
 #include "src/compiler/opcodes.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -26,7 +28,8 @@
 class TypeCache;
 
 // Lowers JS-level operators to simplified operators based on types.
-class JSTypedLowering final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE JSTypedLowering final
+    : public NON_EXPORTED_BASE(AdvancedReducer) {
  public:
   // Flags that control the mode of operation.
   enum Flag {
@@ -52,12 +55,15 @@
   Reduction ReduceJSOrdinaryHasInstance(Node* node);
   Reduction ReduceJSLoadContext(Node* node);
   Reduction ReduceJSStoreContext(Node* node);
+  Reduction ReduceJSLoadModule(Node* node);
+  Reduction ReduceJSStoreModule(Node* node);
   Reduction ReduceJSEqualTypeOf(Node* node, bool invert);
   Reduction ReduceJSEqual(Node* node, bool invert);
   Reduction ReduceJSStrictEqual(Node* node, bool invert);
   Reduction ReduceJSToBoolean(Node* node);
   Reduction ReduceJSToInteger(Node* node);
   Reduction ReduceJSToLength(Node* node);
+  Reduction ReduceJSToName(Node* node);
   Reduction ReduceJSToNumberInput(Node* input);
   Reduction ReduceJSToNumber(Node* node);
   Reduction ReduceJSToStringInput(Node* input);
@@ -70,6 +76,7 @@
   Reduction ReduceJSGeneratorStore(Node* node);
   Reduction ReduceJSGeneratorRestoreContinuation(Node* node);
   Reduction ReduceJSGeneratorRestoreRegister(Node* node);
+  Reduction ReduceJSTypeOf(Node* node);
   Reduction ReduceNumberBinop(Node* node);
   Reduction ReduceInt32Binop(Node* node);
   Reduction ReduceUI32Shift(Node* node, Signedness signedness);
diff --git a/src/compiler/jump-threading.cc b/src/compiler/jump-threading.cc
index 5554282..d7d4f91 100644
--- a/src/compiler/jump-threading.cc
+++ b/src/compiler/jump-threading.cc
@@ -143,7 +143,7 @@
                                     InstructionSequence* code) {
   if (!FLAG_turbo_jt) return;
 
-  Zone local_zone(code->isolate()->allocator());
+  Zone local_zone(code->isolate()->allocator(), ZONE_NAME);
   ZoneVector<bool> skip(static_cast<int>(result.size()), false, &local_zone);
 
   // Skip empty blocks when the previous block doesn't fall through.
diff --git a/src/compiler/linkage.cc b/src/compiler/linkage.cc
index 523ce47..971ea72 100644
--- a/src/compiler/linkage.cc
+++ b/src/compiler/linkage.cc
@@ -107,6 +107,23 @@
   return HasSameReturnLocationsAs(CallDescriptorOf(node->op()));
 }
 
+int CallDescriptor::CalculateFixedFrameSize() const {
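+  // The fixed frame size depends on the frame type; JS frames reserve an
+  // extra slot for the pushed argument count when kPushArgumentCount is set
+  // (see OptimizedBuiltinFrameConstants).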
+  switch (kind_) {
+    case kCallJSFunction:
+      return PushArgumentCount()
+                 ? OptimizedBuiltinFrameConstants::kFixedSlotCount
+                 : StandardFrameConstants::kFixedSlotCount;
+      break;
+    case kCallAddress:
+      return CommonFrameConstants::kFixedSlotCountAboveFp +
+             CommonFrameConstants::kCPSlotCount;
+      break;
+    case kCallCodeObject:
+      return TypedFrameConstants::kFixedSlotCount;
+  }
+  UNREACHABLE();
+  return 0;
+}
 
 CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
   DCHECK(!info->IsStub());
@@ -168,8 +185,6 @@
     case Runtime::kInlineIsRegExp:
     case Runtime::kInlineIsSmi:
     case Runtime::kInlineIsTypedArray:
-    case Runtime::kInlineRegExpFlags:
-    case Runtime::kInlineRegExpSource:
       return false;
 
     default:
diff --git a/src/compiler/linkage.h b/src/compiler/linkage.h
index 6f302bc..b515aca 100644
--- a/src/compiler/linkage.h
+++ b/src/compiler/linkage.h
@@ -5,10 +5,12 @@
 #ifndef V8_COMPILER_LINKAGE_H_
 #define V8_COMPILER_LINKAGE_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/base/flags.h"
 #include "src/compiler/frame.h"
 #include "src/compiler/operator.h"
 #include "src/frames.h"
+#include "src/globals.h"
 #include "src/machine-type.h"
 #include "src/runtime/runtime.h"
 #include "src/zone/zone.h"
@@ -161,7 +163,8 @@
 
 // Describes a call to various parts of the compiler. Every call has the notion
 // of a "target", which is the first input to the call.
-class CallDescriptor final : public ZoneObject {
+class V8_EXPORT_PRIVATE CallDescriptor final
+    : public NON_EXPORTED_BASE(ZoneObject) {
  public:
   // Describes the kind of this call, which determines the target.
   enum Kind {
@@ -184,7 +187,9 @@
     // Causes the code generator to initialize the root register.
     kInitializeRootRegister = 1u << 7,
     // Does not ever try to allocate space on our heap.
-    kNoAllocate = 1u << 8
+    kNoAllocate = 1u << 8,
+    // Push argument count as part of function prologue.
+    kPushArgumentCount = 1u << 9
   };
   typedef base::Flags<Flag> Flags;
 
@@ -246,6 +251,7 @@
   bool NeedsFrameState() const { return flags() & kNeedsFrameState; }
   bool SupportsTailCalls() const { return flags() & kSupportsTailCalls; }
   bool UseNativeStack() const { return flags() & kUseNativeStack; }
+  bool PushArgumentCount() const { return flags() & kPushArgumentCount; }
   bool InitializeRootRegister() const {
     return flags() & kInitializeRootRegister;
   }
@@ -293,6 +299,8 @@
 
   bool CanTailCall(const Node* call) const;
 
+  int CalculateFixedFrameSize() const;
+
  private:
   friend class Linkage;
 
@@ -313,7 +321,8 @@
 DEFINE_OPERATORS_FOR_FLAGS(CallDescriptor::Flags)
 
 std::ostream& operator<<(std::ostream& os, const CallDescriptor& d);
-std::ostream& operator<<(std::ostream& os, const CallDescriptor::Kind& k);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+                                           const CallDescriptor::Kind& k);
 
 // Defines the linkage for a compilation, including the calling conventions
 // for incoming parameters and return value(s) as well as the outgoing calling
@@ -329,7 +338,7 @@
 // Call[JSFunction]       function,   rcvr,  arg 1, [...], new, #arg, context
 // Call[Runtime]          CEntryStub, arg 1, arg 2, [...], fun, #arg, context
 // Call[BytecodeDispatch] address,    arg 1, arg 2, [...]
-class Linkage : public ZoneObject {
+class V8_EXPORT_PRIVATE Linkage : public NON_EXPORTED_BASE(ZoneObject) {
  public:
   explicit Linkage(CallDescriptor* incoming) : incoming_(incoming) {}
 
diff --git a/src/compiler/liveness-analyzer.cc b/src/compiler/liveness-analyzer.cc
index fe458b8..0cf1333 100644
--- a/src/compiler/liveness-analyzer.cc
+++ b/src/compiler/liveness-analyzer.cc
@@ -13,10 +13,13 @@
 namespace internal {
 namespace compiler {
 
-
-LivenessAnalyzer::LivenessAnalyzer(size_t local_count, Zone* zone)
-    : zone_(zone), blocks_(zone), local_count_(local_count), queue_(zone) {}
-
+LivenessAnalyzer::LivenessAnalyzer(size_t local_count, bool has_accumulator,
+                                   Zone* zone)
+    : zone_(zone),
+      blocks_(zone),
+      local_count_(local_count),
+      has_accumulator_(has_accumulator),
+      queue_(zone) {}
 
 void LivenessAnalyzer::Print(std::ostream& os) {
   for (auto block : blocks_) {
@@ -28,8 +31,8 @@
 
 LivenessAnalyzerBlock* LivenessAnalyzer::NewBlock() {
   LivenessAnalyzerBlock* result =
-      new (zone()->New(sizeof(LivenessAnalyzerBlock)))
-          LivenessAnalyzerBlock(blocks_.size(), local_count_, zone());
+      new (zone()->New(sizeof(LivenessAnalyzerBlock))) LivenessAnalyzerBlock(
+          blocks_.size(), local_count_, has_accumulator_, zone());
   blocks_.push_back(result);
   return result;
 }
@@ -52,8 +55,8 @@
 
 
 void LivenessAnalyzer::Run(NonLiveFrameStateSlotReplacer* replacer) {
-  if (local_count_ == 0) {
-    // No local variables => nothing to do.
+  if (local_count_ == 0 && !has_accumulator_) {
+    // No variables => nothing to do.
     return;
   }
 
@@ -64,7 +67,8 @@
   }
 
   // Compute the fix-point.
-  BitVector working_area(static_cast<int>(local_count_), zone_);
+  BitVector working_area(
+      static_cast<int>(local_count_) + (has_accumulator_ ? 1 : 0), zone_);
   while (!queue_.empty()) {
     LivenessAnalyzerBlock* block = queue_.front();
     queue_.pop();
@@ -84,11 +88,12 @@
 }
 
 LivenessAnalyzerBlock::LivenessAnalyzerBlock(size_t id, size_t local_count,
-                                             Zone* zone)
+                                             bool has_accumulator, Zone* zone)
     : entries_(zone),
       predecessors_(zone),
-      live_(local_count == 0 ? 1 : static_cast<int>(local_count), zone),
+      live_(static_cast<int>(local_count) + (has_accumulator ? 1 : 0), zone),
       queued_(false),
+      has_accumulator_(has_accumulator),
       id_(id) {}
 
 void LivenessAnalyzerBlock::Process(BitVector* result,
@@ -123,32 +128,52 @@
 
 void NonLiveFrameStateSlotReplacer::ClearNonLiveFrameStateSlots(
     Node* frame_state, BitVector* liveness) {
+  DCHECK_EQ(liveness->length(), permanently_live_.length());
+
   DCHECK_EQ(frame_state->opcode(), IrOpcode::kFrameState);
   Node* locals_state = frame_state->InputAt(1);
   DCHECK_EQ(locals_state->opcode(), IrOpcode::kStateValues);
-  int count = static_cast<int>(StateValuesAccess(locals_state).size());
-  DCHECK_EQ(count == 0 ? 1 : count, liveness->length());
+  int count = liveness->length() - (has_accumulator_ ? 1 : 0);
+  DCHECK_EQ(count, static_cast<int>(StateValuesAccess(locals_state).size()));
   for (int i = 0; i < count; i++) {
-    bool live = liveness->Contains(i) || permanently_live_.Contains(i);
-    if (!live || locals_state->InputAt(i) != replacement_node_) {
+    if (!liveness->Contains(i) && !permanently_live_.Contains(i)) {
       Node* new_values = ClearNonLiveStateValues(locals_state, liveness);
       frame_state->ReplaceInput(1, new_values);
       break;
     }
   }
+
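+  // The accumulator, when present, is tracked as the last liveness bit and
+  // is stored in a separate one-element StateValues input of the frame state.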
+  if (has_accumulator_) {
+    DCHECK_EQ(frame_state->InputAt(2)->opcode(), IrOpcode::kStateValues);
+    DCHECK_EQ(
+        static_cast<int>(StateValuesAccess(frame_state->InputAt(2)).size()), 1);
+    int index = liveness->length() - 1;
+    if (!liveness->Contains(index) && !permanently_live_.Contains(index)) {
+      Node* new_value =
+          state_values_cache()->GetNodeForValues(&replacement_node_, 1);
+      frame_state->ReplaceInput(2, new_value);
+    }
+  }
 }
 
 
 Node* NonLiveFrameStateSlotReplacer::ClearNonLiveStateValues(
     Node* values, BitVector* liveness) {
   DCHECK(inputs_buffer_.empty());
-  for (StateValuesAccess::TypedNode node : StateValuesAccess(values)) {
+
+  int var = 0;
+  for (Node* value_node : values->inputs()) {
+    // Make sure this isn't a state value tree.
+    DCHECK(value_node->opcode() != IrOpcode::kStateValues);
+
     // Index of the next variable is its future index in the inputs buffer,
     // i.e., the buffer's size.
-    int var = static_cast<int>(inputs_buffer_.size());
     bool live = liveness->Contains(var) || permanently_live_.Contains(var);
-    inputs_buffer_.push_back(live ? node.node : replacement_node_);
+    inputs_buffer_.push_back(live ? value_node : replacement_node_);
+
+    var++;
   }
+
   Node* result = state_values_cache()->GetNodeForValues(
       inputs_buffer_.empty() ? nullptr : &(inputs_buffer_.front()),
       inputs_buffer_.size());
@@ -175,10 +200,18 @@
     os << "    ";
     switch (entry.kind()) {
       case Entry::kLookup:
-        os << "- Lookup " << entry.var() << std::endl;
+        if (has_accumulator_ && entry.var() == live_.length() - 1) {
+          os << "- Lookup accumulator" << std::endl;
+        } else {
+          os << "- Lookup " << entry.var() << std::endl;
+        }
         break;
       case Entry::kBind:
-        os << "- Bind " << entry.var() << std::endl;
+        if (has_accumulator_ && entry.var() == live_.length() - 1) {
+          os << "- Bind accumulator" << std::endl;
+        } else {
+          os << "- Bind " << entry.var() << std::endl;
+        }
         break;
       case Entry::kCheckpoint:
         os << "- Checkpoint " << entry.node()->id() << std::endl;
diff --git a/src/compiler/liveness-analyzer.h b/src/compiler/liveness-analyzer.h
index 8a3d715..63fc52c 100644
--- a/src/compiler/liveness-analyzer.h
+++ b/src/compiler/liveness-analyzer.h
@@ -7,6 +7,7 @@
 
 #include "src/bit-vector.h"
 #include "src/compiler/node.h"
+#include "src/globals.h"
 #include "src/zone/zone-containers.h"
 
 namespace v8 {
@@ -17,20 +18,22 @@
 class Node;
 class StateValuesCache;
 
-
 class NonLiveFrameStateSlotReplacer {
  public:
   void ClearNonLiveFrameStateSlots(Node* frame_state, BitVector* liveness);
   NonLiveFrameStateSlotReplacer(StateValuesCache* state_values_cache,
                                 Node* replacement, size_t local_count,
-                                Zone* local_zone)
+                                bool has_accumulator, Zone* local_zone)
       : replacement_node_(replacement),
         state_values_cache_(state_values_cache),
         local_zone_(local_zone),
-        permanently_live_(local_count == 0 ? 1 : static_cast<int>(local_count),
-                          local_zone),
-        inputs_buffer_(local_zone) {}
+        permanently_live_(
+            static_cast<int>(local_count) + (has_accumulator ? 1 : 0),
+            local_zone),
+        inputs_buffer_(local_zone),
+        has_accumulator_(has_accumulator) {}
 
+  // TODO(leszeks): Not used by bytecode, remove once AST graph builder is gone.
   void MarkPermanentlyLive(int var) { permanently_live_.Add(var); }
 
  private:
@@ -48,12 +51,13 @@
   Zone* local_zone_;
   BitVector permanently_live_;
   NodeVector inputs_buffer_;
+
+  bool has_accumulator_;
 };
 
-
-class LivenessAnalyzer {
+class V8_EXPORT_PRIVATE LivenessAnalyzer {
  public:
-  LivenessAnalyzer(size_t local_count, Zone* zone);
+  LivenessAnalyzer(size_t local_count, bool has_accumulator, Zone* zone);
 
   LivenessAnalyzerBlock* NewBlock();
   LivenessAnalyzerBlock* NewBlock(LivenessAnalyzerBlock* predecessor);
@@ -73,6 +77,10 @@
   ZoneDeque<LivenessAnalyzerBlock*> blocks_;
   size_t local_count_;
 
+  // TODO(leszeks): Always true for bytecode, remove once AST graph builder is
+  // gone.
+  bool has_accumulator_;
+
   ZoneQueue<LivenessAnalyzerBlock*> queue_;
 };
 
@@ -83,6 +91,17 @@
 
   void Lookup(int var) { entries_.push_back(Entry(Entry::kLookup, var)); }
   void Bind(int var) { entries_.push_back(Entry(Entry::kBind, var)); }
+  void LookupAccumulator() {
+    DCHECK(has_accumulator_);
+    // The last entry is the accumulator entry.
+    entries_.push_back(Entry(Entry::kLookup, live_.length() - 1));
+  }
+  void BindAccumulator() {
+    DCHECK(has_accumulator_);
+    // The last entry is the accumulator entry.
+    entries_.push_back(Entry(Entry::kBind, live_.length() - 1));
+  }
+
   void Checkpoint(Node* node) { entries_.push_back(Entry(node)); }
   void AddPredecessor(LivenessAnalyzerBlock* b) { predecessors_.push_back(b); }
   LivenessAnalyzerBlock* GetPredecessor() {
@@ -116,7 +135,8 @@
     Node* node_;
   };
 
-  LivenessAnalyzerBlock(size_t id, size_t local_count, Zone* zone);
+  LivenessAnalyzerBlock(size_t id, size_t local_count, bool has_accumulator,
+                        Zone* zone);
   void Process(BitVector* result, NonLiveFrameStateSlotReplacer* relaxer);
   bool UpdateLive(BitVector* working_area);
 
@@ -138,6 +158,7 @@
 
   BitVector live_;
   bool queued_;
+  bool has_accumulator_;
 
   size_t id_;
 };
diff --git a/src/compiler/load-elimination.cc b/src/compiler/load-elimination.cc
index 93c24a0..e50ebe1 100644
--- a/src/compiler/load-elimination.cc
+++ b/src/compiler/load-elimination.cc
@@ -448,6 +448,26 @@
   return this;
 }
 
+LoadElimination::AbstractState const*
+LoadElimination::AbstractState::KillFields(Node* object, Zone* zone) const {
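+  // Conservatively kill all tracked fields that may alias {object}. The
+  // state is copied lazily, only once the first field actually changes.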
+  for (size_t i = 0;; ++i) {
+    if (i == arraysize(fields_)) return this;
+    if (AbstractField const* this_field = this->fields_[i]) {
+      AbstractField const* that_field = this_field->Kill(object, zone);
+      if (that_field != this_field) {
+        AbstractState* that = new (zone) AbstractState(*this);
+        that->fields_[i] = that_field;
+        while (++i < arraysize(fields_)) {
+          if (this->fields_[i] != nullptr) {
+            that->fields_[i] = this->fields_[i]->Kill(object, zone);
+          }
+        }
+        return that;
+      }
+    }
+  }
+}
+
 Node* LoadElimination::AbstractState::LookupField(Node* object,
                                                   size_t index) const {
   if (AbstractField const* this_field = this->fields_[index]) {
@@ -662,7 +682,7 @@
     state = state->AddField(object, field_index, new_value, zone());
   } else {
     // Unsupported StoreField operator.
-    state = empty_state();
+    state = state->KillFields(object, zone());
   }
   return UpdateState(node, state);
 }
@@ -856,8 +876,11 @@
             FieldAccess const& access = FieldAccessOf(current->op());
             Node* const object = NodeProperties::GetValueInput(current, 0);
             int field_index = FieldIndexOf(access);
-            if (field_index < 0) return empty_state();
-            state = state->KillField(object, field_index, zone());
+            if (field_index < 0) {
+              state = state->KillFields(object, zone());
+            } else {
+              state = state->KillField(object, field_index, zone());
+            }
             break;
           }
           case IrOpcode::kStoreElement: {
@@ -897,6 +920,7 @@
   switch (rep) {
     case MachineRepresentation::kNone:
     case MachineRepresentation::kBit:
+    case MachineRepresentation::kSimd128:
       UNREACHABLE();
       break;
     case MachineRepresentation::kWord32:
@@ -910,16 +934,20 @@
     case MachineRepresentation::kFloat32:
       return -1;  // Currently untracked.
     case MachineRepresentation::kFloat64:
-    case MachineRepresentation::kSimd128:
-      return -1;  // Currently untracked.
+      if (kDoubleSize != kPointerSize) {
+        return -1;  // We currently only track pointer size fields.
+      }
+    // Fall through.
     case MachineRepresentation::kTaggedSigned:
     case MachineRepresentation::kTaggedPointer:
     case MachineRepresentation::kTagged:
       // TODO(bmeurer): Check that we never do overlapping load/stores of
-      // individual parts of Float64/Simd128 values.
+      // individual parts of Float64 values.
       break;
   }
-  DCHECK_EQ(kTaggedBase, access.base_is_tagged);
+  if (access.base_is_tagged != kTaggedBase) {
+    return -1;  // We currently only track tagged objects.
+  }
   return FieldIndexOf(access.offset);
 }
 
diff --git a/src/compiler/load-elimination.h b/src/compiler/load-elimination.h
index 985e690..50979e4 100644
--- a/src/compiler/load-elimination.h
+++ b/src/compiler/load-elimination.h
@@ -5,7 +5,9 @@
 #ifndef V8_COMPILER_LOAD_ELIMINATION_H_
 #define V8_COMPILER_LOAD_ELIMINATION_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -17,7 +19,8 @@
 class Graph;
 class JSGraph;
 
-class LoadElimination final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE LoadElimination final
+    : public NON_EXPORTED_BASE(AdvancedReducer) {
  public:
   LoadElimination(Editor* editor, JSGraph* jsgraph, Zone* zone)
       : AdvancedReducer(editor), node_states_(zone), jsgraph_(jsgraph) {}
@@ -164,6 +167,7 @@
                                   Zone* zone) const;
     AbstractState const* KillField(Node* object, size_t index,
                                    Zone* zone) const;
+    AbstractState const* KillFields(Node* object, Zone* zone) const;
     Node* LookupField(Node* object, size_t index) const;
 
     AbstractState const* AddElement(Node* object, Node* index, Node* value,
diff --git a/src/compiler/loop-analysis.h b/src/compiler/loop-analysis.h
index 2d0f27b..fb3e1e7 100644
--- a/src/compiler/loop-analysis.h
+++ b/src/compiler/loop-analysis.h
@@ -8,6 +8,7 @@
 #include "src/base/iterator.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/node.h"
+#include "src/globals.h"
 #include "src/zone/zone-containers.h"
 
 namespace v8 {
@@ -153,7 +154,7 @@
   ZoneVector<Node*> loop_nodes_;
 };
 
-class LoopFinder {
+class V8_EXPORT_PRIVATE LoopFinder {
  public:
   // Build a loop tree for the entire graph.
   static LoopTree* BuildLoopTree(Graph* graph, Zone* temp_zone);
diff --git a/src/compiler/loop-peeling.h b/src/compiler/loop-peeling.h
index 8b38e25..301e4b8 100644
--- a/src/compiler/loop-peeling.h
+++ b/src/compiler/loop-peeling.h
@@ -5,7 +5,9 @@
 #ifndef V8_COMPILER_LOOP_PEELING_H_
 #define V8_COMPILER_LOOP_PEELING_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/compiler/loop-analysis.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -14,7 +16,7 @@
 // Represents the output of peeling a loop, which is basically the mapping
 // from the body of the loop to the corresponding nodes in the peeled
 // iteration.
-class PeeledIteration : public ZoneObject {
+class V8_EXPORT_PRIVATE PeeledIteration : public NON_EXPORTED_BASE(ZoneObject) {
  public:
   // Maps {node} to its corresponding copy in the peeled iteration, if
   // the node was part of the body of the loop. Returns {node} otherwise.
@@ -27,7 +29,7 @@
 class CommonOperatorBuilder;
 
 // Implements loop peeling.
-class LoopPeeler {
+class V8_EXPORT_PRIVATE LoopPeeler {
  public:
   static bool CanPeel(LoopTree* loop_tree, LoopTree::Loop* loop);
   static PeeledIteration* Peel(Graph* graph, CommonOperatorBuilder* common,
diff --git a/src/compiler/machine-graph-verifier.cc b/src/compiler/machine-graph-verifier.cc
index d33ee4e..a8f7a25 100644
--- a/src/compiler/machine-graph-verifier.cc
+++ b/src/compiler/machine-graph-verifier.cc
@@ -25,7 +25,8 @@
                                 Linkage* linkage, Zone* zone)
       : schedule_(schedule),
         linkage_(linkage),
-        representation_vector_(graph->NodeCount(), zone) {
+        representation_vector_(graph->NodeCount(), MachineRepresentation::kNone,
+                               zone) {
     Run();
   }
 
@@ -234,9 +235,10 @@
 
 class MachineRepresentationChecker {
  public:
-  MachineRepresentationChecker(Schedule const* const schedule,
-                               MachineRepresentationInferrer const* const typer)
-      : schedule_(schedule), typer_(typer) {}
+  MachineRepresentationChecker(
+      Schedule const* const schedule,
+      MachineRepresentationInferrer const* const inferrer)
+      : schedule_(schedule), inferrer_(inferrer) {}
 
   void Run() {
     BasicBlockVector const* blocks = schedule_->all_blocks();
@@ -255,11 +257,11 @@
             break;
           case IrOpcode::kChangeBitToTagged:
             CHECK_EQ(MachineRepresentation::kBit,
-                     typer_->GetRepresentation(node->InputAt(0)));
+                     inferrer_->GetRepresentation(node->InputAt(0)));
             break;
           case IrOpcode::kChangeTaggedToBit:
             CHECK_EQ(MachineRepresentation::kTagged,
-                     typer_->GetRepresentation(node->InputAt(0)));
+                     inferrer_->GetRepresentation(node->InputAt(0)));
             break;
           case IrOpcode::kRoundInt64ToFloat64:
           case IrOpcode::kRoundUint64ToFloat64:
@@ -290,7 +292,7 @@
           case IrOpcode::kWord64Equal:
             CheckValueInputIsTaggedOrPointer(node, 0);
             CheckValueInputRepresentationIs(
-                node, 1, typer_->GetRepresentation(node->InputAt(0)));
+                node, 1, inferrer_->GetRepresentation(node->InputAt(0)));
             break;
           case IrOpcode::kInt64LessThan:
           case IrOpcode::kInt64LessThanOrEqual:
@@ -400,7 +402,7 @@
             }
             break;
           case IrOpcode::kPhi:
-            switch (typer_->GetRepresentation(node)) {
+            switch (inferrer_->GetRepresentation(node)) {
               case MachineRepresentation::kTagged:
               case MachineRepresentation::kTaggedPointer:
               case MachineRepresentation::kTaggedSigned:
@@ -411,7 +413,7 @@
               default:
                 for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
                   CheckValueInputRepresentationIs(
-                      node, i, typer_->GetRepresentation(node));
+                      node, i, inferrer_->GetRepresentation(node));
                 }
                 break;
             }
@@ -444,19 +446,21 @@
   void CheckValueInputRepresentationIs(Node const* node, int index,
                                        MachineRepresentation representation) {
     Node const* input = node->InputAt(index);
-    if (typer_->GetRepresentation(input) != representation) {
+    MachineRepresentation input_representation =
+        inferrer_->GetRepresentation(input);
+    if (input_representation != representation) {
       std::stringstream str;
-      str << "TypeError: node #" << node->id() << ":" << *node->op()
-          << " uses node #" << input->id() << ":" << *input->op()
-          << " which doesn't have a " << MachineReprToString(representation)
-          << " representation.";
+      str << "TypeError: node #" << node->id() << ":" << *node->op() << ":"
+          << MachineReprToString(input_representation) << " uses node #"
+          << input->id() << ":" << *input->op() << " which doesn't have a "
+          << MachineReprToString(representation) << " representation.";
       FATAL(str.str().c_str());
     }
   }
 
   void CheckValueInputIsTagged(Node const* node, int index) {
     Node const* input = node->InputAt(index);
-    switch (typer_->GetRepresentation(input)) {
+    switch (inferrer_->GetRepresentation(input)) {
       case MachineRepresentation::kTagged:
       case MachineRepresentation::kTaggedPointer:
       case MachineRepresentation::kTaggedSigned:
@@ -473,7 +477,7 @@
 
   void CheckValueInputIsTaggedOrPointer(Node const* node, int index) {
     Node const* input = node->InputAt(index);
-    switch (typer_->GetRepresentation(input)) {
+    switch (inferrer_->GetRepresentation(input)) {
       case MachineRepresentation::kTagged:
       case MachineRepresentation::kTaggedPointer:
       case MachineRepresentation::kTaggedSigned:
@@ -481,7 +485,7 @@
       default:
         break;
     }
-    if (typer_->GetRepresentation(input) !=
+    if (inferrer_->GetRepresentation(input) !=
         MachineType::PointerRepresentation()) {
       std::ostringstream str;
       str << "TypeError: node #" << node->id() << ":" << *node->op()
@@ -493,7 +497,7 @@
 
   void CheckValueInputForInt32Op(Node const* node, int index) {
     Node const* input = node->InputAt(index);
-    switch (typer_->GetRepresentation(input)) {
+    switch (inferrer_->GetRepresentation(input)) {
       case MachineRepresentation::kBit:
       case MachineRepresentation::kWord8:
       case MachineRepresentation::kWord16:
@@ -518,7 +522,9 @@
 
   void CheckValueInputForInt64Op(Node const* node, int index) {
     Node const* input = node->InputAt(index);
-    switch (typer_->GetRepresentation(input)) {
+    MachineRepresentation input_representation =
+        inferrer_->GetRepresentation(input);
+    switch (input_representation) {
       case MachineRepresentation::kWord64:
         return;
       case MachineRepresentation::kNone: {
@@ -533,15 +539,16 @@
         break;
     }
     std::ostringstream str;
-    str << "TypeError: node #" << node->id() << ":" << *node->op()
-        << " uses node #" << input->id() << ":" << *input->op()
-        << " which doesn't have a kWord64 representation.";
+    str << "TypeError: node #" << node->id() << ":" << *node->op() << ":"
+        << input_representation << " uses node #" << input->id() << ":"
+        << *input->op() << " which doesn't have a kWord64 representation.";
     FATAL(str.str().c_str());
   }
 
   void CheckValueInputForFloat32Op(Node const* node, int index) {
     Node const* input = node->InputAt(index);
-    if (MachineRepresentation::kFloat32 == typer_->GetRepresentation(input)) {
+    if (MachineRepresentation::kFloat32 ==
+        inferrer_->GetRepresentation(input)) {
       return;
     }
     std::ostringstream str;
@@ -553,7 +560,8 @@
 
   void CheckValueInputForFloat64Op(Node const* node, int index) {
     Node const* input = node->InputAt(index);
-    if (MachineRepresentation::kFloat64 == typer_->GetRepresentation(input)) {
+    if (MachineRepresentation::kFloat64 ==
+        inferrer_->GetRepresentation(input)) {
       return;
     }
     std::ostringstream str;
@@ -569,7 +577,8 @@
     bool should_log_error = false;
     for (size_t i = 0; i < desc->InputCount(); ++i) {
       Node const* input = node->InputAt(static_cast<int>(i));
-      MachineRepresentation const input_type = typer_->GetRepresentation(input);
+      MachineRepresentation const input_type =
+          inferrer_->GetRepresentation(input);
       MachineRepresentation const expected_input_type =
           desc->GetInputType(i).representation();
       if (!IsCompatible(expected_input_type, input_type)) {
@@ -649,7 +658,7 @@
   }
 
   Schedule const* const schedule_;
-  MachineRepresentationInferrer const* const typer_;
+  MachineRepresentationInferrer const* const inferrer_;
 };
 
 }  // namespace
diff --git a/src/compiler/machine-operator-reducer.h b/src/compiler/machine-operator-reducer.h
index 574f45c..d0845d9 100644
--- a/src/compiler/machine-operator-reducer.h
+++ b/src/compiler/machine-operator-reducer.h
@@ -5,8 +5,10 @@
 #ifndef V8_COMPILER_MACHINE_OPERATOR_REDUCER_H_
 #define V8_COMPILER_MACHINE_OPERATOR_REDUCER_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/compiler/graph-reducer.h"
 #include "src/compiler/machine-operator.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -19,7 +21,8 @@
 
 // Performs constant folding and strength reduction on nodes that have
 // machine operators.
-class MachineOperatorReducer final : public Reducer {
+class V8_EXPORT_PRIVATE MachineOperatorReducer final
+    : public NON_EXPORTED_BASE(Reducer) {
  public:
   explicit MachineOperatorReducer(JSGraph* jsgraph);
   ~MachineOperatorReducer();
diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h
index 56cefc5..1cbec99 100644
--- a/src/compiler/machine-operator.h
+++ b/src/compiler/machine-operator.h
@@ -5,7 +5,9 @@
 #ifndef V8_COMPILER_MACHINE_OPERATOR_H_
 #define V8_COMPILER_MACHINE_OPERATOR_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/base/flags.h"
+#include "src/globals.h"
 #include "src/machine-type.h"
 
 namespace v8 {
@@ -62,12 +64,12 @@
   WriteBarrierKind write_barrier_kind_;
 };
 
-bool operator==(StoreRepresentation, StoreRepresentation);
+V8_EXPORT_PRIVATE bool operator==(StoreRepresentation, StoreRepresentation);
 bool operator!=(StoreRepresentation, StoreRepresentation);
 
 size_t hash_value(StoreRepresentation);
 
-std::ostream& operator<<(std::ostream&, StoreRepresentation);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, StoreRepresentation);
 
 StoreRepresentation const& StoreRepresentationOf(Operator const*);
 
@@ -99,7 +101,8 @@
 // Interface for building machine-level operators. These operators are
 // machine-level but machine-independent and thus define a language suitable
 // for generating code to run on architectures such as ia32, x64, arm, etc.
-class MachineOperatorBuilder final : public ZoneObject {
+class V8_EXPORT_PRIVATE MachineOperatorBuilder final
+    : public NON_EXPORTED_BASE(ZoneObject) {
  public:
   // Flags that specify which operations are available. This is useful
   // for operations that are unsupported by some back-ends.
diff --git a/src/compiler/mips/code-generator-mips.cc b/src/compiler/mips/code-generator-mips.cc
index 12ab4af..0a62b52 100644
--- a/src/compiler/mips/code-generator-mips.cc
+++ b/src/compiler/mips/code-generator-mips.cc
@@ -54,6 +54,14 @@
     return ToDoubleRegister(op);
   }
 
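+  // Returns the MIPS zero register when the input is an immediate zero,
+  // avoiding the need to materialize the constant in a scratch register.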
+  Register InputOrZeroRegister(size_t index) {
+    if (instr_->InputAt(index)->IsImmediate()) {
+      DCHECK((InputInt32(index) == 0));
+      return zero_reg;
+    }
+    return InputRegister(index);
+  }
+
   DoubleRegister InputOrZeroDoubleRegister(size_t index) {
     if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
 
@@ -381,45 +389,48 @@
     __ bind(ool->exit());                                                     \
   } while (0)
 
-
 #define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr)                 \
   do {                                                                 \
     Label done;                                                        \
     if (instr->InputAt(0)->IsRegister()) {                             \
       auto offset = i.InputRegister(0);                                \
-      auto value = i.Input##width##Register(2);                        \
+      auto value = i.InputOrZero##width##Register(2);                  \
+      if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {      \
+        __ Move(kDoubleRegZero, 0.0);                                  \
+      }                                                                \
       __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
       __ addu(kScratchReg, i.InputRegister(3), offset);                \
       __ asm_instr(value, MemOperand(kScratchReg, 0));                 \
     } else {                                                           \
       auto offset = i.InputOperand(0).immediate();                     \
-      auto value = i.Input##width##Register(2);                        \
+      auto value = i.InputOrZero##width##Register(2);                  \
+      if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {      \
+        __ Move(kDoubleRegZero, 0.0);                                  \
+      }                                                                \
       __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
       __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
     }                                                                  \
     __ bind(&done);                                                    \
   } while (0)
 
-
 #define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                      \
   do {                                                                 \
     Label done;                                                        \
     if (instr->InputAt(0)->IsRegister()) {                             \
       auto offset = i.InputRegister(0);                                \
-      auto value = i.InputRegister(2);                                 \
+      auto value = i.InputOrZeroRegister(2);                           \
       __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
       __ addu(kScratchReg, i.InputRegister(3), offset);                \
       __ asm_instr(value, MemOperand(kScratchReg, 0));                 \
     } else {                                                           \
       auto offset = i.InputOperand(0).immediate();                     \
-      auto value = i.InputRegister(2);                                 \
+      auto value = i.InputOrZeroRegister(2);                           \
       __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
       __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
     }                                                                  \
     __ bind(&done);                                                    \
   } while (0)
 
-
 #define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode)                                  \
   if (IsMipsArchVariant(kMips32r6)) {                                          \
     __ cfc1(kScratchReg, FCSR);                                                \
@@ -478,11 +489,11 @@
     __ sync();                                           \
   } while (0)
 
-#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr)         \
-  do {                                                   \
-    __ sync();                                           \
-    __ asm_instr(i.InputRegister(2), i.MemoryOperand()); \
-    __ sync();                                           \
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr)               \
+  do {                                                         \
+    __ sync();                                                 \
+    __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \
+    __ sync();                                                 \
   } while (0)
 
 #define ASSEMBLE_IEEE754_BINOP(name)                                          \
@@ -639,20 +650,16 @@
       frame_access_state()->SetFrameAccessToDefault();
       break;
     }
-    case kArchTailCallJSFunctionFromJSFunction:
-    case kArchTailCallJSFunction: {
+    case kArchTailCallJSFunctionFromJSFunction: {
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
         // Check the function's context matches the context argument.
         __ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
         __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
       }
-
-      if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
-        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
-                                         i.TempRegister(0), i.TempRegister(1),
-                                         i.TempRegister(2));
-      }
+      AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                       i.TempRegister(0), i.TempRegister(1),
+                                       i.TempRegister(2));
       __ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
       __ Jump(at);
       frame_access_state()->ClearSPDelta();
@@ -713,7 +720,7 @@
       break;
     }
     case kArchRet:
-      AssembleReturn();
+      AssembleReturn(instr->InputAt(0));
       break;
     case kArchStackPointer:
       __ mov(i.OutputRegister(), sp);
@@ -976,32 +983,38 @@
       }
       break;
     case kMipsShlPair: {
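+      // If only the low word of the pair is used, a single output is
+      // allocated and the high word is written to a temp register instead.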
+      Register second_output =
+          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
       if (instr->InputAt(2)->IsRegister()) {
-        __ ShlPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+        __ ShlPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                    i.InputRegister(1), i.InputRegister(2));
       } else {
         uint32_t imm = i.InputOperand(2).immediate();
-        __ ShlPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+        __ ShlPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                    i.InputRegister(1), imm);
       }
     } break;
     case kMipsShrPair: {
+      Register second_output =
+          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
       if (instr->InputAt(2)->IsRegister()) {
-        __ ShrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+        __ ShrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                    i.InputRegister(1), i.InputRegister(2));
       } else {
         uint32_t imm = i.InputOperand(2).immediate();
-        __ ShrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+        __ ShrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                    i.InputRegister(1), imm);
       }
     } break;
     case kMipsSarPair: {
+      Register second_output =
+          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
       if (instr->InputAt(2)->IsRegister()) {
-        __ SarPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+        __ SarPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                    i.InputRegister(1), i.InputRegister(2));
       } else {
         uint32_t imm = i.InputOperand(2).immediate();
-        __ SarPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+        __ SarPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                    i.InputRegister(1), imm);
       }
     } break;
@@ -1388,10 +1401,10 @@
 
     // ... more basic instructions ...
     case kMipsSeb:
-      __ seb(i.OutputRegister(), i.InputRegister(0));
+      __ Seb(i.OutputRegister(), i.InputRegister(0));
       break;
     case kMipsSeh:
-      __ seh(i.OutputRegister(), i.InputRegister(0));
+      __ Seh(i.OutputRegister(), i.InputRegister(0));
       break;
     case kMipsLbu:
       __ lbu(i.OutputRegister(), i.MemoryOperand());
@@ -1400,7 +1413,7 @@
       __ lb(i.OutputRegister(), i.MemoryOperand());
       break;
     case kMipsSb:
-      __ sb(i.InputRegister(2), i.MemoryOperand());
+      __ sb(i.InputOrZeroRegister(2), i.MemoryOperand());
       break;
     case kMipsLhu:
       __ lhu(i.OutputRegister(), i.MemoryOperand());
@@ -1415,10 +1428,10 @@
       __ Ulh(i.OutputRegister(), i.MemoryOperand());
       break;
     case kMipsSh:
-      __ sh(i.InputRegister(2), i.MemoryOperand());
+      __ sh(i.InputOrZeroRegister(2), i.MemoryOperand());
       break;
     case kMipsUsh:
-      __ Ush(i.InputRegister(2), i.MemoryOperand(), kScratchReg);
+      __ Ush(i.InputOrZeroRegister(2), i.MemoryOperand(), kScratchReg);
       break;
     case kMipsLw:
       __ lw(i.OutputRegister(), i.MemoryOperand());
@@ -1427,10 +1440,10 @@
       __ Ulw(i.OutputRegister(), i.MemoryOperand());
       break;
     case kMipsSw:
-      __ sw(i.InputRegister(2), i.MemoryOperand());
+      __ sw(i.InputOrZeroRegister(2), i.MemoryOperand());
       break;
     case kMipsUsw:
-      __ Usw(i.InputRegister(2), i.MemoryOperand());
+      __ Usw(i.InputOrZeroRegister(2), i.MemoryOperand());
       break;
     case kMipsLwc1: {
       __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
@@ -1443,13 +1456,21 @@
     case kMipsSwc1: {
       size_t index = 0;
       MemOperand operand = i.MemoryOperand(&index);
-      __ swc1(i.InputSingleRegister(index), operand);
+      FPURegister ft = i.InputOrZeroSingleRegister(index);
+      if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
+        __ Move(kDoubleRegZero, 0.0);
+      }
+      __ swc1(ft, operand);
       break;
     }
     case kMipsUswc1: {
       size_t index = 0;
       MemOperand operand = i.MemoryOperand(&index);
-      __ Uswc1(i.InputSingleRegister(index), operand, kScratchReg);
+      FPURegister ft = i.InputOrZeroSingleRegister(index);
+      if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
+        __ Move(kDoubleRegZero, 0.0);
+      }
+      __ Uswc1(ft, operand, kScratchReg);
       break;
     }
     case kMipsLdc1:
@@ -1458,12 +1479,22 @@
     case kMipsUldc1:
       __ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
       break;
-    case kMipsSdc1:
-      __ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
+    case kMipsSdc1: {
+      FPURegister ft = i.InputOrZeroDoubleRegister(2);
+      if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
+        __ Move(kDoubleRegZero, 0.0);
+      }
+      __ sdc1(ft, i.MemoryOperand());
       break;
-    case kMipsUsdc1:
-      __ Usdc1(i.InputDoubleRegister(2), i.MemoryOperand(), kScratchReg);
+    }
+    case kMipsUsdc1: {
+      FPURegister ft = i.InputOrZeroDoubleRegister(2);
+      if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
+        __ Move(kDoubleRegZero, 0.0);
+      }
+      __ Usdc1(ft, i.MemoryOperand(), kScratchReg);
       break;
+    }
     case kMipsPush:
       if (instr->InputAt(0)->IsFPRegister()) {
         __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
@@ -1714,8 +1745,15 @@
 
   if (instr->arch_opcode() == kMipsTst) {
     cc = FlagsConditionToConditionTst(condition);
-    __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
-    __ Sltu(result, zero_reg, kScratchReg);
+    if (instr->InputAt(1)->IsImmediate() &&
+        base::bits::IsPowerOfTwo32(i.InputOperand(1).immediate())) {
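+      // The immediate masks a single bit, so extract that bit directly
+      // instead of the generic and + sltu sequence.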
+      uint16_t pos =
+          base::bits::CountTrailingZeros32(i.InputOperand(1).immediate());
+      __ Ext(result, i.InputRegister(0), pos, 1);
+    } else {
+      __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
+      __ Sltu(result, zero_reg, kScratchReg);
+    }
     if (cc == eq) {
       // Sltu produces 0 for equality, invert the result.
       __ xori(result, result, 1);
@@ -1884,7 +1922,7 @@
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   DeoptimizeReason deoptimization_reason =
       GetDeoptimizationReason(deoptimization_id);
-  __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
+  __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
 }
@@ -1920,12 +1958,16 @@
       __ mov(fp, sp);
     } else if (descriptor->IsJSFunctionCall()) {
       __ Prologue(this->info()->GeneratePreagedPrologue());
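+      // Push the argument count so that returns can pop a variable number of
+      // stack arguments (see AssembleReturn).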
+      if (descriptor->PushArgumentCount()) {
+        __ Push(kJavaScriptCallArgCountRegister);
+      }
     } else {
       __ StubPrologue(info()->GetOutputStackFrameType());
     }
   }
 
-  int shrink_slots = frame()->GetSpillSlotCount();
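+  // Slots still to be allocated on top of the fixed frame set up above.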
+  int shrink_slots =
+      frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
 
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
@@ -1958,8 +2000,7 @@
   }
 }
 
-
-void CodeGenerator::AssembleReturn() {
+void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   int pop_count = static_cast<int>(descriptor->StackParameterCount());
 
@@ -1975,18 +2016,32 @@
     __ MultiPopFPU(saves_fpu);
   }
 
+  MipsOperandConverter g(this, nullptr);
   if (descriptor->IsCFunctionCall()) {
     AssembleDeconstructFrame();
   } else if (frame_access_state()->has_frame()) {
-    // Canonicalize JSFunction return sites for now.
-    if (return_label_.is_bound()) {
-      __ Branch(&return_label_);
-      return;
+    // Canonicalize JSFunction return sites for now, unless they have a
+    // variable number of stack slot pops.
+    if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+      if (return_label_.is_bound()) {
+        __ Branch(&return_label_);
+        return;
+      } else {
+        __ bind(&return_label_);
+        AssembleDeconstructFrame();
+      }
     } else {
-      __ bind(&return_label_);
       AssembleDeconstructFrame();
     }
   }
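+  // Pop any additional slots requested by the return: a constant count is
+  // folded into pop_count; a register count is scaled to bytes and added to
+  // sp directly.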
+  if (pop->IsImmediate()) {
+    DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
+    pop_count += g.ToConstant(pop).ToInt32();
+  } else {
+    Register pop_reg = g.ToRegister(pop);
+    __ sll(pop_reg, pop_reg, kPointerSizeLog2);
+    __ Addu(sp, sp, Operand(pop_reg));
+  }
   if (pop_count != 0) {
     __ DropAndRet(pop_count);
   } else {
diff --git a/src/compiler/mips/instruction-selector-mips.cc b/src/compiler/mips/instruction-selector-mips.cc
index 0a98930..1e4b996 100644
--- a/src/compiler/mips/instruction-selector-mips.cc
+++ b/src/compiler/mips/instruction-selector-mips.cc
@@ -31,6 +31,39 @@
     return UseRegister(node);
   }
 
+  // Use the zero register if the node has the immediate value zero, otherwise
+  // assign a register.
+  InstructionOperand UseRegisterOrImmediateZero(Node* node) {
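+    // Note: the bit_cast comparison deliberately rejects -0.0, whose bit
+    // pattern is nonzero.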
+    if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
+        (IsFloatConstant(node) &&
+         (bit_cast<int64_t>(GetFloatConstantValue(node)) == V8_INT64_C(0)))) {
+      return UseImmediate(node);
+    }
+    return UseRegister(node);
+  }
+
+  bool IsIntegerConstant(Node* node) {
+    return (node->opcode() == IrOpcode::kInt32Constant);
+  }
+
+  int64_t GetIntegerConstantValue(Node* node) {
+    DCHECK(node->opcode() == IrOpcode::kInt32Constant);
+    return OpParameter<int32_t>(node);
+  }
+
+  bool IsFloatConstant(Node* node) {
+    return (node->opcode() == IrOpcode::kFloat32Constant) ||
+           (node->opcode() == IrOpcode::kFloat64Constant);
+  }
+
+  double GetFloatConstantValue(Node* node) {
+    if (node->opcode() == IrOpcode::kFloat32Constant) {
+      return OpParameter<float>(node);
+    }
+    DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
+    return OpParameter<double>(node);
+  }
+
   bool CanBeImmediate(Node* node, InstructionCode opcode) {
     Int32Matcher m(node);
     if (!m.HasValue()) return false;
@@ -40,14 +73,40 @@
       case kMipsSar:
       case kMipsShr:
         return is_uint5(value);
+      case kMipsAdd:
+      case kMipsAnd:
+      case kMipsOr:
+      case kMipsTst:
+      case kMipsSub:
       case kMipsXor:
         return is_uint16(value);
+      case kMipsLb:
+      case kMipsLbu:
+      case kMipsSb:
+      case kMipsLh:
+      case kMipsLhu:
+      case kMipsSh:
+      case kMipsLw:
+      case kMipsSw:
+      case kMipsLwc1:
+      case kMipsSwc1:
       case kMipsLdc1:
       case kMipsSdc1:
+      case kCheckedLoadInt8:
+      case kCheckedLoadUint8:
+      case kCheckedLoadInt16:
+      case kCheckedLoadUint16:
+      case kCheckedLoadWord32:
+      case kCheckedStoreWord8:
+      case kCheckedStoreWord16:
+      case kCheckedStoreWord32:
+      case kCheckedLoadFloat32:
       case kCheckedLoadFloat64:
+      case kCheckedStoreFloat32:
       case kCheckedStoreFloat64:
-        return std::numeric_limits<int16_t>::min() <= (value + kIntSize) &&
-               std::numeric_limits<int16_t>::max() >= (value + kIntSize);
+        // True even for 32-bit values; offsets that do not fit into 16 bits
+        // are handled in assembler-mips.cc.
+        return is_int32(value);
       default:
         return is_int16(value);
     }
@@ -86,9 +145,23 @@
                  g.UseOperand(node->InputAt(1), opcode));
 }
 
+bool TryMatchImmediate(InstructionSelector* selector,
+                       InstructionCode* opcode_return, Node* node,
+                       size_t* input_count_return, InstructionOperand* inputs) {
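+  // If |node| fits the opcode's immediate form, switch the addressing mode
+  // to MRI and store the immediate in inputs[0].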
+  MipsOperandGenerator g(selector);
+  if (g.CanBeImmediate(node, *opcode_return)) {
+    *opcode_return |= AddressingModeField::encode(kMode_MRI);
+    inputs[0] = g.UseImmediate(node);
+    *input_count_return = 1;
+    return true;
+  }
+  return false;
+}
 
 static void VisitBinop(InstructionSelector* selector, Node* node,
-                       InstructionCode opcode, FlagsContinuation* cont) {
+                       InstructionCode opcode, bool has_reverse_opcode,
+                       InstructionCode reverse_opcode,
+                       FlagsContinuation* cont) {
   MipsOperandGenerator g(selector);
   Int32BinopMatcher m(node);
   InstructionOperand inputs[4];
@@ -96,8 +169,21 @@
   InstructionOperand outputs[2];
   size_t output_count = 0;
 
-  inputs[input_count++] = g.UseRegister(m.left().node());
-  inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
+  if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
+                        &inputs[1])) {
+    inputs[0] = g.UseRegister(m.left().node());
+    input_count++;
+  } else if (has_reverse_opcode &&
+             TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
+                               &input_count, &inputs[1])) {
+    inputs[0] = g.UseRegister(m.right().node());
+    opcode = reverse_opcode;
+    input_count++;
+  } else {
+    inputs[input_count++] = g.UseRegister(m.left().node());
+    inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
+  }
 
   if (cont->IsBranch()) {
     inputs[input_count++] = g.Label(cont->true_block());
@@ -130,11 +216,21 @@
   }
 }
 
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode, bool has_reverse_opcode,
+                       InstructionCode reverse_opcode) {
+  FlagsContinuation cont;
+  VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
+}
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode, FlagsContinuation* cont) {
+  VisitBinop(selector, node, opcode, false, kArchNop, cont);
+}
 
 static void VisitBinop(InstructionSelector* selector, Node* node,
                        InstructionCode opcode) {
-  FlagsContinuation cont;
-  VisitBinop(selector, node, opcode, &cont);
+  VisitBinop(selector, node, opcode, false, kArchNop);
 }
 
 
@@ -259,14 +355,15 @@
 
     if (g.CanBeImmediate(index, opcode)) {
       Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
-           g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+           g.UseRegister(base), g.UseImmediate(index),
+           g.UseRegisterOrImmediateZero(value));
     } else {
       InstructionOperand addr_reg = g.TempRegister();
       Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
            g.UseRegister(index), g.UseRegister(base));
       // Emit desired store opcode, using temp addr_reg.
       Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
-           addr_reg, g.TempImmediate(0), g.UseRegister(value));
+           addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
     }
   }
 }
@@ -317,12 +414,12 @@
       return;
     }
   }
-  VisitBinop(this, node, kMipsAnd);
+  VisitBinop(this, node, kMipsAnd, true, kMipsAnd);
 }
 
 
 void InstructionSelector::VisitWord32Or(Node* node) {
-  VisitBinop(this, node, kMipsOr);
+  VisitBinop(this, node, kMipsOr, true, kMipsOr);
 }
 
 
@@ -346,7 +443,7 @@
          g.TempImmediate(0));
     return;
   }
-  VisitBinop(this, node, kMipsXor);
+  VisitBinop(this, node, kMipsXor, true, kMipsXor);
 }
 
 
@@ -429,32 +526,43 @@
 }
 
 static void VisitInt32PairBinop(InstructionSelector* selector,
-                                InstructionCode opcode, Node* node) {
+                                InstructionCode pair_opcode,
+                                InstructionCode single_opcode, Node* node) {
   MipsOperandGenerator g(selector);
 
-  // We use UseUniqueRegister here to avoid register sharing with the output
-  // register.
-  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
-                                 g.UseUniqueRegister(node->InputAt(1)),
-                                 g.UseUniqueRegister(node->InputAt(2)),
-                                 g.UseUniqueRegister(node->InputAt(3))};
+  Node* projection1 = NodeProperties::FindProjection(node, 1);
 
-  InstructionOperand outputs[] = {
-      g.DefineAsRegister(node),
-      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
-  selector->Emit(opcode, 2, outputs, 4, inputs);
+  if (projection1) {
+    // We use UseUniqueRegister here to avoid register sharing with the output
+    // register.
+    InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+                                   g.UseUniqueRegister(node->InputAt(1)),
+                                   g.UseUniqueRegister(node->InputAt(2)),
+                                   g.UseUniqueRegister(node->InputAt(3))};
+
+    InstructionOperand outputs[] = {
+        g.DefineAsRegister(node),
+        g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+    selector->Emit(pair_opcode, 2, outputs, 4, inputs);
+  } else {
+    // The high word of the result is not used, so we emit the standard
+    // 32-bit instruction.
+    selector->Emit(single_opcode, g.DefineSameAsFirst(node),
+                   g.UseRegister(node->InputAt(0)),
+                   g.UseRegister(node->InputAt(2)));
+  }
 }
 
 void InstructionSelector::VisitInt32PairAdd(Node* node) {
-  VisitInt32PairBinop(this, kMipsAddPair, node);
+  VisitInt32PairBinop(this, kMipsAddPair, kMipsAdd, node);
 }
 
 void InstructionSelector::VisitInt32PairSub(Node* node) {
-  VisitInt32PairBinop(this, kMipsSubPair, node);
+  VisitInt32PairBinop(this, kMipsSubPair, kMipsSub, node);
 }
 
 void InstructionSelector::VisitInt32PairMul(Node* node) {
-  VisitInt32PairBinop(this, kMipsMulPair, node);
+  VisitInt32PairBinop(this, kMipsMulPair, kMipsMul, node);
 }
 
 // Shared routine for multiple shift operations.
@@ -475,11 +583,21 @@
                                  g.UseUniqueRegister(node->InputAt(1)),
                                  shift_operand};
 
-  InstructionOperand outputs[] = {
-      g.DefineAsRegister(node),
-      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+  Node* projection1 = NodeProperties::FindProjection(node, 1);
 
-  selector->Emit(opcode, 2, outputs, 3, inputs);
+  InstructionOperand outputs[2];
+  InstructionOperand temps[1];
+  int32_t output_count = 0;
+  int32_t temp_count = 0;
+
+  outputs[output_count++] = g.DefineAsRegister(node);
+  if (projection1) {
+    outputs[output_count++] = g.DefineAsRegister(projection1);
+  } else {
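+    // The high word is unused, but the pair instruction still needs a
+    // register to write it to.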
+    temps[temp_count++] = g.TempRegister();
+  }
+
+  selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
 }
 
 void InstructionSelector::VisitWord32PairShl(Node* node) {
@@ -554,7 +672,7 @@
     }
   }
 
-  VisitBinop(this, node, kMipsAdd);
+  VisitBinop(this, node, kMipsAdd, true, kMipsAdd);
 }
 
 
@@ -1170,14 +1288,15 @@
 
   if (g.CanBeImmediate(index, opcode)) {
     Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
-         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+         g.UseRegister(base), g.UseImmediate(index),
+         g.UseRegisterOrImmediateZero(value));
   } else {
     InstructionOperand addr_reg = g.TempRegister();
     Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
          g.UseRegister(index), g.UseRegister(base));
     // Emit desired store opcode, using temp addr_reg.
     Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
-         addr_reg, g.TempImmediate(0), g.UseRegister(value));
+         addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
   }
 }
 
@@ -1269,7 +1388,7 @@
                                           : g.UseRegister(length);
 
   Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
-       offset_operand, length_operand, g.UseRegister(value),
+       offset_operand, length_operand, g.UseRegisterOrImmediateZero(value),
        g.UseRegister(buffer));
 }
 
@@ -1334,51 +1453,61 @@
 
   // Match immediates on left or right side of comparison.
   if (g.CanBeImmediate(right, opcode)) {
-    switch (cont->condition()) {
-      case kEqual:
-      case kNotEqual:
-        if (cont->IsSet()) {
+    if (opcode == kMipsTst) {
+      VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
+                   cont);
+    } else {
+      switch (cont->condition()) {
+        case kEqual:
+        case kNotEqual:
+          if (cont->IsSet()) {
+            VisitCompare(selector, opcode, g.UseRegister(left),
+                         g.UseImmediate(right), cont);
+          } else {
+            VisitCompare(selector, opcode, g.UseRegister(left),
+                         g.UseRegister(right), cont);
+          }
+          break;
+        case kSignedLessThan:
+        case kSignedGreaterThanOrEqual:
+        case kUnsignedLessThan:
+        case kUnsignedGreaterThanOrEqual:
           VisitCompare(selector, opcode, g.UseRegister(left),
                        g.UseImmediate(right), cont);
-        } else {
+          break;
+        default:
           VisitCompare(selector, opcode, g.UseRegister(left),
                        g.UseRegister(right), cont);
-        }
-        break;
-      case kSignedLessThan:
-      case kSignedGreaterThanOrEqual:
-      case kUnsignedLessThan:
-      case kUnsignedGreaterThanOrEqual:
-        VisitCompare(selector, opcode, g.UseRegister(left),
-                     g.UseImmediate(right), cont);
-        break;
-      default:
-        VisitCompare(selector, opcode, g.UseRegister(left),
-                     g.UseRegister(right), cont);
+      }
     }
   } else if (g.CanBeImmediate(left, opcode)) {
     if (!commutative) cont->Commute();
-    switch (cont->condition()) {
-      case kEqual:
-      case kNotEqual:
-        if (cont->IsSet()) {
+    if (opcode == kMipsTst) {
+      VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+                   cont);
+    } else {
+      switch (cont->condition()) {
+        case kEqual:
+        case kNotEqual:
+          if (cont->IsSet()) {
+            VisitCompare(selector, opcode, g.UseRegister(right),
+                         g.UseImmediate(left), cont);
+          } else {
+            VisitCompare(selector, opcode, g.UseRegister(right),
+                         g.UseRegister(left), cont);
+          }
+          break;
+        case kSignedLessThan:
+        case kSignedGreaterThanOrEqual:
+        case kUnsignedLessThan:
+        case kUnsignedGreaterThanOrEqual:
           VisitCompare(selector, opcode, g.UseRegister(right),
                        g.UseImmediate(left), cont);
-        } else {
+          break;
+        default:
           VisitCompare(selector, opcode, g.UseRegister(right),
                        g.UseRegister(left), cont);
-        }
-        break;
-      case kSignedLessThan:
-      case kSignedGreaterThanOrEqual:
-      case kUnsignedLessThan:
-      case kUnsignedGreaterThanOrEqual:
-        VisitCompare(selector, opcode, g.UseRegister(right),
-                     g.UseImmediate(left), cont);
-        break;
-      default:
-        VisitCompare(selector, opcode, g.UseRegister(right),
-                     g.UseRegister(left), cont);
+      }
     }
   } else {
     VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
@@ -1395,21 +1524,22 @@
 // Shared routine for word comparisons against zero.
 void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                           Node* value, FlagsContinuation* cont) {
-  while (selector->CanCover(user, value)) {
+  // Try to combine with comparisons against 0 by simply inverting the branch.
+  while (value->opcode() == IrOpcode::kWord32Equal &&
+         selector->CanCover(user, value)) {
+    Int32BinopMatcher m(value);
+    if (!m.right().Is(0)) break;
+
+    user = value;
+    value = m.left().node();
+    cont->Negate();
+  }
+
+  if (selector->CanCover(user, value)) {
     switch (value->opcode()) {
-      case IrOpcode::kWord32Equal: {
-        // Combine with comparisons against 0 by simply inverting the
-        // continuation.
-        Int32BinopMatcher m(value);
-        if (m.right().Is(0)) {
-          user = value;
-          value = m.left().node();
-          cont->Negate();
-          continue;
-        }
+      case IrOpcode::kWord32Equal:
         cont->OverwriteAndNegateIfEqual(kEqual);
         return VisitWordCompare(selector, value, cont);
-      }
       case IrOpcode::kInt32LessThan:
         cont->OverwriteAndNegateIfEqual(kSignedLessThan);
         return VisitWordCompare(selector, value, cont);
@@ -1473,7 +1603,6 @@
       default:
         break;
     }
-    break;
   }
 
   // Continuation could not be combined with a compare, emit compare against 0.
@@ -1703,6 +1832,7 @@
       UNREACHABLE();
       return;
   }
+
   if (g.CanBeImmediate(index, opcode)) {
     Emit(opcode | AddressingModeField::encode(kMode_MRI),
          g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
@@ -1740,14 +1870,15 @@
 
   if (g.CanBeImmediate(index, opcode)) {
     Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
-         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+         g.UseRegister(base), g.UseImmediate(index),
+         g.UseRegisterOrImmediateZero(value));
   } else {
     InstructionOperand addr_reg = g.TempRegister();
     Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
          g.UseRegister(index), g.UseRegister(base));
     // Emit desired store opcode, using temp addr_reg.
     Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
-         addr_reg, g.TempImmediate(0), g.UseRegister(value));
+         addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
   }
 }
 
diff --git a/src/compiler/mips64/code-generator-mips64.cc b/src/compiler/mips64/code-generator-mips64.cc
index 9ed72ae..a3bf433 100644
--- a/src/compiler/mips64/code-generator-mips64.cc
+++ b/src/compiler/mips64/code-generator-mips64.cc
@@ -53,6 +53,14 @@
     return ToDoubleRegister(op);
   }
 
+  Register InputOrZeroRegister(size_t index) {
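+    // An immediate input here can only be the value zero (see
+    // UseRegisterOrImmediateZero), so substitute the hardware zero register.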
+    if (instr_->InputAt(index)->IsImmediate()) {
+      DCHECK((InputInt32(index) == 0));
+      return zero_reg;
+    }
+    return InputRegister(index);
+  }
+
   DoubleRegister InputOrZeroDoubleRegister(size_t index) {
     if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
 
@@ -400,14 +408,20 @@
     Label done;                                                        \
     if (instr->InputAt(0)->IsRegister()) {                             \
       auto offset = i.InputRegister(0);                                \
-      auto value = i.Input##width##Register(2);                        \
+      auto value = i.InputOrZero##width##Register(2);                  \
+      if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {      \
+        __ Move(kDoubleRegZero, 0.0);                                  \
+      }                                                                \
       __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
       __ And(kScratchReg, offset, Operand(0xffffffff));                \
       __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg);          \
       __ asm_instr(value, MemOperand(kScratchReg, 0));                 \
     } else {                                                           \
       int offset = static_cast<int>(i.InputOperand(0).immediate());    \
-      auto value = i.Input##width##Register(2);                        \
+      auto value = i.InputOrZero##width##Register(2);                  \
+      if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {      \
+        __ Move(kDoubleRegZero, 0.0);                                  \
+      }                                                                \
       __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
       __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
     }                                                                  \
@@ -419,14 +433,14 @@
     Label done;                                                        \
     if (instr->InputAt(0)->IsRegister()) {                             \
       auto offset = i.InputRegister(0);                                \
-      auto value = i.InputRegister(2);                                 \
+      auto value = i.InputOrZeroRegister(2);                           \
       __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
       __ And(kScratchReg, offset, Operand(0xffffffff));                \
       __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg);          \
       __ asm_instr(value, MemOperand(kScratchReg, 0));                 \
     } else {                                                           \
       int offset = static_cast<int>(i.InputOperand(0).immediate());    \
-      auto value = i.InputRegister(2);                                 \
+      auto value = i.InputOrZeroRegister(2);                           \
       __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
       __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
     }                                                                  \
@@ -489,11 +503,11 @@
     __ sync();                                           \
   } while (0)
 
-#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr)         \
-  do {                                                   \
-    __ sync();                                           \
-    __ asm_instr(i.InputRegister(2), i.MemoryOperand()); \
-    __ sync();                                           \
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr)               \
+  do {                                                         \
+    __ sync();                                                 \
+    __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \
+    __ sync();                                                 \
   } while (0)
 
 #define ASSEMBLE_IEEE754_BINOP(name)                                          \
@@ -648,19 +662,16 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
-    case kArchTailCallJSFunctionFromJSFunction:
-    case kArchTailCallJSFunction: {
+    case kArchTailCallJSFunctionFromJSFunction: {
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
         // Check the function's context matches the context argument.
         __ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
         __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
       }
-      if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
-        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
-                                         i.TempRegister(0), i.TempRegister(1),
-                                         i.TempRegister(2));
-      }
+      AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                       i.TempRegister(0), i.TempRegister(1),
+                                       i.TempRegister(2));
       __ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
       __ Jump(at);
       frame_access_state()->ClearSPDelta();
@@ -722,7 +733,7 @@
       break;
     }
     case kArchRet:
-      AssembleReturn();
+      AssembleReturn(instr->InputAt(0));
       break;
     case kArchStackPointer:
       __ mov(i.OutputRegister(), sp);
@@ -1686,7 +1697,7 @@
       __ lb(i.OutputRegister(), i.MemoryOperand());
       break;
     case kMips64Sb:
-      __ sb(i.InputRegister(2), i.MemoryOperand());
+      __ sb(i.InputOrZeroRegister(2), i.MemoryOperand());
       break;
     case kMips64Lhu:
       __ lhu(i.OutputRegister(), i.MemoryOperand());
@@ -1701,10 +1712,10 @@
       __ Ulh(i.OutputRegister(), i.MemoryOperand());
       break;
     case kMips64Sh:
-      __ sh(i.InputRegister(2), i.MemoryOperand());
+      __ sh(i.InputOrZeroRegister(2), i.MemoryOperand());
       break;
     case kMips64Ush:
-      __ Ush(i.InputRegister(2), i.MemoryOperand(), kScratchReg);
+      __ Ush(i.InputOrZeroRegister(2), i.MemoryOperand(), kScratchReg);
       break;
     case kMips64Lw:
       __ lw(i.OutputRegister(), i.MemoryOperand());
@@ -1725,16 +1736,16 @@
       __ Uld(i.OutputRegister(), i.MemoryOperand());
       break;
     case kMips64Sw:
-      __ sw(i.InputRegister(2), i.MemoryOperand());
+      __ sw(i.InputOrZeroRegister(2), i.MemoryOperand());
       break;
     case kMips64Usw:
-      __ Usw(i.InputRegister(2), i.MemoryOperand());
+      __ Usw(i.InputOrZeroRegister(2), i.MemoryOperand());
       break;
     case kMips64Sd:
-      __ sd(i.InputRegister(2), i.MemoryOperand());
+      __ sd(i.InputOrZeroRegister(2), i.MemoryOperand());
       break;
     case kMips64Usd:
-      __ Usd(i.InputRegister(2), i.MemoryOperand());
+      __ Usd(i.InputOrZeroRegister(2), i.MemoryOperand());
       break;
     case kMips64Lwc1: {
       __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
@@ -1747,13 +1758,21 @@
     case kMips64Swc1: {
       size_t index = 0;
       MemOperand operand = i.MemoryOperand(&index);
-      __ swc1(i.InputSingleRegister(index), operand);
+      FPURegister ft = i.InputOrZeroSingleRegister(index);
+      if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
+        __ Move(kDoubleRegZero, 0.0);
+      }
+      __ swc1(ft, operand);
       break;
     }
     case kMips64Uswc1: {
       size_t index = 0;
       MemOperand operand = i.MemoryOperand(&index);
-      __ Uswc1(i.InputSingleRegister(index), operand, kScratchReg);
+      FPURegister ft = i.InputOrZeroSingleRegister(index);
+      if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
+        __ Move(kDoubleRegZero, 0.0);
+      }
+      __ Uswc1(ft, operand, kScratchReg);
       break;
     }
     case kMips64Ldc1:
@@ -1762,12 +1781,22 @@
     case kMips64Uldc1:
       __ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
       break;
-    case kMips64Sdc1:
-      __ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
+    case kMips64Sdc1: {
+      FPURegister ft = i.InputOrZeroDoubleRegister(2);
+      if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
+        __ Move(kDoubleRegZero, 0.0);
+      }
+      __ sdc1(ft, i.MemoryOperand());
       break;
-    case kMips64Usdc1:
-      __ Usdc1(i.InputDoubleRegister(2), i.MemoryOperand(), kScratchReg);
+    }
+    case kMips64Usdc1: {
+      FPURegister ft = i.InputOrZeroDoubleRegister(2);
+      if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
+        __ Move(kDoubleRegZero, 0.0);
+      }
+      __ Usdc1(ft, i.MemoryOperand(), kScratchReg);
       break;
+    }
     case kMips64Push:
       if (instr->InputAt(0)->IsFPRegister()) {
         __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
@@ -1866,6 +1895,10 @@
     case kAtomicStoreWord32:
       ASSEMBLE_ATOMIC_STORE_INTEGER(sw);
       break;
+    case kMips64AssertEqual:
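+      // Emitted under --debug-code to check the optimized Word32 compare
+      // against the full shifted compare.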
+      __ Assert(eq, static_cast<BailoutReason>(i.InputOperand(2).immediate()),
+                i.InputRegister(0), Operand(i.InputRegister(1)));
+      break;
   }
   return kSuccess;
 }  // NOLINT(readability/fn_size)
@@ -2025,8 +2058,15 @@
 
   if (instr->arch_opcode() == kMips64Tst) {
     cc = FlagsConditionToConditionTst(condition);
-    __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
-    __ Sltu(result, zero_reg, kScratchReg);
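+    // As on 32-bit MIPS: a power-of-two mask lets us extract the tested bit
+    // directly.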
+    if (instr->InputAt(1)->IsImmediate() &&
+        base::bits::IsPowerOfTwo64(i.InputOperand(1).immediate())) {
+      uint16_t pos =
+          base::bits::CountTrailingZeros64(i.InputOperand(1).immediate());
+      __ ExtractBits(result, i.InputRegister(0), pos, 1);
+    } else {
+      __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
+      __ Sltu(result, zero_reg, kScratchReg);
+    }
     if (cc == eq) {
       // Sltu produces 0 for equality, invert the result.
       __ xori(result, result, 1);
@@ -2206,7 +2246,7 @@
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   DeoptimizeReason deoptimization_reason =
       GetDeoptimizationReason(deoptimization_id);
-  __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
+  __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
 }
@@ -2238,12 +2278,16 @@
       __ mov(fp, sp);
     } else if (descriptor->IsJSFunctionCall()) {
       __ Prologue(this->info()->GeneratePreagedPrologue());
+      if (descriptor->PushArgumentCount()) {
+        __ Push(kJavaScriptCallArgCountRegister);
+      }
     } else {
       __ StubPrologue(info()->GetOutputStackFrameType());
     }
   }
 
-  int shrink_slots = frame()->GetSpillSlotCount();
+  int shrink_slots =
+      frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
 
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
@@ -2277,8 +2321,7 @@
   }
 }
 
-
-void CodeGenerator::AssembleReturn() {
+void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
 
   // Restore GP registers.
@@ -2293,19 +2336,33 @@
     __ MultiPopFPU(saves_fpu);
   }
 
+  MipsOperandConverter g(this, nullptr);
   if (descriptor->IsCFunctionCall()) {
     AssembleDeconstructFrame();
   } else if (frame_access_state()->has_frame()) {
-    // Canonicalize JSFunction return sites for now.
-    if (return_label_.is_bound()) {
-      __ Branch(&return_label_);
-      return;
+    // Canonicalize JSFunction return sites for now, unless they have a
+    // variable number of stack slot pops.
+    if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+      if (return_label_.is_bound()) {
+        __ Branch(&return_label_);
+        return;
+      } else {
+        __ bind(&return_label_);
+        AssembleDeconstructFrame();
+      }
     } else {
-      __ bind(&return_label_);
       AssembleDeconstructFrame();
     }
   }
   int pop_count = static_cast<int>(descriptor->StackParameterCount());
+  if (pop->IsImmediate()) {
+    DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
+    pop_count += g.ToConstant(pop).ToInt32();
+  } else {
+    Register pop_reg = g.ToRegister(pop);
+    __ dsll(pop_reg, pop_reg, kPointerSizeLog2);
+    __ Daddu(sp, sp, pop_reg);
+  }
   if (pop_count != 0) {
     __ DropAndRet(pop_count);
   } else {
diff --git a/src/compiler/mips64/instruction-codes-mips64.h b/src/compiler/mips64/instruction-codes-mips64.h
index 6a44434..8f68ced 100644
--- a/src/compiler/mips64/instruction-codes-mips64.h
+++ b/src/compiler/mips64/instruction-codes-mips64.h
@@ -169,7 +169,8 @@
   V(Mips64ByteSwap32)               \
   V(Mips64StackClaim)               \
   V(Mips64Seb)                      \
-  V(Mips64Seh)
+  V(Mips64Seh)                      \
+  V(Mips64AssertEqual)
 
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
diff --git a/src/compiler/mips64/instruction-selector-mips64.cc b/src/compiler/mips64/instruction-selector-mips64.cc
index 6e937e2..fbf09d6 100644
--- a/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/src/compiler/mips64/instruction-selector-mips64.cc
@@ -31,14 +31,49 @@
     return UseRegister(node);
   }
 
-  bool CanBeImmediate(Node* node, InstructionCode opcode) {
-    int64_t value;
-    if (node->opcode() == IrOpcode::kInt32Constant)
-      value = OpParameter<int32_t>(node);
-    else if (node->opcode() == IrOpcode::kInt64Constant)
-      value = OpParameter<int64_t>(node);
-    else
-      return false;
+  // Use the zero register if the node has the immediate value zero, otherwise
+  // assign a register.
+  InstructionOperand UseRegisterOrImmediateZero(Node* node) {
+    if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
+        (IsFloatConstant(node) &&
+         (bit_cast<int64_t>(GetFloatConstantValue(node)) == V8_INT64_C(0)))) {
+      return UseImmediate(node);
+    }
+    return UseRegister(node);
+  }
+
+  bool IsIntegerConstant(Node* node) {
+    return (node->opcode() == IrOpcode::kInt32Constant) ||
+           (node->opcode() == IrOpcode::kInt64Constant);
+  }
+
+  int64_t GetIntegerConstantValue(Node* node) {
+    if (node->opcode() == IrOpcode::kInt32Constant) {
+      return OpParameter<int32_t>(node);
+    }
+    DCHECK(node->opcode() == IrOpcode::kInt64Constant);
+    return OpParameter<int64_t>(node);
+  }
+
+  bool IsFloatConstant(Node* node) {
+    return (node->opcode() == IrOpcode::kFloat32Constant) ||
+           (node->opcode() == IrOpcode::kFloat64Constant);
+  }
+
+  double GetFloatConstantValue(Node* node) {
+    if (node->opcode() == IrOpcode::kFloat32Constant) {
+      return OpParameter<float>(node);
+    }
+    DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
+    return OpParameter<double>(node);
+  }
+
+  bool CanBeImmediate(Node* node, InstructionCode mode) {
+    return IsIntegerConstant(node) &&
+           CanBeImmediate(GetIntegerConstantValue(node), mode);
+  }
+
+  bool CanBeImmediate(int64_t value, InstructionCode opcode) {
     switch (ArchOpcodeField::decode(opcode)) {
       case kMips64Shl:
       case kMips64Sar:
@@ -48,6 +83,13 @@
       case kMips64Dsar:
       case kMips64Dshr:
         return is_uint6(value);
+      case kMips64Add:
+      case kMips64And32:
+      case kMips64And:
+      case kMips64Dadd:
+      case kMips64Or32:
+      case kMips64Or:
+      case kMips64Tst:
       case kMips64Xor:
         return is_uint16(value);
       case kMips64Ldc1:
@@ -91,9 +133,94 @@
                  g.UseOperand(node->InputAt(1), opcode));
 }
 
+struct ExtendingLoadMatcher {
+  ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
+      : matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
+    Initialize(node);
+  }
+
+  bool Matches() const { return matches_; }
+
+  Node* base() const {
+    DCHECK(Matches());
+    return base_;
+  }
+  int64_t immediate() const {
+    DCHECK(Matches());
+    return immediate_;
+  }
+  ArchOpcode opcode() const {
+    DCHECK(Matches());
+    return opcode_;
+  }
+
+ private:
+  bool matches_;
+  InstructionSelector* selector_;
+  Node* base_;
+  int64_t immediate_;
+  ArchOpcode opcode_;
+
+  void Initialize(Node* node) {
+    Int64BinopMatcher m(node);
+    // When loading a 64-bit value and shifting by 32, we should
+    // just load and sign-extend the interesting 4 bytes instead.
+    // This happens, for example, when we're loading and untagging SMIs.
+    DCHECK(m.IsWord64Sar());
+    if (m.left().IsLoad() && m.right().Is(32) &&
+        selector_->CanCover(m.node(), m.left().node())) {
+      Mips64OperandGenerator g(selector_);
+      Node* load = m.left().node();
+      Node* offset = load->InputAt(1);
+      base_ = load->InputAt(0);
+      opcode_ = kMips64Lw;
+      if (g.CanBeImmediate(offset, opcode_)) {
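+        // The word holding the upper 32 bits sits at offset +4 on
+        // little-endian targets and at the original offset on big-endian.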
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+        immediate_ = g.GetIntegerConstantValue(offset) + 4;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+        immediate_ = g.GetIntegerConstantValue(offset);
+#endif
+        matches_ = g.CanBeImmediate(immediate_, kMips64Lw);
+      }
+    }
+  }
+};
+
+bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node) {
+  ExtendingLoadMatcher m(node, selector);
+  Mips64OperandGenerator g(selector);
+  if (m.Matches()) {
+    InstructionOperand inputs[2];
+    inputs[0] = g.UseRegister(m.base());
+    InstructionCode opcode =
+        m.opcode() | AddressingModeField::encode(kMode_MRI);
+    DCHECK(is_int32(m.immediate()));
+    inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
+    InstructionOperand outputs[] = {g.DefineAsRegister(node)};
+    selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
+                   inputs);
+    return true;
+  }
+  return false;
+}
+
+bool TryMatchImmediate(InstructionSelector* selector,
+                       InstructionCode* opcode_return, Node* node,
+                       size_t* input_count_return, InstructionOperand* inputs) {
+  Mips64OperandGenerator g(selector);
+  if (g.CanBeImmediate(node, *opcode_return)) {
+    *opcode_return |= AddressingModeField::encode(kMode_MRI);
+    inputs[0] = g.UseImmediate(node);
+    *input_count_return = 1;
+    return true;
+  }
+  return false;
+}
 
 static void VisitBinop(InstructionSelector* selector, Node* node,
-                       InstructionCode opcode, FlagsContinuation* cont) {
+                       InstructionCode opcode, bool has_reverse_opcode,
+                       InstructionCode reverse_opcode,
+                       FlagsContinuation* cont) {
   Mips64OperandGenerator g(selector);
   Int32BinopMatcher m(node);
   InstructionOperand inputs[4];
@@ -101,8 +228,21 @@
   InstructionOperand outputs[2];
   size_t output_count = 0;
 
-  inputs[input_count++] = g.UseRegister(m.left().node());
-  inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
+  if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
+                        &inputs[1])) {
+    inputs[0] = g.UseRegister(m.left().node());
+    input_count++;
+  } else if (has_reverse_opcode &&
+             TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
+                               &input_count, &inputs[1])) {
+    inputs[0] = g.UseRegister(m.right().node());
+    opcode = reverse_opcode;
+    input_count++;
+  } else {
+    inputs[input_count++] = g.UseRegister(m.left().node());
+    inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
+  }
 
   if (cont->IsBranch()) {
     inputs[input_count++] = g.Label(cont->true_block());
@@ -135,11 +275,21 @@
   }
 }
 
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode, bool has_reverse_opcode,
+                       InstructionCode reverse_opcode) {
+  FlagsContinuation cont;
+  VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
+}
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode, FlagsContinuation* cont) {
+  VisitBinop(selector, node, opcode, false, kArchNop, cont);
+}
 
 static void VisitBinop(InstructionSelector* selector, Node* node,
                        InstructionCode opcode) {
-  FlagsContinuation cont;
-  VisitBinop(selector, node, opcode, &cont);
+  VisitBinop(selector, node, opcode, false, kArchNop);
 }
 
 void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
@@ -275,14 +425,15 @@
 
     if (g.CanBeImmediate(index, opcode)) {
       Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
-           g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+           g.UseRegister(base), g.UseImmediate(index),
+           g.UseRegisterOrImmediateZero(value));
     } else {
       InstructionOperand addr_reg = g.TempRegister();
       Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
            g.UseRegister(index), g.UseRegister(base));
       // Emit desired store opcode, using temp addr_reg.
       Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
-           addr_reg, g.TempImmediate(0), g.UseRegister(value));
+           addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
     }
   }
 }
@@ -334,7 +485,7 @@
       return;
     }
   }
-  VisitBinop(this, node, kMips64And32);
+  VisitBinop(this, node, kMips64And32, true, kMips64And32);
 }
 
 
@@ -385,17 +536,17 @@
       return;
     }
   }
-  VisitBinop(this, node, kMips64And);
+  VisitBinop(this, node, kMips64And, true, kMips64And);
 }
 
 
 void InstructionSelector::VisitWord32Or(Node* node) {
-  VisitBinop(this, node, kMips64Or32);
+  VisitBinop(this, node, kMips64Or32, true, kMips64Or32);
 }
 
 
 void InstructionSelector::VisitWord64Or(Node* node) {
-  VisitBinop(this, node, kMips64Or);
+  VisitBinop(this, node, kMips64Or, true, kMips64Or);
 }
 
 
@@ -419,7 +570,7 @@
          g.TempImmediate(0));
     return;
   }
-  VisitBinop(this, node, kMips64Xor32);
+  VisitBinop(this, node, kMips64Xor32, true, kMips64Xor32);
 }
 
 
@@ -443,7 +594,7 @@
          g.TempImmediate(0));
     return;
   }
-  VisitBinop(this, node, kMips64Xor);
+  VisitBinop(this, node, kMips64Xor, true, kMips64Xor);
 }
 
 
@@ -597,6 +748,7 @@
 
 
 void InstructionSelector::VisitWord64Sar(Node* node) {
+  if (TryEmitExtendingLoad(this, node)) return;
   VisitRRO(this, kMips64Dsar, node);
 }
 
@@ -692,7 +844,7 @@
       return;
     }
   }
-  VisitBinop(this, node, kMips64Add);
+  VisitBinop(this, node, kMips64Add, true, kMips64Add);
 }
 
 
@@ -726,7 +878,7 @@
     }
   }
 
-  VisitBinop(this, node, kMips64Dadd);
+  VisitBinop(this, node, kMips64Dadd, true, kMips64Dadd);
 }
 
 
@@ -1128,6 +1280,33 @@
 
 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
   Mips64OperandGenerator g(this);
+  Node* value = node->InputAt(0);
+  switch (value->opcode()) {
+    // 32-bit operations will write their result in a 64 bit register,
+    // clearing the top 32 bits of the destination register.
+    case IrOpcode::kUint32Div:
+    case IrOpcode::kUint32Mod:
+    case IrOpcode::kUint32MulHigh: {
+      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+      return;
+    }
+    case IrOpcode::kLoad: {
+      LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+      if (load_rep.IsUnsigned()) {
+        switch (load_rep.representation()) {
+          case MachineRepresentation::kWord8:
+          case MachineRepresentation::kWord16:
+          case MachineRepresentation::kWord32:
+            Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+            return;
+          default:
+            break;
+        }
+      }
+    }
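+    // Fall through.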
+    default:
+      break;
+  }
   Emit(kMips64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
        g.TempImmediate(0), g.TempImmediate(32));
 }
@@ -1613,14 +1792,15 @@
 
   if (g.CanBeImmediate(index, opcode)) {
     Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
-         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+         g.UseRegister(base), g.UseImmediate(index),
+         g.UseRegisterOrImmediateZero(value));
   } else {
     InstructionOperand addr_reg = g.TempRegister();
     Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
          g.UseRegister(index), g.UseRegister(base));
     // Emit desired store opcode, using temp addr_reg.
     Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
-         addr_reg, g.TempImmediate(0), g.UseRegister(value));
+         addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
   }
 }
 
@@ -1722,7 +1902,7 @@
                                           : g.UseRegister(length);
 
   Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
-       offset_operand, length_operand, g.UseRegister(value),
+       offset_operand, length_operand, g.UseRegisterOrImmediateZero(value),
        g.UseRegister(buffer));
 }
 
@@ -1788,51 +1968,61 @@
 
   // Match immediates on left or right side of comparison.
   if (g.CanBeImmediate(right, opcode)) {
-    switch (cont->condition()) {
-      case kEqual:
-      case kNotEqual:
-        if (cont->IsSet()) {
+    if (opcode == kMips64Tst) {
+      VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
+                   cont);
+    } else {
+      switch (cont->condition()) {
+        case kEqual:
+        case kNotEqual:
+          if (cont->IsSet()) {
+            VisitCompare(selector, opcode, g.UseRegister(left),
+                         g.UseImmediate(right), cont);
+          } else {
+            VisitCompare(selector, opcode, g.UseRegister(left),
+                         g.UseRegister(right), cont);
+          }
+          break;
+        case kSignedLessThan:
+        case kSignedGreaterThanOrEqual:
+        case kUnsignedLessThan:
+        case kUnsignedGreaterThanOrEqual:
           VisitCompare(selector, opcode, g.UseRegister(left),
                        g.UseImmediate(right), cont);
-        } else {
+          break;
+        default:
           VisitCompare(selector, opcode, g.UseRegister(left),
                        g.UseRegister(right), cont);
-        }
-        break;
-      case kSignedLessThan:
-      case kSignedGreaterThanOrEqual:
-      case kUnsignedLessThan:
-      case kUnsignedGreaterThanOrEqual:
-        VisitCompare(selector, opcode, g.UseRegister(left),
-                     g.UseImmediate(right), cont);
-        break;
-      default:
-        VisitCompare(selector, opcode, g.UseRegister(left),
-                     g.UseRegister(right), cont);
+      }
     }
   } else if (g.CanBeImmediate(left, opcode)) {
     if (!commutative) cont->Commute();
-    switch (cont->condition()) {
-      case kEqual:
-      case kNotEqual:
-        if (cont->IsSet()) {
+    if (opcode == kMips64Tst) {
+      VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+                   cont);
+    } else {
+      switch (cont->condition()) {
+        case kEqual:
+        case kNotEqual:
+          if (cont->IsSet()) {
+            VisitCompare(selector, opcode, g.UseRegister(right),
+                         g.UseImmediate(left), cont);
+          } else {
+            VisitCompare(selector, opcode, g.UseRegister(right),
+                         g.UseRegister(left), cont);
+          }
+          break;
+        case kSignedLessThan:
+        case kSignedGreaterThanOrEqual:
+        case kUnsignedLessThan:
+        case kUnsignedGreaterThanOrEqual:
           VisitCompare(selector, opcode, g.UseRegister(right),
                        g.UseImmediate(left), cont);
-        } else {
+          break;
+        default:
           VisitCompare(selector, opcode, g.UseRegister(right),
                        g.UseRegister(left), cont);
-        }
-        break;
-      case kSignedLessThan:
-      case kSignedGreaterThanOrEqual:
-      case kUnsignedLessThan:
-      case kUnsignedGreaterThanOrEqual:
-        VisitCompare(selector, opcode, g.UseRegister(right),
-                     g.UseImmediate(left), cont);
-        break;
-      default:
-        VisitCompare(selector, opcode, g.UseRegister(right),
-                     g.UseRegister(left), cont);
+      }
     }
   } else {
     VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
@@ -1840,10 +2030,89 @@
   }
 }
 
+bool IsNodeUnsigned(Node* n) {
+  NodeMatcher m(n);
+
+  if (m.IsLoad()) {
+    LoadRepresentation load_rep = LoadRepresentationOf(n->op());
+    return load_rep.IsUnsigned();
+  } else if (m.IsUnalignedLoad()) {
+    UnalignedLoadRepresentation load_rep =
+        UnalignedLoadRepresentationOf(n->op());
+    return load_rep.IsUnsigned();
+  } else {
+    return m.IsUint32Div() || m.IsUint32LessThan() ||
+           m.IsUint32LessThanOrEqual() || m.IsUint32Mod() ||
+           m.IsUint32MulHigh() || m.IsChangeFloat64ToUint32() ||
+           m.IsTruncateFloat64ToUint32() || m.IsTruncateFloat32ToUint32();
+  }
+}
+
+// Shared routine for multiple word compare operations.
+void VisitFullWord32Compare(InstructionSelector* selector, Node* node,
+                            InstructionCode opcode, FlagsContinuation* cont) {
+  Mips64OperandGenerator g(selector);
+  InstructionOperand leftOp = g.TempRegister();
+  InstructionOperand rightOp = g.TempRegister();
+
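+  // Shift both inputs left by 32 so that only their low words influence the
+  // comparison, then compare the full 64-bit registers.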
+  selector->Emit(kMips64Dshl, leftOp, g.UseRegister(node->InputAt(0)),
+                 g.TempImmediate(32));
+  selector->Emit(kMips64Dshl, rightOp, g.UseRegister(node->InputAt(1)),
+                 g.TempImmediate(32));
+
+  VisitCompare(selector, opcode, leftOp, rightOp, cont);
+}
+
+void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node,
+                                 InstructionCode opcode,
+                                 FlagsContinuation* cont) {
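+  // Under --debug-code, also evaluate the full shifted compare and assert
+  // that it agrees with the optimized one.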
+  if (FLAG_debug_code) {
+    Mips64OperandGenerator g(selector);
+    InstructionOperand leftOp = g.TempRegister();
+    InstructionOperand rightOp = g.TempRegister();
+    InstructionOperand optimizedResult = g.TempRegister();
+    InstructionOperand fullResult = g.TempRegister();
+    FlagsCondition condition = cont->condition();
+    InstructionCode testOpcode = opcode |
+                                 FlagsConditionField::encode(condition) |
+                                 FlagsModeField::encode(kFlags_set);
+
+    selector->Emit(testOpcode, optimizedResult, g.UseRegister(node->InputAt(0)),
+                   g.UseRegister(node->InputAt(1)));
+
+    selector->Emit(kMips64Dshl, leftOp, g.UseRegister(node->InputAt(0)),
+                   g.TempImmediate(32));
+    selector->Emit(kMips64Dshl, rightOp, g.UseRegister(node->InputAt(1)),
+                   g.TempImmediate(32));
+    selector->Emit(testOpcode, fullResult, leftOp, rightOp);
+
+    selector->Emit(
+        kMips64AssertEqual, g.NoOutput(), optimizedResult, fullResult,
+        g.TempImmediate(BailoutReason::kUnsupportedNonPrimitiveCompare));
+  }
+
+  VisitWordCompare(selector, node, opcode, cont, false);
+}
 
 void VisitWord32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
-  VisitWordCompare(selector, node, kMips64Cmp, cont, false);
+  // MIPS64 doesn't support Word32 compare instructions. Instead it relies
+  // on the values in registers being correctly sign-extended and uses a
+  // Word64 comparison. This is correct in most cases, but doesn't work
+  // when comparing a signed operand with an unsigned one.
+  // We could simulate a full Word32 compare in all cases, but that would
+  // add unnecessary overhead, since unsigned integers are rarely used in
+  // JavaScript.
+  // The solution proposed here tries to detect comparisons of a signed
+  // operand with an unsigned one and performs the full Word32 compare only
+  // in those cases. Unfortunately, the solution is not complete, because
+  // it might miss cases where the full Word32 compare is needed, so it is
+  // essentially a hack.
+  if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1))) {
+    VisitFullWord32Compare(selector, node, kMips64Cmp, cont);
+  } else {
+    VisitOptimizedWord32Compare(selector, node, kMips64Cmp, cont);
+  }
 }
 
 
@@ -1876,21 +2145,30 @@
 // Shared routine for word comparisons against zero.
 void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                           Node* value, FlagsContinuation* cont) {
+  // Try to combine with comparisons against 0 by simply inverting the branch.
   while (selector->CanCover(user, value)) {
+    if (value->opcode() == IrOpcode::kWord32Equal) {
+      Int32BinopMatcher m(value);
+      if (!m.right().Is(0)) break;
+      user = value;
+      value = m.left().node();
+    } else if (value->opcode() == IrOpcode::kWord64Equal) {
+      Int64BinopMatcher m(value);
+      if (!m.right().Is(0)) break;
+      user = value;
+      value = m.left().node();
+    } else {
+      break;
+    }
+
+    cont->Negate();
+  }
+
+  if (selector->CanCover(user, value)) {
     switch (value->opcode()) {
-      case IrOpcode::kWord32Equal: {
-        // Combine with comparisons against 0 by simply inverting the
-        // continuation.
-        Int32BinopMatcher m(value);
-        if (m.right().Is(0)) {
-          user = value;
-          value = m.left().node();
-          cont->Negate();
-          continue;
-        }
+      case IrOpcode::kWord32Equal:
         cont->OverwriteAndNegateIfEqual(kEqual);
         return VisitWord32Compare(selector, value, cont);
-      }
       case IrOpcode::kInt32LessThan:
         cont->OverwriteAndNegateIfEqual(kSignedLessThan);
         return VisitWord32Compare(selector, value, cont);
@@ -1903,19 +2181,9 @@
       case IrOpcode::kUint32LessThanOrEqual:
         cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
         return VisitWord32Compare(selector, value, cont);
-      case IrOpcode::kWord64Equal: {
-        // Combine with comparisons against 0 by simply inverting the
-        // continuation.
-        Int64BinopMatcher m(value);
-        if (m.right().Is(0)) {
-          user = value;
-          value = m.left().node();
-          cont->Negate();
-          continue;
-        }
+      case IrOpcode::kWord64Equal:
         cont->OverwriteAndNegateIfEqual(kEqual);
         return VisitWord64Compare(selector, value, cont);
-      }
       case IrOpcode::kInt64LessThan:
         cont->OverwriteAndNegateIfEqual(kSignedLessThan);
         return VisitWord64Compare(selector, value, cont);
@@ -1986,7 +2254,6 @@
       default:
         break;
     }
-    break;
   }
 
   // Continuation could not be combined with a compare, emit compare against 0.
@@ -2288,14 +2555,15 @@
 
   if (g.CanBeImmediate(index, opcode)) {
     Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
-         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+         g.UseRegister(base), g.UseImmediate(index),
+         g.UseRegisterOrImmediateZero(value));
   } else {
     InstructionOperand addr_reg = g.TempRegister();
     Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
          g.UseRegister(index), g.UseRegister(base));
     // Emit desired store opcode, using temp addr_reg.
     Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
-         addr_reg, g.TempImmediate(0), g.UseRegister(value));
+         addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
   }
 }
 
diff --git a/src/compiler/move-optimizer.cc b/src/compiler/move-optimizer.cc
index d87ece3..b62a8cc 100644
--- a/src/compiler/move-optimizer.cc
+++ b/src/compiler/move-optimizer.cc
@@ -25,11 +25,92 @@
 };
 
 typedef ZoneMap<MoveKey, unsigned, MoveKeyCompare> MoveMap;
-typedef ZoneSet<InstructionOperand, CompareOperandModuloType> OperandSet;
 
-bool Blocks(const OperandSet& set, const InstructionOperand& operand) {
-  return set.find(operand) != set.end();
-}
+class OperandSet {
+ public:
+  explicit OperandSet(ZoneVector<InstructionOperand>* buffer)
+      : set_(buffer), fp_reps_(0) {
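+    // Reuse the caller-owned buffer to avoid a fresh allocation per
+    // instruction; membership tests below are linear scans over it.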
+    buffer->clear();
+  }
+
+  void InsertOp(const InstructionOperand& op) {
+    set_->push_back(op);
+
+    if (!kSimpleFPAliasing && op.IsFPRegister())
+      fp_reps_ |= RepBit(LocationOperand::cast(op).representation());
+  }
+
+  bool Contains(const InstructionOperand& op) const {
+    for (const InstructionOperand& elem : *set_) {
+      if (elem.EqualsCanonicalized(op)) return true;
+    }
+    return false;
+  }
+
+  bool ContainsOpOrAlias(const InstructionOperand& op) const {
+    if (Contains(op)) return true;
+
+    if (!kSimpleFPAliasing && op.IsFPRegister()) {
+      // Platforms where FP registers have complex aliasing need extra checks.
+      const LocationOperand& loc = LocationOperand::cast(op);
+      MachineRepresentation rep = loc.representation();
+      // If we haven't encountered mixed-rep FP registers, skip the extra
+      // checks.
+      if (!HasMixedFPReps(fp_reps_ | RepBit(rep))) return false;
+
+      // Check register against aliasing registers of other FP representations.
+      MachineRepresentation other_rep1, other_rep2;
+      switch (rep) {
+        case MachineRepresentation::kFloat32:
+          other_rep1 = MachineRepresentation::kFloat64;
+          other_rep2 = MachineRepresentation::kSimd128;
+          break;
+        case MachineRepresentation::kFloat64:
+          other_rep1 = MachineRepresentation::kFloat32;
+          other_rep2 = MachineRepresentation::kSimd128;
+          break;
+        case MachineRepresentation::kSimd128:
+          other_rep1 = MachineRepresentation::kFloat32;
+          other_rep2 = MachineRepresentation::kFloat64;
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+      const RegisterConfiguration* config = RegisterConfiguration::Turbofan();
+      int base = -1;
+      int aliases =
+          config->GetAliases(rep, loc.register_code(), other_rep1, &base);
+      DCHECK(aliases > 0 || (aliases == 0 && base == -1));
+      while (aliases--) {
+        if (Contains(AllocatedOperand(LocationOperand::REGISTER, other_rep1,
+                                      base + aliases))) {
+          return true;
+        }
+      }
+      aliases = config->GetAliases(rep, loc.register_code(), other_rep2, &base);
+      DCHECK(aliases > 0 || (aliases == 0 && base == -1));
+      while (aliases--) {
+        if (Contains(AllocatedOperand(LocationOperand::REGISTER, other_rep2,
+                                      base + aliases))) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
+ private:
+  static int RepBit(MachineRepresentation rep) {
+    return 1 << static_cast<int>(rep);
+  }
+
+  static bool HasMixedFPReps(int reps) {
+    return reps && !base::bits::IsPowerOfTwo32(reps);
+  }
+
+  ZoneVector<InstructionOperand>* set_;
+  int fp_reps_;
+};
 
 int FindFirstNonEmptySlot(const Instruction* instr) {
   int i = Instruction::FIRST_GAP_POSITION;
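
The fp_reps_ bitmask above keeps the aliasing check cheap in the common case: each FP representation seen so far sets one bit, and the expensive alias walk only runs once two different representations have been mixed, i.e. once the mask has more than one bit set and is therefore not a power of two. A standalone sketch of that trick (not V8 code):

    #include <cassert>

    enum class Rep { kFloat32 = 0, kFloat64 = 1, kSimd128 = 2 };

    int RepBit(Rep rep) { return 1 << static_cast<int>(rep); }

    // Equivalent to the base::bits::IsPowerOfTwo32 test used above: a
    // mask with exactly one bit set has no bits in common with itself
    // minus one.
    bool HasMixedReps(int reps) {
      return reps != 0 && (reps & (reps - 1)) != 0;
    }

    int main() {
      int reps = 0;
      reps |= RepBit(Rep::kFloat64);
      assert(!HasMixedReps(reps));  // one representation: skip alias checks
      reps |= RepBit(Rep::kFloat64);
      assert(!HasMixedReps(reps));  // same representation again: still cheap
      reps |= RepBit(Rep::kFloat32);
      assert(HasMixedReps(reps));   // float32 and float64 mix: do the walk
    }
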
@@ -47,12 +128,12 @@
 
 }  // namespace
 
-
 MoveOptimizer::MoveOptimizer(Zone* local_zone, InstructionSequence* code)
     : local_zone_(local_zone),
       code_(code),
-      local_vector_(local_zone) {}
-
+      local_vector_(local_zone),
+      operand_buffer1(local_zone),
+      operand_buffer2(local_zone) {}
 
 void MoveOptimizer::Run() {
   for (Instruction* instruction : code()->instructions()) {
@@ -92,27 +173,27 @@
   DCHECK(instruction->parallel_moves()[1] == nullptr ||
          instruction->parallel_moves()[1]->empty());
 
-  OperandSet outputs(local_zone());
-  OperandSet inputs(local_zone());
+  OperandSet outputs(&operand_buffer1);
+  OperandSet inputs(&operand_buffer2);
 
   // Outputs and temps are treated together as potentially clobbering a
   // destination operand.
   for (size_t i = 0; i < instruction->OutputCount(); ++i) {
-    outputs.insert(*instruction->OutputAt(i));
+    outputs.InsertOp(*instruction->OutputAt(i));
   }
   for (size_t i = 0; i < instruction->TempCount(); ++i) {
-    outputs.insert(*instruction->TempAt(i));
+    outputs.InsertOp(*instruction->TempAt(i));
   }
 
   // Input operands block elisions.
   for (size_t i = 0; i < instruction->InputCount(); ++i) {
-    inputs.insert(*instruction->InputAt(i));
+    inputs.InsertOp(*instruction->InputAt(i));
   }
 
   // Elide moves made redundant by the instruction.
   for (MoveOperands* move : *moves) {
-    if (outputs.find(move->destination()) != outputs.end() &&
-        inputs.find(move->destination()) == inputs.end()) {
+    if (outputs.ContainsOpOrAlias(move->destination()) &&
+        !inputs.ContainsOpOrAlias(move->destination())) {
       move->Eliminate();
     }
   }
@@ -121,7 +202,7 @@
   // the one for its input.
   if (instruction->IsRet() || instruction->IsTailCall()) {
     for (MoveOperands* move : *moves) {
-      if (inputs.find(move->destination()) == inputs.end()) {
+      if (!inputs.ContainsOpOrAlias(move->destination())) {
         move->Eliminate();
       }
     }
@@ -134,13 +215,13 @@
   ParallelMove* from_moves = from->parallel_moves()[0];
   if (from_moves == nullptr || from_moves->empty()) return;
 
-  OperandSet dst_cant_be(local_zone());
-  OperandSet src_cant_be(local_zone());
+  OperandSet dst_cant_be(&operand_buffer1);
+  OperandSet src_cant_be(&operand_buffer2);
 
   // If an operand is an input to the instruction, we cannot move assignments
   // where it appears on the LHS.
   for (size_t i = 0; i < from->InputCount(); ++i) {
-    dst_cant_be.insert(*from->InputAt(i));
+    dst_cant_be.InsertOp(*from->InputAt(i));
   }
   // If an operand is an output of the instruction, we cannot move assignments
   // where it appears on the RHS, because we would lose its value before the
@@ -149,10 +230,10 @@
   // The output can't appear on the LHS because we performed
   // RemoveClobberedDestinations for the "from" instruction.
   for (size_t i = 0; i < from->OutputCount(); ++i) {
-    src_cant_be.insert(*from->OutputAt(i));
+    src_cant_be.InsertOp(*from->OutputAt(i));
   }
   for (size_t i = 0; i < from->TempCount(); ++i) {
-    src_cant_be.insert(*from->TempAt(i));
+    src_cant_be.InsertOp(*from->TempAt(i));
   }
   for (MoveOperands* move : *from_moves) {
     if (move->IsRedundant()) continue;
@@ -160,7 +241,7 @@
     // move "z = dest", because z would become y rather than "V".
     // We assume CompressMoves has happened before this, which means we don't
     // have more than one assignment to dest.
-    src_cant_be.insert(move->destination());
+    src_cant_be.InsertOp(move->destination());
   }
 
   ZoneSet<MoveKey, MoveKeyCompare> move_candidates(local_zone());
@@ -168,7 +249,7 @@
   // destination operands are eligible for being moved down.
   for (MoveOperands* move : *from_moves) {
     if (move->IsRedundant()) continue;
-    if (!Blocks(dst_cant_be, move->destination())) {
+    if (!dst_cant_be.ContainsOpOrAlias(move->destination())) {
       MoveKey key = {move->source(), move->destination()};
       move_candidates.insert(key);
     }
@@ -183,8 +264,8 @@
       auto current = iter;
       ++iter;
       InstructionOperand src = current->source;
-      if (Blocks(src_cant_be, src)) {
-        src_cant_be.insert(current->destination);
+      if (src_cant_be.ContainsOpOrAlias(src)) {
+        src_cant_be.InsertOp(current->destination);
         move_candidates.erase(current);
         changed = true;
       }
@@ -223,8 +304,7 @@
     // merging the two gaps.
     for (MoveOperands* move : *right) {
       if (move->IsRedundant()) continue;
-      MoveOperands* to_eliminate = left->PrepareInsertAfter(move);
-      if (to_eliminate != nullptr) eliminated.push_back(to_eliminate);
+      left->PrepareInsertAfter(move, &eliminated);
     }
     // Eliminate dead moves.
     for (MoveOperands* to_eliminate : eliminated) {
@@ -317,7 +397,7 @@
       if (!op->IsConstant() && !op->IsImmediate()) return;
     }
   }
-  // TODO(dcarney): pass a ZonePool down for this?
+  // TODO(dcarney): pass a ZoneStats down for this?
   MoveMap move_map(local_zone());
   size_t correct_counts = 0;
   // Accumulate set of shared moves.
@@ -350,7 +430,7 @@
   if (correct_counts != move_map.size()) {
     // Moves that are unique to each predecessor won't be pushed to the common
     // successor.
-    OperandSet conflicting_srcs(local_zone());
+    OperandSet conflicting_srcs(&operand_buffer1);
     for (auto iter = move_map.begin(), end = move_map.end(); iter != end;) {
       auto current = iter;
       ++iter;
@@ -360,7 +440,7 @@
         // there are such moves, we could move them, but the destination of the
         // moves staying behind can't appear as a source of a common move,
         // because the move staying behind will clobber this destination.
-        conflicting_srcs.insert(dest);
+        conflicting_srcs.InsertOp(dest);
         move_map.erase(current);
       }
     }
@@ -374,9 +454,8 @@
         auto current = iter;
         ++iter;
         DCHECK_EQ(block->PredecessorCount(), current->second);
-        if (conflicting_srcs.find(current->first.source) !=
-            conflicting_srcs.end()) {
-          conflicting_srcs.insert(current->first.destination);
+        if (conflicting_srcs.ContainsOpOrAlias(current->first.source)) {
+          conflicting_srcs.InsertOp(current->first.destination);
           move_map.erase(current);
           changed = true;
         }
diff --git a/src/compiler/move-optimizer.h b/src/compiler/move-optimizer.h
index ce26a7f..3844d33 100644
--- a/src/compiler/move-optimizer.h
+++ b/src/compiler/move-optimizer.h
@@ -6,13 +6,14 @@
 #define V8_COMPILER_MOVE_OPTIMIZER_
 
 #include "src/compiler/instruction.h"
+#include "src/globals.h"
 #include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-class MoveOptimizer final {
+class V8_EXPORT_PRIVATE MoveOptimizer final {
  public:
   MoveOptimizer(Zone* local_zone, InstructionSequence* code);
   void Run();
@@ -52,6 +53,11 @@
   InstructionSequence* const code_;
   MoveOpVector local_vector_;
 
+  // Reusable buffers for storing operand sets. We need at most two sets
+  // at any given time, so we create two buffers.
+  ZoneVector<InstructionOperand> operand_buffer1;
+  ZoneVector<InstructionOperand> operand_buffer2;
+
   DISALLOW_COPY_AND_ASSIGN(MoveOptimizer);
 };
 
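The two ZoneVector buffers declared above exist so that the move-optimizer passes can build their operand sets without allocating on every call: each OperandSet clears and refills a long-lived buffer, and membership tests are plain linear scans, which is cheap because these sets stay small. A minimal sketch of the reuse pattern, with std::vector standing in for ZoneVector:

    #include <vector>

    // OperandSet stand-in: borrows a caller-owned buffer, clears it, and
    // answers membership queries by linear scan, which beats a tree-based
    // set for the handful of operands an instruction carries.
    class SmallSet {
     public:
      explicit SmallSet(std::vector<int>* buffer) : set_(buffer) {
        buffer->clear();
      }
      void Insert(int value) { set_->push_back(value); }
      bool Contains(int value) const {
        for (int element : *set_) {
          if (element == value) return true;
        }
        return false;
      }

     private:
      std::vector<int>* set_;
    };

    int main() {
      std::vector<int> buffer1, buffer2;  // long-lived, reused every call
      for (int pass = 0; pass < 3; ++pass) {
        SmallSet outputs(&buffer1);
        SmallSet inputs(&buffer2);
        outputs.Insert(pass);
        // A move whose destination is clobbered by an output, and not
        // read as an input, would be eliminated here.
        bool eliminate = outputs.Contains(pass) && !inputs.Contains(pass);
        (void)eliminate;
      }
    }
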
diff --git a/src/compiler/node-aux-data.h b/src/compiler/node-aux-data.h
index b50ff38..277ff18 100644
--- a/src/compiler/node-aux-data.h
+++ b/src/compiler/node-aux-data.h
@@ -15,20 +15,20 @@
 // Forward declarations.
 class Node;
 
-template <class T>
+template <class T, T def()>
 class NodeAuxData {
  public:
   explicit NodeAuxData(Zone* zone) : aux_data_(zone) {}
 
   void Set(Node* node, T const& data) {
     size_t const id = node->id();
-    if (id >= aux_data_.size()) aux_data_.resize(id + 1);
+    if (id >= aux_data_.size()) aux_data_.resize(id + 1, def());
     aux_data_[id] = data;
   }
 
   T Get(Node* node) const {
     size_t const id = node->id();
-    return (id < aux_data_.size()) ? aux_data_[id] : T();
+    return (id < aux_data_.size()) ? aux_data_[id] : def();
   }
 
   class const_iterator;
@@ -41,9 +41,8 @@
   ZoneVector<T> aux_data_;
 };
 
-
-template <class T>
-class NodeAuxData<T>::const_iterator {
+template <class T, T def()>
+class NodeAuxData<T, def>::const_iterator {
  public:
   typedef std::forward_iterator_tag iterator_category;
   typedef int difference_type;
@@ -76,14 +75,16 @@
   size_t current_;
 };
 
-template <class T>
-typename NodeAuxData<T>::const_iterator NodeAuxData<T>::begin() const {
-  return typename NodeAuxData<T>::const_iterator(&aux_data_, 0);
+template <class T, T def()>
+typename NodeAuxData<T, def>::const_iterator NodeAuxData<T, def>::begin()
+    const {
+  return typename NodeAuxData<T, def>::const_iterator(&aux_data_, 0);
 }
 
-template <class T>
-typename NodeAuxData<T>::const_iterator NodeAuxData<T>::end() const {
-  return typename NodeAuxData<T>::const_iterator(&aux_data_, aux_data_.size());
+template <class T, T def()>
+typename NodeAuxData<T, def>::const_iterator NodeAuxData<T, def>::end() const {
+  return typename NodeAuxData<T, def>::const_iterator(&aux_data_,
+                                                      aux_data_.size());
 }
 
 }  // namespace compiler
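
The new template shape above passes the default value as a function non-type template parameter, so out-of-range reads and freshly resized slots yield a caller-chosen default rather than a value-initialized T. A self-contained sketch of the same shape:

    #include <cassert>
    #include <vector>

    template <class T, T def()>
    class AuxData {
     public:
      void Set(size_t id, const T& data) {
        // Newly created slots are filled with the default, not with T().
        if (id >= data_.size()) data_.resize(id + 1, def());
        data_[id] = data;
      }
      T Get(size_t id) const {
        return id < data_.size() ? data_[id] : def();
      }

     private:
      std::vector<T> data_;
    };

    int MinusOne() { return -1; }

    int main() {
      AuxData<int, MinusOne> table;
      table.Set(3, 42);
      assert(table.Get(3) == 42);
      assert(table.Get(0) == -1);   // resized slot carries the default
      assert(table.Get(99) == -1);  // out-of-range reads do too
    }
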
diff --git a/src/compiler/node-cache.cc b/src/compiler/node-cache.cc
index 0be6f81..fc9a44c 100644
--- a/src/compiler/node-cache.cc
+++ b/src/compiler/node-cache.cc
@@ -6,6 +6,7 @@
 
 #include <cstring>
 
+#include "src/globals.h"
 #include "src/zone/zone-containers.h"
 #include "src/zone/zone.h"
 
@@ -111,12 +112,11 @@
 // -----------------------------------------------------------------------------
 // Instantiations
 
+template class V8_EXPORT_PRIVATE NodeCache<int32_t>;
+template class V8_EXPORT_PRIVATE NodeCache<int64_t>;
 
-template class NodeCache<int32_t>;
-template class NodeCache<int64_t>;
-
-template class NodeCache<RelocInt32Key>;
-template class NodeCache<RelocInt64Key>;
+template class V8_EXPORT_PRIVATE NodeCache<RelocInt32Key>;
+template class V8_EXPORT_PRIVATE NodeCache<RelocInt64Key>;
 
 }  // namespace compiler
 }  // namespace internal
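
The instantiations above are explicit instantiation definitions; tagging them V8_EXPORT_PRIVATE compiles each NodeCache variant once in this translation unit and exports it, so component-external users (chiefly tests) link against these copies instead of re-instantiating the template. A generic single-file sketch of the explicit-instantiation pattern, independent of V8:

    #include <cassert>
    #include <map>

    template <typename Key>
    class Cache {
     public:
      int* Find(Key key) { return &map_[key]; }

     private:
      std::map<Key, int> map_;
    };

    // Explicit instantiation definition: the template's members are
    // compiled here once; in V8 this is the copy the export macro makes
    // visible outside the component.
    template class Cache<int>;

    int main() {
      Cache<int> cache;
      *cache.Find(7) = 42;
      assert(*cache.Find(7) == 42);
    }
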
diff --git a/src/compiler/node-matchers.h b/src/compiler/node-matchers.h
index 6c283dc..c317fdd 100644
--- a/src/compiler/node-matchers.h
+++ b/src/compiler/node-matchers.h
@@ -9,9 +9,11 @@
 
 // TODO(turbofan): Move ExternalReference out of assembler.h
 #include "src/assembler.h"
+#include "src/base/compiler-specific.h"
 #include "src/compiler/node.h"
 #include "src/compiler/operator.h"
 #include "src/double.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -165,6 +167,9 @@
   bool IsNormal() const {
     return this->HasValue() && std::isnormal(this->Value());
   }
+  bool IsInteger() const {
+    return this->HasValue() && std::nearbyint(this->Value()) == this->Value();
+  }
   bool IsPositiveOrNegativePowerOf2() const {
     if (!this->HasValue() || (this->Value() == 0.0)) {
       return false;
@@ -651,7 +656,7 @@
 typedef BaseWithIndexAndDisplacementMatcher<Int64AddMatcher>
     BaseWithIndexAndDisplacement64Matcher;
 
-struct BranchMatcher : public NodeMatcher {
+struct V8_EXPORT_PRIVATE BranchMatcher : public NON_EXPORTED_BASE(NodeMatcher) {
   explicit BranchMatcher(Node* branch);
 
   bool Matched() const { return if_true_ && if_false_; }
@@ -665,8 +670,8 @@
   Node* if_false_;
 };
 
-
-struct DiamondMatcher : public NodeMatcher {
+struct V8_EXPORT_PRIVATE DiamondMatcher
+    : public NON_EXPORTED_BASE(NodeMatcher) {
   explicit DiamondMatcher(Node* merge);
 
   bool Matched() const { return branch_; }
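
The IsInteger() predicate added above (guarded by HasValue() in the matcher) relies on std::nearbyint returning its argument unchanged exactly when the value is already integral; NaN falls out naturally because NaN never compares equal to itself. A standalone sketch of just the comparison:

    #include <cassert>
    #include <cmath>

    bool IsIntegerValue(double value) {
      return std::nearbyint(value) == value;
    }

    int main() {
      assert(IsIntegerValue(3.0));
      assert(!IsIntegerValue(3.5));
      assert(IsIntegerValue(-0.0));           // -0 compares equal to itself
      assert(IsIntegerValue(1e100));          // doubles past 2^53 are integral
      assert(!IsIntegerValue(std::nan("")));  // NaN != NaN, so NaN is rejected
    }
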
diff --git a/src/compiler/node-properties.cc b/src/compiler/node-properties.cc
index 22539cb..646dbc2 100644
--- a/src/compiler/node-properties.cc
+++ b/src/compiler/node-properties.cc
@@ -338,74 +338,6 @@
 
 
 // static
-MaybeHandle<Context> NodeProperties::GetSpecializationNativeContext(
-    Node* node, MaybeHandle<Context> native_context) {
-  while (true) {
-    switch (node->opcode()) {
-      case IrOpcode::kJSLoadContext: {
-        ContextAccess const& access = ContextAccessOf(node->op());
-        if (access.index() != Context::NATIVE_CONTEXT_INDEX) {
-          return MaybeHandle<Context>();
-        }
-        // Skip over the intermediate contexts, we're only interested in the
-        // very last context in the context chain anyway.
-        node = NodeProperties::GetContextInput(node);
-        break;
-      }
-      case IrOpcode::kJSCreateBlockContext:
-      case IrOpcode::kJSCreateCatchContext:
-      case IrOpcode::kJSCreateFunctionContext:
-      case IrOpcode::kJSCreateScriptContext:
-      case IrOpcode::kJSCreateWithContext: {
-        // Skip over the intermediate contexts, we're only interested in the
-        // very last context in the context chain anyway.
-        node = NodeProperties::GetContextInput(node);
-        break;
-      }
-      case IrOpcode::kHeapConstant: {
-        // Extract the native context from the actual {context}.
-        Handle<Context> context =
-            Handle<Context>::cast(OpParameter<Handle<HeapObject>>(node));
-        return handle(context->native_context());
-      }
-      case IrOpcode::kOsrValue: {
-        int const index = OpParameter<int>(node);
-        if (index == Linkage::kOsrContextSpillSlotIndex) {
-          return native_context;
-        }
-        return MaybeHandle<Context>();
-      }
-      case IrOpcode::kParameter: {
-        Node* const start = NodeProperties::GetValueInput(node, 0);
-        DCHECK_EQ(IrOpcode::kStart, start->opcode());
-        int const index = ParameterIndexOf(node->op());
-        // The context is always the last parameter to a JavaScript function,
-        // and {Parameter} indices start at -1, so value outputs of {Start}
-        // look like this: closure, receiver, param0, ..., paramN, context.
-        if (index == start->op()->ValueOutputCount() - 2) {
-          return native_context;
-        }
-        return MaybeHandle<Context>();
-      }
-      default:
-        return MaybeHandle<Context>();
-    }
-  }
-}
-
-
-// static
-MaybeHandle<JSGlobalObject> NodeProperties::GetSpecializationGlobalObject(
-    Node* node, MaybeHandle<Context> native_context) {
-  Handle<Context> context;
-  if (GetSpecializationNativeContext(node, native_context).ToHandle(&context)) {
-    return handle(context->global_object());
-  }
-  return MaybeHandle<JSGlobalObject>();
-}
-
-
-// static
 Type* NodeProperties::GetTypeOrAny(Node* node) {
   return IsTyped(node) ? node->type() : Type::Any();
 }
diff --git a/src/compiler/node-properties.h b/src/compiler/node-properties.h
index ed3c117..2325323 100644
--- a/src/compiler/node-properties.h
+++ b/src/compiler/node-properties.h
@@ -7,6 +7,7 @@
 
 #include "src/compiler/node.h"
 #include "src/compiler/types.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -17,7 +18,7 @@
 class CommonOperatorBuilder;
 
 // A facade that simplifies access to the different kinds of inputs to a node.
-class NodeProperties final {
+class V8_EXPORT_PRIVATE NodeProperties final {
  public:
   // ---------------------------------------------------------------------------
   // Input layout.
@@ -131,18 +132,6 @@
   static MaybeHandle<Context> GetSpecializationContext(
       Node* node, MaybeHandle<Context> context = MaybeHandle<Context>());
 
-  // Try to retrieve the specialization native context from the given
-  // {node}, optionally utilizing the knowledge about the (outermost)
-  // {native_context}.
-  static MaybeHandle<Context> GetSpecializationNativeContext(
-      Node* node, MaybeHandle<Context> native_context = MaybeHandle<Context>());
-
-  // Try to retrieve the specialization global object from the given
-  // {node}, optionally utilizing the knowledge about the (outermost)
-  // {native_context}.
-  static MaybeHandle<JSGlobalObject> GetSpecializationGlobalObject(
-      Node* node, MaybeHandle<Context> native_context = MaybeHandle<Context>());
-
   // ---------------------------------------------------------------------------
   // Type.
 
diff --git a/src/compiler/node.h b/src/compiler/node.h
index e940371..dc6c5dc 100644
--- a/src/compiler/node.h
+++ b/src/compiler/node.h
@@ -8,6 +8,7 @@
 #include "src/compiler/opcodes.h"
 #include "src/compiler/operator.h"
 #include "src/compiler/types.h"
+#include "src/globals.h"
 #include "src/zone/zone-containers.h"
 
 namespace v8 {
@@ -39,7 +40,7 @@
 // compilation, e.g. during lowering passes. Other information that needs to be
 // associated with Nodes during compilation must be stored out-of-line indexed
 // by the Node's id.
-class Node final {
+class V8_EXPORT_PRIVATE Node final {
  public:
   static Node* New(Zone* zone, NodeId id, const Operator* op, int input_count,
                    Node* const* inputs, bool has_extensible_inputs);
@@ -126,7 +127,7 @@
 
   InputEdges input_edges() { return InputEdges(this); }
 
-  class Inputs final {
+  class V8_EXPORT_PRIVATE Inputs final {
    public:
     typedef Node* value_type;
 
@@ -162,7 +163,7 @@
 
   UseEdges use_edges() { return UseEdges(this); }
 
-  class Uses final {
+  class V8_EXPORT_PRIVATE Uses final {
    public:
     typedef Node* value_type;
 
diff --git a/src/compiler/opcodes.h b/src/compiler/opcodes.h
index 5ac2012..fdbe001 100644
--- a/src/compiler/opcodes.h
+++ b/src/compiler/opcodes.h
@@ -7,6 +7,8 @@
 
 #include <iosfwd>
 
+#include "src/globals.h"
+
 // Opcodes for control operators.
 #define CONTROL_OP_LIST(V) \
   V(Start)                 \
@@ -39,6 +41,7 @@
   V(Float64Constant)          \
   V(ExternalConstant)         \
   V(NumberConstant)           \
+  V(PointerConstant)          \
   V(HeapConstant)             \
   V(RelocatableInt32Constant) \
   V(RelocatableInt64Constant)
@@ -55,9 +58,11 @@
   V(StateValues)          \
   V(TypedStateValues)     \
   V(ObjectState)          \
+  V(TypedObjectState)     \
   V(Call)                 \
   V(Parameter)            \
   V(OsrValue)             \
+  V(OsrGuard)             \
   V(LoopExit)             \
   V(LoopExitValue)        \
   V(LoopExitEffect)       \
@@ -123,6 +128,7 @@
   V(JSCreateArray)            \
   V(JSCreateClosure)          \
   V(JSCreateIterResultObject) \
+  V(JSCreateKeyValueArray)    \
   V(JSCreateLiteralArray)     \
   V(JSCreateLiteralObject)    \
   V(JSCreateLiteralRegExp)    \
@@ -155,6 +161,8 @@
   V(JSForInPrepare)                 \
   V(JSLoadMessage)                  \
   V(JSStoreMessage)                 \
+  V(JSLoadModule)                   \
+  V(JSStoreModule)                  \
   V(JSGeneratorStore)               \
   V(JSGeneratorRestoreContinuation) \
   V(JSGeneratorRestoreRegister)     \
@@ -177,6 +185,7 @@
   V(ChangeInt32ToTagged)             \
   V(ChangeUint32ToTagged)            \
   V(ChangeFloat64ToTagged)           \
+  V(ChangeFloat64ToTaggedPointer)    \
   V(ChangeTaggedToBit)               \
   V(ChangeBitToTagged)               \
   V(TruncateTaggedToWord32)          \
@@ -199,7 +208,8 @@
   V(CheckedTaggedToInt32)             \
   V(CheckedTruncateTaggedToWord32)    \
   V(CheckedTaggedToFloat64)           \
-  V(CheckedTaggedToTaggedSigned)
+  V(CheckedTaggedToTaggedSigned)      \
+  V(CheckedTaggedToTaggedPointer)
 
 #define SIMPLIFIED_COMPARE_BINOP_LIST(V) \
   V(NumberEqual)                         \
@@ -276,6 +286,7 @@
   V(NumberToBoolean)                   \
   V(NumberToInt32)                     \
   V(NumberToUint32)                    \
+  V(NumberToUint8Clamped)              \
   V(NumberSilenceNaN)
 
 #define SIMPLIFIED_OTHER_OP_LIST(V) \
@@ -724,7 +735,7 @@
 
 // Declare an enumeration with all the opcodes at all levels so that they
 // can be globally, uniquely numbered.
-class IrOpcode {
+class V8_EXPORT_PRIVATE IrOpcode {
  public:
   enum Value {
 #define DECLARE_OPCODE(x) k##x,
@@ -784,7 +795,7 @@
   }
 };
 
-std::ostream& operator<<(std::ostream&, IrOpcode::Value);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, IrOpcode::Value);
 
 }  // namespace compiler
 }  // namespace internal
diff --git a/src/compiler/operation-typer.cc b/src/compiler/operation-typer.cc
index 4295a22..9198f4b 100644
--- a/src/compiler/operation-typer.cc
+++ b/src/compiler/operation-typer.cc
@@ -19,18 +19,14 @@
 OperationTyper::OperationTyper(Isolate* isolate, Zone* zone)
     : zone_(zone), cache_(TypeCache::Get()) {
   Factory* factory = isolate->factory();
-  infinity_ = Type::Constant(factory->infinity_value(), zone);
-  minus_infinity_ = Type::Constant(factory->minus_infinity_value(), zone);
-  // Unfortunately, the infinities created in other places might be different
-  // ones (eg the result of NewNumber in TypeNumberConstant).
-  Type* truncating_to_zero =
-      Type::Union(Type::Union(infinity_, minus_infinity_, zone),
-                  Type::MinusZeroOrNaN(), zone);
+  infinity_ = Type::NewConstant(factory->infinity_value(), zone);
+  minus_infinity_ = Type::NewConstant(factory->minus_infinity_value(), zone);
+  Type* truncating_to_zero = Type::MinusZeroOrNaN();
   DCHECK(!truncating_to_zero->Maybe(Type::Integral32()));
 
-  singleton_false_ = Type::Constant(factory->false_value(), zone);
-  singleton_true_ = Type::Constant(factory->true_value(), zone);
-  singleton_the_hole_ = Type::Constant(factory->the_hole_value(), zone);
+  singleton_false_ = Type::HeapConstant(factory->false_value(), zone);
+  singleton_true_ = Type::HeapConstant(factory->true_value(), zone);
+  singleton_the_hole_ = Type::HeapConstant(factory->the_hole_value(), zone);
   signed32ish_ = Type::Union(Type::Signed32(), truncating_to_zero, zone);
   unsigned32ish_ = Type::Union(Type::Unsigned32(), truncating_to_zero, zone);
 }
@@ -494,6 +490,13 @@
   return Type::Unsigned32();
 }
 
+Type* OperationTyper::NumberToUint8Clamped(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+
+  if (type->Is(cache_.kUint8)) return type;
+  return cache_.kUint8;
+}
+
 Type* OperationTyper::NumberSilenceNaN(Type* type) {
   DCHECK(type->Is(Type::Number()));
   // TODO(jarin): This is a terrible hack; we definitely need a dedicated type
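
NumberToUint8Clamped above only narrows the static type to the cached kUint8 range; the runtime operation it types is JavaScript's ToUint8Clamp (used by Uint8ClampedArray), which clamps to [0, 255] and rounds ties to even. A standalone sketch of that semantics, written from the ECMAScript spec rather than from V8's implementation:

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    uint8_t ToUint8Clamped(double value) {
      if (std::isnan(value) || value <= 0.0) return 0;
      if (value >= 255.0) return 255;
      double floor_v = std::floor(value);
      double frac = value - floor_v;
      if (frac < 0.5) return static_cast<uint8_t>(floor_v);
      if (frac > 0.5) return static_cast<uint8_t>(floor_v) + 1;
      // Exactly halfway: round to the nearest even integer.
      uint8_t low = static_cast<uint8_t>(floor_v);
      return (low % 2 == 0) ? low : low + 1;
    }

    int main() {
      assert(ToUint8Clamped(-5.0) == 0);
      assert(ToUint8Clamped(300.0) == 255);
      assert(ToUint8Clamped(2.5) == 2);  // ties round to even
      assert(ToUint8Clamped(3.5) == 4);
      assert(ToUint8Clamped(3.2) == 3);
    }
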
diff --git a/src/compiler/operator-properties.h b/src/compiler/operator-properties.h
index 4fe5f59..b4bb8b5 100644
--- a/src/compiler/operator-properties.h
+++ b/src/compiler/operator-properties.h
@@ -6,6 +6,7 @@
 #define V8_COMPILER_OPERATOR_PROPERTIES_H_
 
 #include "src/base/macros.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -14,7 +15,7 @@
 // Forward declarations.
 class Operator;
 
-class OperatorProperties final {
+class V8_EXPORT_PRIVATE OperatorProperties final {
  public:
   static bool HasContextInput(const Operator* op);
   static int GetContextInputCount(const Operator* op) {
diff --git a/src/compiler/operator.cc b/src/compiler/operator.cc
index fa1b2d8..4f746e2 100644
--- a/src/compiler/operator.cc
+++ b/src/compiler/operator.cc
@@ -24,7 +24,6 @@
 // static
 STATIC_CONST_MEMBER_DEFINITION const size_t Operator::kMaxControlOutputCount;
 
-
 Operator::Operator(Opcode opcode, Properties properties, const char* mnemonic,
                    size_t value_in, size_t effect_in, size_t control_in,
                    size_t value_out, size_t effect_out, size_t control_out)
@@ -36,8 +35,7 @@
       control_in_(CheckRange<uint16_t>(control_in)),
       value_out_(CheckRange<uint16_t>(value_out)),
       effect_out_(CheckRange<uint8_t>(effect_out)),
-      control_out_(CheckRange<uint16_t>(control_out)) {}
-
+      control_out_(CheckRange<uint32_t>(control_out)) {}
 
 std::ostream& operator<<(std::ostream& os, const Operator& op) {
   op.PrintTo(os);
diff --git a/src/compiler/operator.h b/src/compiler/operator.h
index 8e3a9d1..dea94f0 100644
--- a/src/compiler/operator.h
+++ b/src/compiler/operator.h
@@ -7,8 +7,10 @@
 
 #include <ostream>  // NOLINT(readability/streams)
 
+#include "src/base/compiler-specific.h"
 #include "src/base/flags.h"
 #include "src/base/functional.h"
+#include "src/globals.h"
 #include "src/handles.h"
 #include "src/zone/zone.h"
 
@@ -28,7 +30,7 @@
 // as the name for a named field access, the ID of a runtime function, etc.
 // Static parameters are private to the operator and only semantically
 // meaningful to the operator itself.
-class Operator : public ZoneObject {
+class V8_EXPORT_PRIVATE Operator : public NON_EXPORTED_BASE(ZoneObject) {
  public:
   typedef uint16_t Opcode;
 
@@ -142,7 +144,7 @@
   uint16_t control_in_;
   uint16_t value_out_;
   uint8_t effect_out_;
-  uint16_t control_out_;
+  uint32_t control_out_;
 
   DISALLOW_COPY_AND_ASSIGN(Operator);
 };
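
Widening control_out_ from uint16_t to uint32_t, together with the switch to CheckRange<uint32_t> in the constructor, raises the number of control outputs an Operator may declare. CheckRange itself is a checked narrowing cast; the following is only a hypothetical stand-in for it, since the real implementation is not shown in this diff:

    #include <cassert>
    #include <cstdint>
    #include <limits>

    // Hypothetical stand-in for Operator::CheckRange: cast a count into
    // a narrower storage type, asserting that no bits are lost. Widening
    // the field raises the limit this check enforces.
    template <typename N>
    N CheckRange(size_t value) {
      assert(value <= static_cast<size_t>(std::numeric_limits<N>::max()));
      return static_cast<N>(value);
    }

    int main() {
      uint16_t small = CheckRange<uint16_t>(65535);   // fits in 16 bits
      uint32_t large = CheckRange<uint32_t>(100000);  // needs the wider field
      assert(small == 65535 && large == 100000);
    }
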
diff --git a/src/compiler/osr.cc b/src/compiler/osr.cc
index 6d61aff..a2dc430 100644
--- a/src/compiler/osr.cc
+++ b/src/compiler/osr.cc
@@ -47,13 +47,14 @@
     if (TRACE_COND) PrintF(__VA_ARGS__); \
   } while (false)
 
+namespace {
 
 // Peel outer loops and rewire the graph so that control reduction can
 // produce a properly formed graph.
-static void PeelOuterLoopsForOsr(Graph* graph, CommonOperatorBuilder* common,
-                                 Zone* tmp_zone, Node* dead,
-                                 LoopTree* loop_tree, LoopTree::Loop* osr_loop,
-                                 Node* osr_normal_entry, Node* osr_loop_entry) {
+void PeelOuterLoopsForOsr(Graph* graph, CommonOperatorBuilder* common,
+                          Zone* tmp_zone, Node* dead, LoopTree* loop_tree,
+                          LoopTree::Loop* osr_loop, Node* osr_normal_entry,
+                          Node* osr_loop_entry) {
   const size_t original_count = graph->NodeCount();
   AllNodes all(tmp_zone, graph);
   NodeVector tmp_inputs(tmp_zone);
@@ -93,7 +94,8 @@
         continue;
       }
       if (orig->InputCount() == 0 || orig->opcode() == IrOpcode::kParameter ||
-          orig->opcode() == IrOpcode::kOsrValue) {
+          orig->opcode() == IrOpcode::kOsrValue ||
+          orig->opcode() == IrOpcode::kOsrGuard) {
         // No need to copy leaf nodes or parameters.
         mapping->at(orig->id()) = orig;
         continue;
@@ -255,6 +257,42 @@
   }
 }
 
+void SetTypeForOsrValue(Node* osr_value, Node* loop,
+                        CommonOperatorBuilder* common) {
+  Node* osr_guard = nullptr;
+  for (Node* use : osr_value->uses()) {
+    if (use->opcode() == IrOpcode::kOsrGuard) {
+      DCHECK_EQ(use->InputAt(0), osr_value);
+      osr_guard = use;
+      break;
+    }
+  }
+
+  OsrGuardType guard_type = OsrGuardType::kAny;
+  // Find the phi that uses the OsrGuard node and get the type from
+  // there. Skip the search if the OsrGuard does not have a value use
+  // (i.e., if there is no use beyond the effect use).
+  if (OsrGuardTypeOf(osr_guard->op()) == OsrGuardType::kUninitialized &&
+      osr_guard->UseCount() > 1) {
+    Type* type = nullptr;
+    for (Node* use : osr_guard->uses()) {
+      if (use->opcode() == IrOpcode::kPhi) {
+        if (NodeProperties::GetControlInput(use) != loop) continue;
+        CHECK_NULL(type);
+        type = NodeProperties::GetType(use);
+      }
+    }
+    CHECK_NOT_NULL(type);
+
+    if (type->Is(Type::SignedSmall())) {
+      guard_type = OsrGuardType::kSignedSmall;
+    }
+  }
+
+  NodeProperties::ChangeOp(osr_guard, common->OsrGuard(guard_type));
+}
+
+}  // namespace
 
 void OsrHelper::Deconstruct(JSGraph* jsgraph, CommonOperatorBuilder* common,
                             Zone* tmp_zone) {
@@ -283,6 +321,12 @@
 
   CHECK(osr_loop);  // Should have found the OSR loop.
 
+  for (Node* use : osr_loop_entry->uses()) {
+    if (use->opcode() == IrOpcode::kOsrValue) {
+      SetTypeForOsrValue(use, osr_loop, common);
+    }
+  }
+
   // Analyze the graph to determine how deeply nested the OSR loop is.
   LoopTree* loop_tree = LoopFinder::BuildLoopTree(graph, tmp_zone);
 
diff --git a/src/compiler/pipeline-statistics.cc b/src/compiler/pipeline-statistics.cc
index a032c3d..2b6ffe4 100644
--- a/src/compiler/pipeline-statistics.cc
+++ b/src/compiler/pipeline-statistics.cc
@@ -6,7 +6,7 @@
 
 #include "src/compilation-info.h"
 #include "src/compiler/pipeline-statistics.h"
-#include "src/compiler/zone-pool.h"
+#include "src/compiler/zone-stats.h"
 #include "src/isolate.h"
 
 namespace v8 {
@@ -16,13 +16,13 @@
 void PipelineStatistics::CommonStats::Begin(
     PipelineStatistics* pipeline_stats) {
   DCHECK(!scope_);
-  scope_.reset(new ZonePool::StatsScope(pipeline_stats->zone_pool_));
+  scope_.reset(new ZoneStats::StatsScope(pipeline_stats->zone_stats_));
   timer_.Start();
   outer_zone_initial_size_ = pipeline_stats->OuterZoneSize();
   allocated_bytes_at_start_ =
       outer_zone_initial_size_ -
       pipeline_stats->total_stats_.outer_zone_initial_size_ +
-      pipeline_stats->zone_pool_->GetCurrentAllocatedBytes();
+      pipeline_stats->zone_stats_->GetCurrentAllocatedBytes();
 }
 
 
@@ -43,12 +43,11 @@
   timer_.Stop();
 }
 
-
 PipelineStatistics::PipelineStatistics(CompilationInfo* info,
-                                       ZonePool* zone_pool)
+                                       ZoneStats* zone_stats)
     : isolate_(info->isolate()),
       outer_zone_(info->zone()),
-      zone_pool_(zone_pool),
+      zone_stats_(zone_stats),
       compilation_stats_(isolate_->GetTurboStatistics()),
       source_size_(0),
       phase_kind_name_(nullptr),
diff --git a/src/compiler/pipeline-statistics.h b/src/compiler/pipeline-statistics.h
index a9931eb..b09e236 100644
--- a/src/compiler/pipeline-statistics.h
+++ b/src/compiler/pipeline-statistics.h
@@ -10,7 +10,7 @@
 
 #include "src/base/platform/elapsed-timer.h"
 #include "src/compilation-statistics.h"
-#include "src/compiler/zone-pool.h"
+#include "src/compiler/zone-stats.h"
 
 namespace v8 {
 namespace internal {
@@ -20,7 +20,7 @@
 
 class PipelineStatistics : public Malloced {
  public:
-  PipelineStatistics(CompilationInfo* info, ZonePool* zone_pool);
+  PipelineStatistics(CompilationInfo* info, ZoneStats* zone_stats);
   ~PipelineStatistics();
 
   void BeginPhaseKind(const char* phase_kind_name);
@@ -39,7 +39,7 @@
     void End(PipelineStatistics* pipeline_stats,
              CompilationStatistics::BasicStats* diff);
 
-    std::unique_ptr<ZonePool::StatsScope> scope_;
+    std::unique_ptr<ZoneStats::StatsScope> scope_;
     base::ElapsedTimer timer_;
     size_t outer_zone_initial_size_;
     size_t allocated_bytes_at_start_;
@@ -57,7 +57,7 @@
 
   Isolate* isolate_;
   Zone* outer_zone_;
-  ZonePool* zone_pool_;
+  ZoneStats* zone_stats_;
   CompilationStatistics* compilation_stats_;
   std::string function_name_;
 
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index 805b687..2614155 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -70,7 +70,7 @@
 #include "src/compiler/typer.h"
 #include "src/compiler/value-numbering-reducer.h"
 #include "src/compiler/verifier.h"
-#include "src/compiler/zone-pool.h"
+#include "src/compiler/zone-stats.h"
 #include "src/isolate-inl.h"
 #include "src/ostreams.h"
 #include "src/parsing/parse-info.h"
@@ -85,19 +85,19 @@
 class PipelineData {
  public:
   // For main entry point.
-  PipelineData(ZonePool* zone_pool, CompilationInfo* info,
+  PipelineData(ZoneStats* zone_stats, CompilationInfo* info,
                PipelineStatistics* pipeline_statistics)
       : isolate_(info->isolate()),
         info_(info),
         debug_name_(info_->GetDebugName()),
         outer_zone_(info_->zone()),
-        zone_pool_(zone_pool),
+        zone_stats_(zone_stats),
         pipeline_statistics_(pipeline_statistics),
-        graph_zone_scope_(zone_pool_),
+        graph_zone_scope_(zone_stats_, ZONE_NAME),
         graph_zone_(graph_zone_scope_.zone()),
-        instruction_zone_scope_(zone_pool_),
+        instruction_zone_scope_(zone_stats_, ZONE_NAME),
         instruction_zone_(instruction_zone_scope_.zone()),
-        register_allocation_zone_scope_(zone_pool_),
+        register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
         register_allocation_zone_(register_allocation_zone_scope_.zone()) {
     PhaseScope scope(pipeline_statistics, "init pipeline data");
     graph_ = new (graph_zone_) Graph(graph_zone_);
@@ -114,48 +114,48 @@
   }
 
   // For WASM compile entry point.
-  PipelineData(ZonePool* zone_pool, CompilationInfo* info, Graph* graph,
+  PipelineData(ZoneStats* zone_stats, CompilationInfo* info, Graph* graph,
                SourcePositionTable* source_positions)
       : isolate_(info->isolate()),
         info_(info),
         debug_name_(info_->GetDebugName()),
-        zone_pool_(zone_pool),
-        graph_zone_scope_(zone_pool_),
+        zone_stats_(zone_stats),
+        graph_zone_scope_(zone_stats_, ZONE_NAME),
         graph_(graph),
         source_positions_(source_positions),
-        instruction_zone_scope_(zone_pool_),
+        instruction_zone_scope_(zone_stats_, ZONE_NAME),
         instruction_zone_(instruction_zone_scope_.zone()),
-        register_allocation_zone_scope_(zone_pool_),
+        register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
         register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
 
   // For machine graph testing entry point.
-  PipelineData(ZonePool* zone_pool, CompilationInfo* info, Graph* graph,
+  PipelineData(ZoneStats* zone_stats, CompilationInfo* info, Graph* graph,
                Schedule* schedule)
       : isolate_(info->isolate()),
         info_(info),
         debug_name_(info_->GetDebugName()),
-        zone_pool_(zone_pool),
-        graph_zone_scope_(zone_pool_),
+        zone_stats_(zone_stats),
+        graph_zone_scope_(zone_stats_, ZONE_NAME),
         graph_(graph),
         source_positions_(new (info->zone()) SourcePositionTable(graph_)),
         schedule_(schedule),
-        instruction_zone_scope_(zone_pool_),
+        instruction_zone_scope_(zone_stats_, ZONE_NAME),
         instruction_zone_(instruction_zone_scope_.zone()),
-        register_allocation_zone_scope_(zone_pool_),
+        register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
         register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
 
   // For register allocation testing entry point.
-  PipelineData(ZonePool* zone_pool, CompilationInfo* info,
+  PipelineData(ZoneStats* zone_stats, CompilationInfo* info,
                InstructionSequence* sequence)
       : isolate_(info->isolate()),
         info_(info),
         debug_name_(info_->GetDebugName()),
-        zone_pool_(zone_pool),
-        graph_zone_scope_(zone_pool_),
-        instruction_zone_scope_(zone_pool_),
+        zone_stats_(zone_stats),
+        graph_zone_scope_(zone_stats_, ZONE_NAME),
+        instruction_zone_scope_(zone_stats_, ZONE_NAME),
         instruction_zone_(sequence->zone()),
         sequence_(sequence),
-        register_allocation_zone_scope_(zone_pool_),
+        register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
         register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
 
   ~PipelineData() {
@@ -166,7 +166,7 @@
 
   Isolate* isolate() const { return isolate_; }
   CompilationInfo* info() const { return info_; }
-  ZonePool* zone_pool() const { return zone_pool_; }
+  ZoneStats* zone_stats() const { return zone_stats_; }
   PipelineStatistics* pipeline_statistics() { return pipeline_statistics_; }
   bool compilation_failed() const { return compilation_failed_; }
   void set_compilation_failed() { compilation_failed_ = true; }
@@ -186,11 +186,11 @@
   CommonOperatorBuilder* common() const { return common_; }
   JSOperatorBuilder* javascript() const { return javascript_; }
   JSGraph* jsgraph() const { return jsgraph_; }
-  MaybeHandle<Context> native_context() const {
-    if (info()->is_native_context_specializing()) {
-      return handle(info()->native_context(), isolate());
-    }
-    return MaybeHandle<Context>();
+  Handle<Context> native_context() const {
+    return handle(info()->native_context(), isolate());
+  }
+  Handle<JSGlobalObject> global_object() const {
+    return handle(info()->global_object(), isolate());
   }
 
   LoopAssignmentAnalysis* loop_assignment() const { return loop_assignment_; }
@@ -274,8 +274,8 @@
     if (descriptor && descriptor->RequiresFrameAsIncoming()) {
       sequence_->instruction_blocks()[0]->mark_needs_frame();
     } else {
-      DCHECK_EQ(0, descriptor->CalleeSavedFPRegisters());
-      DCHECK_EQ(0, descriptor->CalleeSavedRegisters());
+      DCHECK_EQ(0u, descriptor->CalleeSavedFPRegisters());
+      DCHECK_EQ(0u, descriptor->CalleeSavedRegisters());
     }
   }
 
@@ -283,7 +283,7 @@
     DCHECK(frame_ == nullptr);
     int fixed_frame_size = 0;
     if (descriptor != nullptr) {
-      fixed_frame_size = CalculateFixedFrameSize(descriptor);
+      fixed_frame_size = descriptor->CalculateFixedFrameSize();
     }
     frame_ = new (instruction_zone()) Frame(fixed_frame_size);
   }
@@ -313,14 +313,14 @@
   CompilationInfo* const info_;
   std::unique_ptr<char[]> debug_name_;
   Zone* outer_zone_ = nullptr;
-  ZonePool* const zone_pool_;
+  ZoneStats* const zone_stats_;
   PipelineStatistics* pipeline_statistics_ = nullptr;
   bool compilation_failed_ = false;
   Handle<Code> code_ = Handle<Code>::null();
 
   // All objects in the following group of fields are allocated in graph_zone_.
   // They are all set to nullptr when the graph_zone_ is destroyed.
-  ZonePool::Scope graph_zone_scope_;
+  ZoneStats::Scope graph_zone_scope_;
   Zone* graph_zone_ = nullptr;
   Graph* graph_ = nullptr;
   SourcePositionTable* source_positions_ = nullptr;
@@ -337,7 +337,7 @@
   // instruction_zone_.  They are all set to nullptr when the
   // instruction_zone_ is destroyed.
-  ZonePool::Scope instruction_zone_scope_;
+  ZoneStats::Scope instruction_zone_scope_;
   Zone* instruction_zone_;
   InstructionSequence* sequence_ = nullptr;
   Frame* frame_ = nullptr;
@@ -345,7 +345,7 @@
   // All objects in the following group of fields are allocated in
   // register_allocation_zone_.  They are all set to nullptr when the zone is
   // destroyed.
-  ZonePool::Scope register_allocation_zone_scope_;
+  ZoneStats::Scope register_allocation_zone_scope_;
   Zone* register_allocation_zone_;
   RegisterAllocationData* register_allocation_data_ = nullptr;
 
@@ -355,16 +355,6 @@
   // Source position output for --trace-turbo.
   std::string source_position_output_;
 
-  int CalculateFixedFrameSize(CallDescriptor* descriptor) {
-    if (descriptor->IsJSFunctionCall()) {
-      return StandardFrameConstants::kFixedSlotCount;
-    }
-    return descriptor->IsCFunctionCall()
-               ? (CommonFrameConstants::kFixedSlotCountAboveFp +
-                  CommonFrameConstants::kCPSlotCount)
-               : TypedFrameConstants::kFixedSlotCount;
-  }
-
   DISALLOW_COPY_AND_ASSIGN(PipelineData);
 };
 
@@ -389,7 +379,7 @@
   // Perform the actual code generation and return handle to a code object.
   Handle<Code> GenerateCode(Linkage* linkage);
 
-  bool ScheduleAndSelectInstructions(Linkage* linkage);
+  bool ScheduleAndSelectInstructions(Linkage* linkage, bool trim_graph);
   void RunPrintAndVerify(const char* phase, bool untyped = false);
   Handle<Code> ScheduleAndGenerateCode(CallDescriptor* call_descriptor);
   void AllocateRegisters(const RegisterConfiguration* config,
@@ -437,38 +427,6 @@
 }
 
 
-class AstGraphBuilderWithPositions final : public AstGraphBuilder {
- public:
-  AstGraphBuilderWithPositions(Zone* local_zone, CompilationInfo* info,
-                               JSGraph* jsgraph,
-                               LoopAssignmentAnalysis* loop_assignment,
-                               TypeHintAnalysis* type_hint_analysis,
-                               SourcePositionTable* source_positions)
-      : AstGraphBuilder(local_zone, info, jsgraph, 1.0f, loop_assignment,
-                        type_hint_analysis),
-        source_positions_(source_positions),
-        start_position_(info->shared_info()->start_position()) {}
-
-  bool CreateGraph() {
-    SourcePositionTable::Scope pos_scope(source_positions_, start_position_);
-    return AstGraphBuilder::CreateGraph();
-  }
-
-#define DEF_VISIT(type)                                               \
-  void Visit##type(type* node) override {                             \
-    SourcePositionTable::Scope pos(source_positions_,                 \
-                                   SourcePosition(node->position())); \
-    AstGraphBuilder::Visit##type(node);                               \
-  }
-  AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
- private:
-  SourcePositionTable* const source_positions_;
-  SourcePosition const start_position_;
-};
-
-
 class SourcePositionWrapper final : public Reducer {
  public:
   SourcePositionWrapper(Reducer* reducer, SourcePositionTable* table)
@@ -518,21 +476,21 @@
       : phase_scope_(
             phase_name == nullptr ? nullptr : data->pipeline_statistics(),
             phase_name),
-        zone_scope_(data->zone_pool()) {}
+        zone_scope_(data->zone_stats(), ZONE_NAME) {}
 
   Zone* zone() { return zone_scope_.zone(); }
 
  private:
   PhaseScope phase_scope_;
-  ZonePool::Scope zone_scope_;
+  ZoneStats::Scope zone_scope_;
 };
 
 PipelineStatistics* CreatePipelineStatistics(CompilationInfo* info,
-                                             ZonePool* zone_pool) {
+                                             ZoneStats* zone_stats) {
   PipelineStatistics* pipeline_statistics = nullptr;
 
   if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
-    pipeline_statistics = new PipelineStatistics(info, zone_pool);
+    pipeline_statistics = new PipelineStatistics(info, zone_stats);
     pipeline_statistics->BeginPhaseKind("initializing");
   }
 
@@ -568,12 +526,12 @@
       // Note that the CompilationInfo is not initialized at the time we pass it
       // to the CompilationJob constructor, but it is not dereferenced there.
       : CompilationJob(isolate, &info_, "TurboFan"),
-        zone_(isolate->allocator()),
-        zone_pool_(isolate->allocator()),
-        parse_info_(&zone_, function),
+        zone_(isolate->allocator(), ZONE_NAME),
+        zone_stats_(isolate->allocator()),
+        parse_info_(&zone_, handle(function->shared())),
         info_(&parse_info_, function),
-        pipeline_statistics_(CreatePipelineStatistics(info(), &zone_pool_)),
-        data_(&zone_pool_, info(), pipeline_statistics_.get()),
+        pipeline_statistics_(CreatePipelineStatistics(info(), &zone_stats_)),
+        data_(&zone_stats_, info(), pipeline_statistics_.get()),
         pipeline_(&data_),
         linkage_(nullptr) {}
 
@@ -584,7 +542,7 @@
 
  private:
   Zone zone_;
-  ZonePool zone_pool_;
+  ZoneStats zone_stats_;
   ParseInfo parse_info_;
   CompilationInfo info_;
   std::unique_ptr<PipelineStatistics> pipeline_statistics_;
@@ -603,20 +561,17 @@
     if (!FLAG_always_opt) {
       info()->MarkAsBailoutOnUninitialized();
     }
-    if (FLAG_native_context_specialization) {
-      info()->MarkAsNativeContextSpecializing();
-    }
     if (FLAG_turbo_inlining) {
       info()->MarkAsInliningEnabled();
     }
   }
   if (!info()->shared_info()->asm_function() || FLAG_turbo_asm_deoptimization) {
     info()->MarkAsDeoptimizationEnabled();
-  }
-  if (!info()->is_optimizing_from_bytecode()) {
     if (FLAG_inline_accessors) {
       info()->MarkAsAccessorInliningEnabled();
     }
+  }
+  if (!info()->is_optimizing_from_bytecode()) {
     if (info()->is_deoptimization_enabled() && FLAG_turbo_type_feedback) {
       info()->MarkAsTypeFeedbackEnabled();
     }
@@ -662,8 +617,8 @@
                                       SourcePositionTable* source_positions)
       : CompilationJob(info->isolate(), info, "TurboFan",
                        State::kReadyToExecute),
-        zone_pool_(info->isolate()->allocator()),
-        data_(&zone_pool_, info, graph, source_positions),
+        zone_stats_(info->isolate()->allocator()),
+        data_(&zone_stats_, info, graph, source_positions),
         pipeline_(&data_),
         linkage_(descriptor) {}
 
@@ -673,7 +628,7 @@
   Status FinalizeJobImpl() final;
 
  private:
-  ZonePool zone_pool_;
+  ZoneStats zone_stats_;
   PipelineData data_;
   PipelineImpl pipeline_;
   Linkage linkage_;
@@ -695,7 +650,7 @@
 
   pipeline_.RunPrintAndVerify("Machine", true);
 
-  if (!pipeline_.ScheduleAndSelectInstructions(&linkage_)) return FAILED;
+  if (!pipeline_.ScheduleAndSelectInstructions(&linkage_, true)) return FAILED;
   return SUCCEEDED;
 }
 
@@ -761,12 +716,14 @@
 
     if (data->info()->is_optimizing_from_bytecode()) {
       BytecodeGraphBuilder graph_builder(temp_zone, data->info(),
-                                         data->jsgraph(), 1.0f);
+                                         data->jsgraph(), 1.0f,
+                                         data->source_positions());
       succeeded = graph_builder.CreateGraph();
     } else {
       AstGraphBuilderWithPositions graph_builder(
-          temp_zone, data->info(), data->jsgraph(), data->loop_assignment(),
-          data->type_hint_analysis(), data->source_positions());
+          temp_zone, data->info(), data->jsgraph(), 1.0f,
+          data->loop_assignment(), data->type_hint_analysis(),
+          data->source_positions());
       succeeded = graph_builder.CreateGraph();
     }
 
@@ -800,10 +757,10 @@
         data->info()->is_function_context_specializing()
             ? handle(data->info()->context())
             : MaybeHandle<Context>());
-    JSFrameSpecialization frame_specialization(data->info()->osr_frame(),
-                                               data->jsgraph());
+    JSFrameSpecialization frame_specialization(
+        &graph_reducer, data->info()->osr_frame(), data->jsgraph());
     JSGlobalObjectSpecialization global_object_specialization(
-        &graph_reducer, data->jsgraph(), data->native_context(),
+        &graph_reducer, data->jsgraph(), data->global_object(),
         data->info()->dependencies());
     JSNativeContextSpecialization::Flags flags =
         JSNativeContextSpecialization::kNoFlags;
@@ -819,11 +776,11 @@
     JSNativeContextSpecialization native_context_specialization(
         &graph_reducer, data->jsgraph(), flags, data->native_context(),
         data->info()->dependencies(), temp_zone);
-    JSInliningHeuristic inlining(&graph_reducer,
-                                 data->info()->is_inlining_enabled()
-                                     ? JSInliningHeuristic::kGeneralInlining
-                                     : JSInliningHeuristic::kRestrictedInlining,
-                                 temp_zone, data->info(), data->jsgraph());
+    JSInliningHeuristic inlining(
+        &graph_reducer, data->info()->is_inlining_enabled()
+                            ? JSInliningHeuristic::kGeneralInlining
+                            : JSInliningHeuristic::kRestrictedInlining,
+        temp_zone, data->info(), data->jsgraph(), data->source_positions());
     JSIntrinsicLowering intrinsic_lowering(
         &graph_reducer, data->jsgraph(),
         data->info()->is_deoptimization_enabled()
@@ -860,7 +817,20 @@
   }
 };
 
-#ifdef DEBUG
+struct OsrTyperPhase {
+  static const char* phase_name() { return "osr typer"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    NodeVector roots(temp_zone);
+    data->jsgraph()->GetCachedNodes(&roots);
+    // Dummy induction variable optimizer: at the moment, we do not try
+    // to compute loop variable bounds on OSR.
+    LoopVariableOptimizer induction_vars(data->jsgraph()->graph(),
+                                         data->common(), temp_zone);
+    Typer typer(data->isolate(), Typer::kNoFlags, data->graph());
+    typer.Run(roots, &induction_vars);
+  }
+};
 
 struct UntyperPhase {
   static const char* phase_name() { return "untyper"; }
@@ -877,6 +847,12 @@
       }
     };
 
+    NodeVector roots(temp_zone);
+    data->jsgraph()->GetCachedNodes(&roots);
+    for (Node* node : roots) {
+      NodeProperties::RemoveType(node);
+    }
+
     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
     RemoveTypeReducer remove_type_reducer;
     AddReducer(data, &graph_reducer, &remove_type_reducer);
@@ -884,12 +860,15 @@
   }
 };
 
-#endif  // DEBUG
-
 struct OsrDeconstructionPhase {
   static const char* phase_name() { return "OSR deconstruction"; }
 
   void Run(PipelineData* data, Zone* temp_zone) {
+    GraphTrimmer trimmer(temp_zone, data->graph());
+    NodeVector roots(temp_zone);
+    data->jsgraph()->GetCachedNodes(&roots);
+    trimmer.TrimGraph(roots.begin(), roots.end());
+
     OsrHelper osr_helper(data->info());
     osr_helper.Deconstruct(data->jsgraph(), data->common(), temp_zone);
   }
@@ -908,11 +887,8 @@
         data->info()->is_deoptimization_enabled()
             ? JSBuiltinReducer::kDeoptimizationEnabled
             : JSBuiltinReducer::kNoFlags,
-        data->info()->dependencies());
-    MaybeHandle<LiteralsArray> literals_array =
-        data->info()->is_native_context_specializing()
-            ? handle(data->info()->closure()->literals(), data->isolate())
-            : MaybeHandle<LiteralsArray>();
+        data->info()->dependencies(), data->native_context());
+    Handle<LiteralsArray> literals_array(data->info()->closure()->literals());
     JSCreateLowering create_lowering(
         &graph_reducer, data->info()->dependencies(), data->jsgraph(),
         literals_array, data->native_context(), temp_zone);
@@ -960,6 +936,10 @@
                                          &escape_analysis, temp_zone);
     AddReducer(data, &graph_reducer, &escape_reducer);
     graph_reducer.ReduceGraph();
+    if (escape_reducer.compilation_failed()) {
+      data->set_compilation_failed();
+      return;
+    }
     escape_reducer.VerifyReplacement();
   }
 };
@@ -1068,7 +1048,8 @@
     //   chains and lower them,
     // - get rid of the region markers,
     // - introduce effect phis and rewire effects to get SSA again.
-    EffectControlLinearizer linearizer(data->jsgraph(), schedule, temp_zone);
+    EffectControlLinearizer linearizer(data->jsgraph(), schedule, temp_zone,
+                                       data->source_positions());
     linearizer.Run();
   }
 };
@@ -1189,7 +1170,9 @@
   void Run(PipelineData* data, Zone* temp_zone) {
     GraphTrimmer trimmer(temp_zone, data->graph());
     NodeVector roots(temp_zone);
-    data->jsgraph()->GetCachedNodes(&roots);
+    if (data->jsgraph()) {
+      data->jsgraph()->GetCachedNodes(&roots);
+    }
     trimmer.TrimGraph(roots.begin(), roots.end());
   }
 };
@@ -1503,7 +1486,11 @@
 
   // Perform OSR deconstruction.
   if (info()->is_osr()) {
+    Run<OsrTyperPhase>();
+
     Run<OsrDeconstructionPhase>();
+
+    Run<UntyperPhase>();
     RunPrintAndVerify("OSR deconstruction", true);
   }
 
@@ -1522,10 +1509,22 @@
 
   // Run the type-sensitive lowerings and optimizations on the graph.
   {
+    // Determine the Typer operation flags.
+    Typer::Flags flags = Typer::kNoFlags;
+    if (is_sloppy(info()->shared_info()->language_mode()) &&
+        !info()->shared_info()->IsBuiltin()) {
+      // Sloppy mode functions always have an Object for this.
+      flags |= Typer::kThisIsReceiver;
+    }
+    if (IsClassConstructor(info()->shared_info()->kind())) {
+      // Class constructors cannot be [[Call]]ed.
+      flags |= Typer::kNewTargetIsReceiver;
+    }
+
     // Type the graph and keep the Typer running on newly created nodes within
     // this scope; the Typer is automatically unlinked from the Graph once we
     // leave this scope below.
-    Typer typer(isolate(), data->graph());
+    Typer typer(isolate(), flags, data->graph());
     Run<TyperPhase>(&typer);
     RunPrintAndVerify("Typed");
 
@@ -1548,14 +1547,21 @@
       RunPrintAndVerify("Loop peeled");
     }
 
-    if (FLAG_turbo_escape) {
-      Run<EscapeAnalysisPhase>();
-      RunPrintAndVerify("Escape Analysed");
-    }
+    if (!info()->shared_info()->asm_function()) {
+      if (FLAG_turbo_load_elimination) {
+        Run<LoadEliminationPhase>();
+        RunPrintAndVerify("Load eliminated");
+      }
 
-    if (!info()->shared_info()->asm_function() && FLAG_turbo_load_elimination) {
-      Run<LoadEliminationPhase>();
-      RunPrintAndVerify("Load eliminated");
+      if (FLAG_turbo_escape) {
+        Run<EscapeAnalysisPhase>();
+        if (data->compilation_failed()) {
+          info()->AbortOptimization(kCyclicObjectStateDetectedInEscapeAnalysis);
+          data->EndPhaseKind();
+          return false;
+        }
+        RunPrintAndVerify("Escape Analysed");
+      }
     }
   }
 
@@ -1627,13 +1633,9 @@
   // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
   RunPrintAndVerify("Late optimized", true);
 
-  Run<LateGraphTrimmingPhase>();
-  // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
-  RunPrintAndVerify("Late trimmed", true);
-
   data->source_positions()->RemoveDecorator();
 
-  return ScheduleAndSelectInstructions(linkage);
+  return ScheduleAndSelectInstructions(linkage, true);
 }
 
 Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
@@ -1645,11 +1647,11 @@
   if (isolate->serializer_enabled()) info.PrepareForSerializing();
 
   // Construct a pipeline for scheduling and code generation.
-  ZonePool zone_pool(isolate->allocator());
-  PipelineData data(&zone_pool, &info, graph, schedule);
+  ZoneStats zone_stats(isolate->allocator());
+  PipelineData data(&zone_stats, &info, graph, schedule);
   std::unique_ptr<PipelineStatistics> pipeline_statistics;
   if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
-    pipeline_statistics.reset(new PipelineStatistics(&info, &zone_pool));
+    pipeline_statistics.reset(new PipelineStatistics(&info, &zone_stats));
     pipeline_statistics->BeginPhaseKind("stub codegen");
   }
 
@@ -1671,10 +1673,10 @@
 
 // static
 Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info) {
-  ZonePool zone_pool(info->isolate()->allocator());
+  ZoneStats zone_stats(info->isolate()->allocator());
   std::unique_ptr<PipelineStatistics> pipeline_statistics(
-      CreatePipelineStatistics(info, &zone_pool));
-  PipelineData data(&zone_pool, info, pipeline_statistics.get());
+      CreatePipelineStatistics(info, &zone_stats));
+  PipelineData data(&zone_stats, info, pipeline_statistics.get());
   PipelineImpl pipeline(&data);
 
   Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
@@ -1699,11 +1701,11 @@
                                               Graph* graph,
                                               Schedule* schedule) {
   // Construct a pipeline for scheduling and code generation.
-  ZonePool zone_pool(info->isolate()->allocator());
-  PipelineData data(&zone_pool, info, graph, schedule);
+  ZoneStats zone_stats(info->isolate()->allocator());
+  PipelineData data(&zone_stats, info, graph, schedule);
   std::unique_ptr<PipelineStatistics> pipeline_statistics;
   if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
-    pipeline_statistics.reset(new PipelineStatistics(info, &zone_pool));
+    pipeline_statistics.reset(new PipelineStatistics(info, &zone_stats));
     pipeline_statistics->BeginPhaseKind("test codegen");
   }
 
@@ -1738,20 +1740,25 @@
                                            bool run_verifier) {
   CompilationInfo info(ArrayVector("testing"), sequence->isolate(),
                        sequence->zone(), Code::ComputeFlags(Code::STUB));
-  ZonePool zone_pool(sequence->isolate()->allocator());
-  PipelineData data(&zone_pool, &info, sequence);
+  ZoneStats zone_stats(sequence->isolate()->allocator());
+  PipelineData data(&zone_stats, &info, sequence);
   PipelineImpl pipeline(&data);
   pipeline.data_->InitializeFrameData(nullptr);
   pipeline.AllocateRegisters(config, nullptr, run_verifier);
   return !data.compilation_failed();
 }
 
-bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage) {
+bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage,
+                                                 bool trim_graph) {
   CallDescriptor* call_descriptor = linkage->GetIncomingDescriptor();
   PipelineData* data = this->data_;
 
   DCHECK_NOT_NULL(data->graph());
 
+  if (trim_graph) {
+    Run<LateGraphTrimmingPhase>();
+    RunPrintAndVerify("Late trimmed", true);
+  }
   if (data->schedule() == nullptr) Run<ComputeSchedulePhase>();
   TraceSchedule(data->info(), data->schedule());
 
@@ -1760,8 +1767,11 @@
         info(), data->graph(), data->schedule()));
   }
 
-  if (FLAG_turbo_verify_machine_graph) {
-    Zone temp_zone(data->isolate()->allocator());
+  if (FLAG_turbo_verify_machine_graph != nullptr &&
+      (!strcmp(FLAG_turbo_verify_machine_graph, "*") ||
+       !strcmp(FLAG_turbo_verify_machine_graph,
+               data->info()->GetDebugName().get()))) {
+    Zone temp_zone(data->isolate()->allocator(), ZONE_NAME);
     MachineGraphVerifier::Run(data->graph(), data->schedule(), linkage,
                               &temp_zone);
   }
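
The verification flag is now a string filter rather than a boolean: "*" verifies
every graph, and any other value selects a single function by its debug name. A
minimal standalone sketch of that predicate (ShouldVerifyMachineGraph is a
hypothetical helper, not part of the patch):

    #include <cstring>

    // Mirrors the condition above: a null flag disables verification, "*"
    // enables it everywhere, any other value matches one debug name exactly.
    static bool ShouldVerifyMachineGraph(const char* flag,
                                         const char* debug_name) {
      return flag != nullptr &&
             (std::strcmp(flag, "*") == 0 ||
              std::strcmp(flag, debug_name) == 0);
    }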
@@ -1871,7 +1881,7 @@
   Linkage linkage(call_descriptor);
 
   // Schedule the graph, perform instruction selection and register allocation.
-  if (!ScheduleAndSelectInstructions(&linkage)) return Handle<Code>();
+  if (!ScheduleAndSelectInstructions(&linkage, false)) return Handle<Code>();
 
   // Generate the final machine code.
   return GenerateCode(&linkage);
@@ -1885,7 +1895,7 @@
   std::unique_ptr<Zone> verifier_zone;
   RegisterAllocatorVerifier* verifier = nullptr;
   if (run_verifier) {
-    verifier_zone.reset(new Zone(isolate()->allocator()));
+    verifier_zone.reset(new Zone(isolate()->allocator(), ZONE_NAME));
     verifier = new (verifier_zone.get()) RegisterAllocatorVerifier(
         verifier_zone.get(), config, data->sequence());
   }
diff --git a/src/compiler/pipeline.h b/src/compiler/pipeline.h
index 64befbf..0c0a57b 100644
--- a/src/compiler/pipeline.h
+++ b/src/compiler/pipeline.h
@@ -7,6 +7,7 @@
 
 // Clients of this interface shouldn't depend on lots of compiler internals.
 // Do not include anything from src/compiler here!
+#include "src/globals.h"
 #include "src/objects.h"
 
 namespace v8 {
@@ -53,9 +54,9 @@
                                              Schedule* schedule = nullptr);
 
   // Run just the register allocator phases.
-  static bool AllocateRegistersForTesting(const RegisterConfiguration* config,
-                                          InstructionSequence* sequence,
-                                          bool run_verifier);
+  V8_EXPORT_PRIVATE static bool AllocateRegistersForTesting(
+      const RegisterConfiguration* config, InstructionSequence* sequence,
+      bool run_verifier);
 
   // Run the pipeline on a machine graph and generate code. If {schedule} is
   // {nullptr}, then compute a new schedule for code generation.
diff --git a/src/compiler/ppc/code-generator-ppc.cc b/src/compiler/ppc/code-generator-ppc.cc
index f8f3099..a838ede 100644
--- a/src/compiler/ppc/code-generator-ppc.cc
+++ b/src/compiler/ppc/code-generator-ppc.cc
@@ -1012,8 +1012,7 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
-    case kArchTailCallJSFunctionFromJSFunction:
-    case kArchTailCallJSFunction: {
+    case kArchTailCallJSFunctionFromJSFunction: {
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
         // Check the function's context matches the context argument.
@@ -1022,11 +1021,9 @@
         __ cmp(cp, kScratchReg);
         __ Assert(eq, kWrongFunctionContext);
       }
-      if (opcode == kArchTailCallJSFunctionFromJSFunction) {
-        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
-                                         i.TempRegister(0), i.TempRegister(1),
-                                         i.TempRegister(2));
-      }
+      AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                       i.TempRegister(0), i.TempRegister(1),
+                                       i.TempRegister(2));
       __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
       __ Jump(ip);
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
@@ -1093,7 +1090,7 @@
       break;
     }
     case kArchRet:
-      AssembleReturn();
+      AssembleReturn(instr->InputAt(0));
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
     case kArchStackPointer:
@@ -1241,39 +1238,46 @@
       __ mulhwu(i.OutputRegister(1), i.InputRegister(0), i.InputRegister(2));
       __ add(i.OutputRegister(1), i.OutputRegister(1), i.TempRegister(0));
       break;
-    case kPPC_ShiftLeftPair:
+    case kPPC_ShiftLeftPair: {
+      Register second_output =
+          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
       if (instr->InputAt(2)->IsImmediate()) {
-        __ ShiftLeftPair(i.OutputRegister(0), i.OutputRegister(1),
-                         i.InputRegister(0), i.InputRegister(1),
-                         i.InputInt32(2));
+        __ ShiftLeftPair(i.OutputRegister(0), second_output, i.InputRegister(0),
+                         i.InputRegister(1), i.InputInt32(2));
       } else {
-        __ ShiftLeftPair(i.OutputRegister(0), i.OutputRegister(1),
-                         i.InputRegister(0), i.InputRegister(1), kScratchReg,
-                         i.InputRegister(2));
+        __ ShiftLeftPair(i.OutputRegister(0), second_output, i.InputRegister(0),
+                         i.InputRegister(1), kScratchReg, i.InputRegister(2));
       }
       break;
-    case kPPC_ShiftRightPair:
+    }
+    case kPPC_ShiftRightPair: {
+      Register second_output =
+          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
       if (instr->InputAt(2)->IsImmediate()) {
-        __ ShiftRightPair(i.OutputRegister(0), i.OutputRegister(1),
+        __ ShiftRightPair(i.OutputRegister(0), second_output,
                           i.InputRegister(0), i.InputRegister(1),
                           i.InputInt32(2));
       } else {
-        __ ShiftRightPair(i.OutputRegister(0), i.OutputRegister(1),
+        __ ShiftRightPair(i.OutputRegister(0), second_output,
                           i.InputRegister(0), i.InputRegister(1), kScratchReg,
                           i.InputRegister(2));
       }
       break;
-    case kPPC_ShiftRightAlgPair:
+    }
+    case kPPC_ShiftRightAlgPair: {
+      Register second_output =
+          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
       if (instr->InputAt(2)->IsImmediate()) {
-        __ ShiftRightAlgPair(i.OutputRegister(0), i.OutputRegister(1),
+        __ ShiftRightAlgPair(i.OutputRegister(0), second_output,
                              i.InputRegister(0), i.InputRegister(1),
                              i.InputInt32(2));
       } else {
-        __ ShiftRightAlgPair(i.OutputRegister(0), i.OutputRegister(1),
+        __ ShiftRightAlgPair(i.OutputRegister(0), second_output,
                              i.InputRegister(0), i.InputRegister(1),
                              kScratchReg, i.InputRegister(2));
       }
       break;
+    }
 #endif
     case kPPC_RotRight32:
       if (HasRegisterInput(instr, 1)) {
@@ -2078,7 +2082,7 @@
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   DeoptimizeReason deoptimization_reason =
       GetDeoptimizationReason(deoptimization_id);
-  __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
+  __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
 }
@@ -2125,6 +2129,9 @@
       }
     } else if (descriptor->IsJSFunctionCall()) {
       __ Prologue(this->info()->GeneratePreagedPrologue(), ip);
+      if (descriptor->PushArgumentCount()) {
+        __ Push(kJavaScriptCallArgCountRegister);
+      }
     } else {
       StackFrame::Type type = info()->GetOutputStackFrameType();
       // TODO(mbrandy): Detect cases where ip is the entrypoint (for
@@ -2133,7 +2140,8 @@
     }
   }
 
-  int shrink_slots = frame()->GetSpillSlotCount();
+  int shrink_slots =
+      frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
     __ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -2170,8 +2178,7 @@
   }
 }
 
-
-void CodeGenerator::AssembleReturn() {
+void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   int pop_count = static_cast<int>(descriptor->StackParameterCount());
 
@@ -2189,20 +2196,33 @@
   if (double_saves != 0) {
     __ MultiPopDoubles(double_saves);
   }
+  PPCOperandConverter g(this, nullptr);
 
   if (descriptor->IsCFunctionCall()) {
     AssembleDeconstructFrame();
   } else if (frame_access_state()->has_frame()) {
-    // Canonicalize JSFunction return sites for now.
-    if (return_label_.is_bound()) {
-      __ b(&return_label_);
-      return;
+    // Canonicalize JSFunction return sites for now unless they have a variable
+    // number of stack slot pops.
+    if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+      if (return_label_.is_bound()) {
+        __ b(&return_label_);
+        return;
+      } else {
+        __ bind(&return_label_);
+        AssembleDeconstructFrame();
+      }
     } else {
-      __ bind(&return_label_);
       AssembleDeconstructFrame();
     }
   }
-  __ Ret(pop_count);
+  if (pop->IsImmediate()) {
+    DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
+    pop_count += g.ToConstant(pop).ToInt32();
+  } else {
+    __ Drop(g.ToRegister(pop));
+  }
+  __ Drop(pop_count);
+  __ Ret();
 }
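
The return path now takes an extra pop operand: a constant pop is folded into
the fixed StackParameterCount before a single final Drop, while a dynamic pop
is dropped through a register first. A self-contained sketch of the accounting
(FinalConstantDrop is a hypothetical name):

    // A register-valued pop is emitted as a separate Drop(reg) beforehand, so
    // only the immediate part accumulates into the final constant Drop.
    int FinalConstantDrop(int stack_parameter_count, bool pop_is_immediate,
                          int pop_immediate) {
      return stack_parameter_count + (pop_is_immediate ? pop_immediate : 0);
    }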
 
 
diff --git a/src/compiler/ppc/instruction-selector-ppc.cc b/src/compiler/ppc/instruction-selector-ppc.cc
index a2eb7b8..768b188 100644
--- a/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/src/compiler/ppc/instruction-selector-ppc.cc
@@ -245,7 +245,7 @@
   MachineRepresentation rep = store_rep.representation();
 
   if (write_barrier_kind != kNoWriteBarrier) {
-    DCHECK_EQ(MachineRepresentation::kTagged, rep);
+    DCHECK(CanBeTaggedPointer(rep));
     AddressingMode addressing_mode;
     InstructionOperand inputs[3];
     size_t input_count = 0;
@@ -810,49 +810,70 @@
 
 #if !V8_TARGET_ARCH_PPC64
 void VisitPairBinop(InstructionSelector* selector, InstructionCode opcode,
-                    Node* node) {
+                    InstructionCode opcode2, Node* node) {
   PPCOperandGenerator g(selector);
 
-  // We use UseUniqueRegister here to avoid register sharing with the output
-  // registers.
-  InstructionOperand inputs[] = {
-      g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
-      g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+  Node* projection1 = NodeProperties::FindProjection(node, 1);
+  if (projection1) {
+    // We use UseUniqueRegister here to avoid register sharing with the output
+    // registers.
+    InstructionOperand inputs[] = {
+        g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+        g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
 
-  InstructionOperand outputs[] = {
-      g.DefineAsRegister(node),
-      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+    InstructionOperand outputs[] = {
+        g.DefineAsRegister(node),
+        g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
 
-  selector->Emit(opcode, 2, outputs, 4, inputs);
+    selector->Emit(opcode, 2, outputs, 4, inputs);
+  } else {
+    // The high word of the result is not used, so we emit the standard 32-bit
+    // instruction.
+    selector->Emit(opcode2, g.DefineSameAsFirst(node),
+                   g.UseRegister(node->InputAt(0)),
+                   g.UseRegister(node->InputAt(2)));
+  }
 }
 
 void InstructionSelector::VisitInt32PairAdd(Node* node) {
-  VisitPairBinop(this, kPPC_AddPair, node);
+  VisitPairBinop(this, kPPC_AddPair, kPPC_Add, node);
 }
 
 void InstructionSelector::VisitInt32PairSub(Node* node) {
-  VisitPairBinop(this, kPPC_SubPair, node);
+  VisitPairBinop(this, kPPC_SubPair, kPPC_Sub, node);
 }
 
 void InstructionSelector::VisitInt32PairMul(Node* node) {
   PPCOperandGenerator g(this);
-  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
-                                 g.UseUniqueRegister(node->InputAt(1)),
-                                 g.UseUniqueRegister(node->InputAt(2)),
-                                 g.UseRegister(node->InputAt(3))};
+  Node* projection1 = NodeProperties::FindProjection(node, 1);
+  if (projection1) {
+    InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+                                   g.UseUniqueRegister(node->InputAt(1)),
+                                   g.UseUniqueRegister(node->InputAt(2)),
+                                   g.UseUniqueRegister(node->InputAt(3))};
 
-  InstructionOperand outputs[] = {
-      g.DefineAsRegister(node),
-      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+    InstructionOperand outputs[] = {
+        g.DefineAsRegister(node),
+        g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
 
-  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
 
-  Emit(kPPC_MulPair, 2, outputs, 4, inputs, 2, temps);
+    Emit(kPPC_MulPair, 2, outputs, 4, inputs, 2, temps);
+  } else {
+    // The high word of the result is not used, so we emit the standard 32-bit
+    // instruction.
+    Emit(kPPC_Mul32, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+         g.UseRegister(node->InputAt(2)));
+  }
 }
 
+namespace {
+// Shared routine for multiple shift operations.
 void VisitPairShift(InstructionSelector* selector, InstructionCode opcode,
                     Node* node) {
   PPCOperandGenerator g(selector);
+  // We use g.UseUniqueRegister here to guarantee that there is
+  // no register aliasing of input registers with output registers.
   Int32Matcher m(node->InputAt(2));
   InstructionOperand shift_operand;
   if (m.HasValue()) {
@@ -861,16 +882,27 @@
     shift_operand = g.UseUniqueRegister(m.node());
   }
 
-  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
-                                 g.UseRegister(node->InputAt(1)),
+  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+                                 g.UseUniqueRegister(node->InputAt(1)),
                                  shift_operand};
 
-  InstructionOperand outputs[] = {
-      g.DefineSameAsFirst(node),
-      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+  Node* projection1 = NodeProperties::FindProjection(node, 1);
 
-  selector->Emit(opcode, 2, outputs, 3, inputs);
+  InstructionOperand outputs[2];
+  InstructionOperand temps[1];
+  int32_t output_count = 0;
+  int32_t temp_count = 0;
+
+  outputs[output_count++] = g.DefineAsRegister(node);
+  if (projection1) {
+    outputs[output_count++] = g.DefineAsRegister(projection1);
+  } else {
+    temps[temp_count++] = g.TempRegister();
+  }
+
+  selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
 }
+}  // namespace
 
 void InstructionSelector::VisitWord32PairShl(Node* node) {
   VisitPairShift(this, kPPC_ShiftLeftPair, node);
@@ -1570,21 +1602,22 @@
 void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                           Node* value, InstructionCode opcode,
                           FlagsContinuation* cont) {
-  while (selector->CanCover(user, value)) {
+  // Try to combine with comparisons against 0 by simply inverting the branch.
+  while (value->opcode() == IrOpcode::kWord32Equal &&
+         selector->CanCover(user, value)) {
+    Int32BinopMatcher m(value);
+    if (!m.right().Is(0)) break;
+
+    user = value;
+    value = m.left().node();
+    cont->Negate();
+  }
+
+  if (selector->CanCover(user, value)) {
     switch (value->opcode()) {
-      case IrOpcode::kWord32Equal: {
-        // Combine with comparisons against 0 by simply inverting the
-        // continuation.
-        Int32BinopMatcher m(value);
-        if (m.right().Is(0)) {
-          user = value;
-          value = m.left().node();
-          cont->Negate();
-          continue;
-        }
+      case IrOpcode::kWord32Equal:
         cont->OverwriteAndNegateIfEqual(kEqual);
         return VisitWord32Compare(selector, value, cont);
-      }
       case IrOpcode::kInt32LessThan:
         cont->OverwriteAndNegateIfEqual(kSignedLessThan);
         return VisitWord32Compare(selector, value, cont);
@@ -1706,7 +1739,6 @@
       default:
         break;
     }
-    break;
   }
 
   // Branch could not be combined with a compare, emit compare against 0.
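
The selector changes above share one pattern: a pair operation only needs the
two-output pair instruction when the high-word projection is actually used;
otherwise the plain 32-bit instruction suffices, and for shifts a scratch
register still has to absorb the high word the machine instruction produces. A
reduced sketch of that bookkeeping (CountPairShiftOutputs is hypothetical):

    // The low word is always a real output; the high word is either a second
    // real output (live projection) or a temp register (dead projection).
    void CountPairShiftOutputs(bool high_word_projection_used,
                               int* output_count, int* temp_count) {
      *output_count = 1;
      *temp_count = 0;
      if (high_word_projection_used) {
        ++*output_count;
      } else {
        ++*temp_count;  // TempRegister() stands in for the unused high word
      }
    }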
diff --git a/src/compiler/raw-machine-assembler.cc b/src/compiler/raw-machine-assembler.cc
index cdf45ab..14695c1 100644
--- a/src/compiler/raw-machine-assembler.cc
+++ b/src/compiler/raw-machine-assembler.cc
@@ -120,23 +120,46 @@
 }
 
 void RawMachineAssembler::Return(Node* value) {
-  Node* ret = MakeNode(common()->Return(), 1, &value);
+  Node* values[] = {Int32Constant(0), value};
+  Node* ret = MakeNode(common()->Return(1), 2, values);
   schedule()->AddReturn(CurrentBlock(), ret);
   current_block_ = nullptr;
 }
 
 
 void RawMachineAssembler::Return(Node* v1, Node* v2) {
-  Node* values[] = {v1, v2};
-  Node* ret = MakeNode(common()->Return(2), 2, values);
+  Node* values[] = {Int32Constant(0), v1, v2};
+  Node* ret = MakeNode(common()->Return(2), 3, values);
   schedule()->AddReturn(CurrentBlock(), ret);
   current_block_ = nullptr;
 }
 
 
 void RawMachineAssembler::Return(Node* v1, Node* v2, Node* v3) {
-  Node* values[] = {v1, v2, v3};
-  Node* ret = MakeNode(common()->Return(3), 3, values);
+  Node* values[] = {Int32Constant(0), v1, v2, v3};
+  Node* ret = MakeNode(common()->Return(3), 4, values);
+  schedule()->AddReturn(CurrentBlock(), ret);
+  current_block_ = nullptr;
+}
+
+void RawMachineAssembler::PopAndReturn(Node* pop, Node* value) {
+  Node* values[] = {pop, value};
+  Node* ret = MakeNode(common()->Return(1), 2, values);
+  schedule()->AddReturn(CurrentBlock(), ret);
+  current_block_ = nullptr;
+}
+
+void RawMachineAssembler::PopAndReturn(Node* pop, Node* v1, Node* v2) {
+  Node* values[] = {pop, v1, v2};
+  Node* ret = MakeNode(common()->Return(2), 3, values);
+  schedule()->AddReturn(CurrentBlock(), ret);
+  current_block_ = nullptr;
+}
+
+void RawMachineAssembler::PopAndReturn(Node* pop, Node* v1, Node* v2,
+                                       Node* v3) {
+  Node* values[] = {pop, v1, v2, v3};
+  Node* ret = MakeNode(common()->Return(3), 4, values);
   schedule()->AddReturn(CurrentBlock(), ret);
   current_block_ = nullptr;
 }
@@ -253,6 +276,21 @@
                  ref, arity, context);
 }
 
+Node* RawMachineAssembler::CallRuntime5(Runtime::FunctionId function,
+                                        Node* arg1, Node* arg2, Node* arg3,
+                                        Node* arg4, Node* arg5, Node* context) {
+  CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
+      zone(), function, 5, Operator::kNoProperties, CallDescriptor::kNoFlags);
+  int return_count = static_cast<int>(descriptor->ReturnCount());
+
+  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+  Node* ref = AddNode(
+      common()->ExternalConstant(ExternalReference(function, isolate())));
+  Node* arity = Int32Constant(5);
+
+  return AddNode(common()->Call(descriptor), centry, arg1, arg2, arg3, arg4,
+                 arg5, ref, arity, context);
+}
 
 Node* RawMachineAssembler::TailCallN(CallDescriptor* desc, Node* function,
                                      Node** args) {
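
With the leading pop-count input, Return(value) is now just the special case
PopAndReturn(Int32Constant(0), value). A usage sketch (BuildVarargReturn is a
hypothetical builder, not from the patch):

    // Returns `result` and additionally pops `extra_slots` caller stack
    // slots, where `extra_slots` may only be known at runtime.
    void BuildVarargReturn(RawMachineAssembler* m, Node* extra_slots,
                           Node* result) {
      m->PopAndReturn(extra_slots, result);
    }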
diff --git a/src/compiler/raw-machine-assembler.h b/src/compiler/raw-machine-assembler.h
index cdd368c..6d2accb 100644
--- a/src/compiler/raw-machine-assembler.h
+++ b/src/compiler/raw-machine-assembler.h
@@ -13,6 +13,7 @@
 #include "src/compiler/node.h"
 #include "src/compiler/operator.h"
 #include "src/factory.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -34,7 +35,7 @@
 // Also note that the generated graph is only valid together with the generated
 // schedule, using one without the other is invalid as the graph is inherently
 // non-schedulable due to missing control and effect dependencies.
-class RawMachineAssembler {
+class V8_EXPORT_PRIVATE RawMachineAssembler {
  public:
   RawMachineAssembler(
       Isolate* isolate, Graph* graph, CallDescriptor* call_descriptor,
@@ -717,6 +718,9 @@
   // Call to a runtime function with four arguments.
   Node* CallRuntime4(Runtime::FunctionId function, Node* arg1, Node* arg2,
                      Node* arg3, Node* arg4, Node* context);
+  // Call to a runtime function with five arguments.
+  Node* CallRuntime5(Runtime::FunctionId function, Node* arg1, Node* arg2,
+                     Node* arg3, Node* arg4, Node* arg5, Node* context);
   // Call to a C function with zero arguments.
   Node* CallCFunction0(MachineType return_type, Node* function);
   // Call to a C function with one parameter.
@@ -773,6 +777,9 @@
   void Return(Node* value);
   void Return(Node* v1, Node* v2);
   void Return(Node* v1, Node* v2, Node* v3);
+  void PopAndReturn(Node* pop, Node* value);
+  void PopAndReturn(Node* pop, Node* v1, Node* v2);
+  void PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3);
   void Bind(RawMachineLabel* label);
   void Deoptimize(Node* state);
   void DebugBreak();
@@ -834,8 +841,7 @@
   DISALLOW_COPY_AND_ASSIGN(RawMachineAssembler);
 };
 
-
-class RawMachineLabel final {
+class V8_EXPORT_PRIVATE RawMachineLabel final {
  public:
   enum Type { kDeferred, kNonDeferred };
 
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index efcdcb4..0ed479f 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -19,6 +19,11 @@
 
 namespace {
 
+static const int kFloatRepBit =
+    1 << static_cast<int>(MachineRepresentation::kFloat32);
+static const int kSimd128RepBit =
+    1 << static_cast<int>(MachineRepresentation::kSimd128);
+
 void RemoveElement(ZoneVector<LiveRange*>* v, LiveRange* range) {
   auto it = std::find(v->begin(), v->end(), range);
   DCHECK(it != v->end());
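
kFloatRepBit and kSimd128RepBit give every MachineRepresentation its own bit,
so an instruction sequence's representation_mask() can be probed in O(1) for
float32 or simd128 uses. A standalone sketch, assuming the enum values seen in
the patch:

    constexpr int RepBit(MachineRepresentation rep) {
      return 1 << static_cast<int>(rep);
    }

    // True iff the sequence mentions any float32 value.
    bool HasFloat32(int representation_mask) {
      return (representation_mask &
              RepBit(MachineRepresentation::kFloat32)) != 0;
    }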
@@ -33,7 +38,7 @@
 
 int GetAllocatableRegisterCount(const RegisterConfiguration* cfg,
                                 RegisterKind kind) {
-  return kind == FP_REGISTERS ? cfg->num_allocatable_aliased_double_registers()
+  return kind == FP_REGISTERS ? cfg->num_allocatable_double_registers()
                               : cfg->num_allocatable_general_registers();
 }
 
@@ -74,14 +79,8 @@
     case MachineRepresentation::kTaggedSigned:
     case MachineRepresentation::kTaggedPointer:
     case MachineRepresentation::kTagged:
-      return kPointerSize;
     case MachineRepresentation::kFloat32:
-// TODO(bbudge) Eliminate this when FP register aliasing works.
-#if V8_TARGET_ARCH_ARM
-      return kDoubleSize;
-#else
       return kPointerSize;
-#endif
     case MachineRepresentation::kWord64:
     case MachineRepresentation::kFloat64:
       return kDoubleSize;
@@ -342,6 +341,11 @@
   return UsePositionHintType::kNone;
 }
 
+void UsePosition::SetHint(UsePosition* use_pos) {
+  DCHECK_NOT_NULL(use_pos);
+  hint_ = use_pos;
+  flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos);
+}
 
 void UsePosition::ResolveHint(UsePosition* use_pos) {
   DCHECK_NOT_NULL(use_pos);
@@ -493,6 +497,12 @@
   return pos;
 }
 
+LifetimePosition LiveRange::NextLifetimePositionRegisterIsBeneficial(
+    const LifetimePosition& start) const {
+  UsePosition* next_use = NextUsePositionRegisterIsBeneficial(start);
+  if (next_use == nullptr) return End();
+  return next_use->pos();
+}
 
 UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
     LifetimePosition start) const {
@@ -581,7 +591,9 @@
 LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) {
   int new_id = TopLevel()->GetNextChildId();
   LiveRange* child = new (zone) LiveRange(new_id, representation(), TopLevel());
-  DetachAt(position, child, zone);
+  // If we split, we do so because we're about to switch registers or move
+  // to/from a slot, so there's no value in connecting hints.
+  DetachAt(position, child, zone, DoNotConnectHints);
 
   child->top_level_ = TopLevel();
   child->next_ = next_;
@@ -589,9 +601,9 @@
   return child;
 }
 
-
 UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
-                                 Zone* zone) {
+                                 Zone* zone,
+                                 HintConnectionOption connect_hints) {
   DCHECK(Start() < position);
   DCHECK(End() > position);
   DCHECK(result->IsEmpty());
@@ -670,6 +682,10 @@
   last_processed_use_ = nullptr;
   current_interval_ = nullptr;
 
+  if (connect_hints == ConnectHints && use_before != nullptr &&
+      use_after != nullptr) {
+    use_after->SetHint(use_before);
+  }
 #ifdef DEBUG
   VerifyChildStructure();
   result->VerifyChildStructure();
@@ -912,17 +928,21 @@
 
   if (end >= End()) {
     DCHECK(start > Start());
-    DetachAt(start, &splinter_temp, zone);
+    DetachAt(start, &splinter_temp, zone, ConnectHints);
     next_ = nullptr;
   } else {
     DCHECK(start < End() && Start() < end);
 
     const int kInvalidId = std::numeric_limits<int>::max();
 
-    UsePosition* last = DetachAt(start, &splinter_temp, zone);
+    UsePosition* last = DetachAt(start, &splinter_temp, zone, ConnectHints);
 
     LiveRange end_part(kInvalidId, this->representation(), nullptr);
-    last_in_splinter = splinter_temp.DetachAt(end, &end_part, zone);
+    // The last chunk exits the deferred region, and we don't want to connect
+    // hints here, because the non-deferred region shouldn't be affected
+    // by allocation decisions on the deferred path.
+    last_in_splinter =
+        splinter_temp.DetachAt(end, &end_part, zone, DoNotConnectHints);
 
     next_ = end_part.next_;
     last_interval_->set_next(end_part.first_interval_);
@@ -1345,14 +1365,23 @@
                    allocation_zone()),
       fixed_live_ranges_(this->config()->num_general_registers(), nullptr,
                          allocation_zone()),
+      fixed_float_live_ranges_(allocation_zone()),
       fixed_double_live_ranges_(this->config()->num_double_registers(), nullptr,
                                 allocation_zone()),
+      fixed_simd128_live_ranges_(allocation_zone()),
       spill_ranges_(code->VirtualRegisterCount(), nullptr, allocation_zone()),
       delayed_references_(allocation_zone()),
       assigned_registers_(nullptr),
       assigned_double_registers_(nullptr),
       virtual_register_count_(code->VirtualRegisterCount()),
       preassigned_slot_ranges_(zone) {
+  if (!kSimpleFPAliasing) {
+    fixed_float_live_ranges_.resize(this->config()->num_float_registers(),
+                                    nullptr);
+    fixed_simd128_live_ranges_.resize(this->config()->num_simd128_registers(),
+                                      nullptr);
+  }
+
   assigned_registers_ = new (code_zone())
       BitVector(this->config()->num_general_registers(), code_zone());
   assigned_double_registers_ = new (code_zone())
@@ -1524,8 +1553,21 @@
                                            int index) {
   switch (rep) {
     case MachineRepresentation::kFloat32:
-    case MachineRepresentation::kFloat64:
     case MachineRepresentation::kSimd128:
+      if (kSimpleFPAliasing) {
+        assigned_double_registers_->Add(index);
+      } else {
+        int alias_base_index = -1;
+        int aliases = config()->GetAliases(
+            rep, index, MachineRepresentation::kFloat64, &alias_base_index);
+        DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
+        while (aliases--) {
+          int aliased_reg = alias_base_index + aliases;
+          assigned_double_registers_->Add(aliased_reg);
+        }
+      }
+      break;
+    case MachineRepresentation::kFloat64:
       assigned_double_registers_->Add(index);
       break;
     default:
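
On targets without simple FP aliasing, a float32 or simd128 register overlaps
one or more double registers, and the patch repeats the same GetAliases loop
wherever aliased registers must be marked or probed. Extracted as a sketch
(ForEachDoubleAlias is a hypothetical helper):

    // GetAliases reports how many kFloat64 registers overlap `index` for
    // representation `rep`, plus the first such index (-1 if none).
    template <typename F>
    void ForEachDoubleAlias(const RegisterConfiguration* config,
                            MachineRepresentation rep, int index, F visit) {
      int alias_base_index = -1;
      int aliases = config->GetAliases(
          rep, index, MachineRepresentation::kFloat64, &alias_base_index);
      while (aliases--) visit(alias_base_index + aliases);
    }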
@@ -1852,7 +1894,11 @@
   int result = -index - 1;
   switch (rep) {
     case MachineRepresentation::kSimd128:
+      result -= config()->num_float_registers();
+    // Fall through.
     case MachineRepresentation::kFloat32:
+      result -= config()->num_double_registers();
+    // Fall through.
     case MachineRepresentation::kFloat64:
       result -= config()->num_general_registers();
       break;
@@ -1879,24 +1925,33 @@
 
 TopLevelLiveRange* LiveRangeBuilder::FixedFPLiveRangeFor(
     int index, MachineRepresentation rep) {
-  TopLevelLiveRange* result = nullptr;
-  switch (rep) {
-    case MachineRepresentation::kFloat32:
-    case MachineRepresentation::kFloat64:
-    case MachineRepresentation::kSimd128:
-      DCHECK(index < config()->num_double_registers());
-      result = data()->fixed_double_live_ranges()[index];
-      if (result == nullptr) {
-        result = data()->NewLiveRange(FixedFPLiveRangeID(index, rep), rep);
-        DCHECK(result->IsFixed());
-        result->set_assigned_register(index);
-        data()->MarkAllocated(rep, index);
-        data()->fixed_double_live_ranges()[index] = result;
-      }
-      break;
-    default:
-      UNREACHABLE();
-      break;
+  int num_regs = config()->num_double_registers();
+  ZoneVector<TopLevelLiveRange*>* live_ranges =
+      &data()->fixed_double_live_ranges();
+  if (!kSimpleFPAliasing) {
+    switch (rep) {
+      case MachineRepresentation::kFloat32:
+        num_regs = config()->num_float_registers();
+        live_ranges = &data()->fixed_float_live_ranges();
+        break;
+      case MachineRepresentation::kSimd128:
+        num_regs = config()->num_simd128_registers();
+        live_ranges = &data()->fixed_simd128_live_ranges();
+        break;
+      default:
+        break;
+    }
+  }
+
+  DCHECK(index < num_regs);
+  USE(num_regs);
+  TopLevelLiveRange* result = (*live_ranges)[index];
+  if (result == nullptr) {
+    result = data()->NewLiveRange(FixedFPLiveRangeID(index, rep), rep);
+    DCHECK(result->IsFixed());
+    result->set_assigned_register(index);
+    data()->MarkAllocated(rep, index);
+    (*live_ranges)[index] = result;
   }
   return result;
 }
@@ -1972,6 +2027,13 @@
   int block_start = block->first_instruction_index();
   LifetimePosition block_start_position =
       LifetimePosition::GapFromInstructionIndex(block_start);
+  bool fixed_float_live_ranges = false;
+  bool fixed_simd128_live_ranges = false;
+  if (!kSimpleFPAliasing) {
+    int mask = data()->code()->representation_mask();
+    fixed_float_live_ranges = (mask & kFloatRepBit) != 0;
+    fixed_simd128_live_ranges = (mask & kSimd128RepBit) != 0;
+  }
 
   for (int index = block->last_instruction_index(); index >= block_start;
        index--) {
@@ -2020,8 +2082,7 @@
     }
 
     if (instr->ClobbersDoubleRegisters()) {
-      for (int i = 0; i < config()->num_allocatable_aliased_double_registers();
-           ++i) {
+      for (int i = 0; i < config()->num_allocatable_double_registers(); ++i) {
         // Add a UseInterval for all DoubleRegisters. See comment above for
         // general registers.
         int code = config()->GetAllocatableDoubleCode(i);
@@ -2030,6 +2091,31 @@
         range->AddUseInterval(curr_position, curr_position.End(),
                               allocation_zone());
       }
+      // Clobber fixed float registers on archs with non-simple aliasing.
+      if (!kSimpleFPAliasing) {
+        if (fixed_float_live_ranges) {
+          for (int i = 0; i < config()->num_allocatable_float_registers();
+               ++i) {
+            // Add a UseInterval for all FloatRegisters. See comment above for
+            // general registers.
+            int code = config()->GetAllocatableFloatCode(i);
+            TopLevelLiveRange* range =
+                FixedFPLiveRangeFor(code, MachineRepresentation::kFloat32);
+            range->AddUseInterval(curr_position, curr_position.End(),
+                                  allocation_zone());
+          }
+        }
+        if (fixed_simd128_live_ranges) {
+          for (int i = 0; i < config()->num_allocatable_simd128_registers();
+               ++i) {
+            int code = config()->GetAllocatableSimd128Code(i);
+            TopLevelLiveRange* range =
+                FixedFPLiveRangeFor(code, MachineRepresentation::kSimd128);
+            range->AddUseInterval(curr_position, curr_position.End(),
+                                  allocation_zone());
+          }
+        }
+      }
     }
 
     for (size_t i = 0; i < instr->InputCount(); i++) {
@@ -2141,7 +2227,6 @@
   }
 }
 
-
 void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
                                    BitVector* live) {
   for (PhiInstruction* phi : block->phis()) {
@@ -2149,33 +2234,112 @@
     // block.
     int phi_vreg = phi->virtual_register();
     live->Remove(phi_vreg);
-    // Select the hint from the first predecessor block that preceeds this block
-    // in the rpo ordering. Prefer non-deferred blocks. The enforcement of
-    // hinting in rpo order is required because hint resolution that happens
-    // later in the compiler pipeline visits instructions in reverse rpo,
-    // relying on the fact that phis are encountered before their hints.
-    const Instruction* instr = nullptr;
-    const InstructionBlock::Predecessors& predecessors = block->predecessors();
-    for (size_t i = 0; i < predecessors.size(); ++i) {
-      const InstructionBlock* predecessor_block =
-          code()->InstructionBlockAt(predecessors[i]);
-      if (predecessor_block->rpo_number() < block->rpo_number()) {
-        instr = GetLastInstruction(code(), predecessor_block);
-        if (!predecessor_block->IsDeferred()) break;
-      }
-    }
-    DCHECK_NOT_NULL(instr);
-
+    // Select a hint from a predecessor block that precedes this block in the
+    // rpo order. In order of priority:
+    // - Avoid hints from deferred blocks.
+    // - Prefer hints from allocated (or explicit) operands.
+    // - Prefer hints from empty blocks (containing just parallel moves and a
+    //   jump). In these cases, if we can elide the moves, the jump threader
+    //   is likely to be able to elide the jump.
+    // The enforcement of hinting in rpo order is required because hint
+    // resolution that happens later in the compiler pipeline visits
+    // instructions in reverse rpo order, relying on the fact that phis are
+    // encountered before their hints.
     InstructionOperand* hint = nullptr;
-    for (MoveOperands* move : *instr->GetParallelMove(Instruction::END)) {
-      InstructionOperand& to = move->destination();
-      if (to.IsUnallocated() &&
-          UnallocatedOperand::cast(to).virtual_register() == phi_vreg) {
-        hint = &move->source();
-        break;
+    int hint_preference = 0;
+
+    // The cost of hinting increases with the number of predecessors. At the
+    // same time, the typical benefit decreases, since this hinting only
+    // optimises the execution path through one predecessor. A limit of 2 is
+    // sufficient to hit the common if/else pattern.
+    int predecessor_limit = 2;
+
+    for (RpoNumber predecessor : block->predecessors()) {
+      const InstructionBlock* predecessor_block =
+          code()->InstructionBlockAt(predecessor);
+      DCHECK_EQ(predecessor_block->rpo_number(), predecessor);
+
+      // Only take hints from earlier rpo numbers.
+      if (predecessor >= block->rpo_number()) continue;
+
+      // Look up the predecessor instruction.
+      const Instruction* predecessor_instr =
+          GetLastInstruction(code(), predecessor_block);
+      InstructionOperand* predecessor_hint = nullptr;
+      // Phis are assigned in the END position of the last instruction in each
+      // predecessor block.
+      for (MoveOperands* move :
+           *predecessor_instr->GetParallelMove(Instruction::END)) {
+        InstructionOperand& to = move->destination();
+        if (to.IsUnallocated() &&
+            UnallocatedOperand::cast(to).virtual_register() == phi_vreg) {
+          predecessor_hint = &move->source();
+          break;
+        }
       }
+      DCHECK_NOT_NULL(predecessor_hint);
+
+      // For each predecessor, generate a score according to the priorities
+      // described above, and pick the best one. Flags in higher-order bits have
+      // a higher priority than those in lower-order bits.
+      int predecessor_hint_preference = 0;
+      const int kNotDeferredBlockPreference = (1 << 2);
+      const int kMoveIsAllocatedPreference = (1 << 1);
+      const int kBlockIsEmptyPreference = (1 << 0);
+
+      // - Avoid hints from deferred blocks.
+      if (!predecessor_block->IsDeferred()) {
+        predecessor_hint_preference |= kNotDeferredBlockPreference;
+      }
+
+      // - Prefer hints from allocated (or explicit) operands.
+      //
+      // Already-allocated or explicit operands are typically assigned using
+      // the parallel moves on the last instruction. For example:
+      //
+      //      gap (v101 = [x0|R|w32]) (v100 = v101)
+      //      ArchJmp
+      //    ...
+      //    phi: v100 = v101 v102
+      //
+      // We have already found the END move, so look for a matching START move
+      // from an allocated (or explicit) operand.
+      //
+      // Note that we cannot simply look up data()->live_ranges()[vreg] here
+      // because the live ranges are still being built when this function is
+      // called.
+      // TODO(v8): Find a way to separate hinting from live range analysis in
+      // BuildLiveRanges so that we can use the O(1) live-range look-up.
+      auto moves = predecessor_instr->GetParallelMove(Instruction::START);
+      if (moves != nullptr) {
+        for (MoveOperands* move : *moves) {
+          InstructionOperand& to = move->destination();
+          if (predecessor_hint->Equals(to)) {
+            if (move->source().IsAllocated() || move->source().IsExplicit()) {
+              predecessor_hint_preference |= kMoveIsAllocatedPreference;
+            }
+            break;
+          }
+        }
+      }
+
+      // - Prefer hints from empty blocks.
+      if (predecessor_block->last_instruction_index() ==
+          predecessor_block->first_instruction_index()) {
+        predecessor_hint_preference |= kBlockIsEmptyPreference;
+      }
+
+      if ((hint == nullptr) ||
+          (predecessor_hint_preference > hint_preference)) {
+        // Take the hint from this predecessor.
+        hint = predecessor_hint;
+        hint_preference = predecessor_hint_preference;
+      }
+
+      if (--predecessor_limit <= 0) break;
     }
-    DCHECK(hint != nullptr);
+    DCHECK_NOT_NULL(hint);
+
     LifetimePosition block_start = LifetimePosition::GapFromInstructionIndex(
         block->first_instruction_index());
     UsePosition* use_pos = Define(block_start, &phi->output(), hint,
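
The three preference flags are packed so that an ordinary integer comparison
realizes the stated priority order: avoiding deferred blocks outranks an
allocated-operand hint, which outranks an empty block. As a standalone sketch
(PredecessorHintScore is a hypothetical name):

    // Higher-order bits dominate, matching the constants above.
    int PredecessorHintScore(bool not_deferred, bool move_is_allocated,
                             bool block_is_empty) {
      return (not_deferred ? 1 << 2 : 0) |
             (move_is_allocated ? 1 << 1 : 0) |
             (block_is_empty ? 1 << 0 : 0);
    }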
@@ -2372,7 +2536,13 @@
       num_allocatable_registers_(
           GetAllocatableRegisterCount(data->config(), kind)),
       allocatable_register_codes_(
-          GetAllocatableRegisterCodes(data->config(), kind)) {}
+          GetAllocatableRegisterCodes(data->config(), kind)),
+      check_fp_aliasing_(false) {
+  if (!kSimpleFPAliasing && kind == FP_REGISTERS) {
+    check_fp_aliasing_ = (data->code()->representation_mask() &
+                          (kFloatRepBit | kSimd128RepBit)) != 0;
+  }
+}
 
 LifetimePosition RegisterAllocator::GetSplitPositionForInstruction(
     const LiveRange* range, int instruction_index) {
@@ -2401,7 +2571,13 @@
     if (next_pos.IsGapPosition()) {
       next_pos = next_pos.NextStart();
     }
-    UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
+
+    // With splinters, we can be more strict and skip over positions
+    // not strictly needing registers.
+    UsePosition* pos =
+        range->IsSplinter()
+            ? range->NextRegisterPosition(next_pos)
+            : range->NextUsePositionRegisterIsBeneficial(next_pos);
     // If the range already has a spill operand and it doesn't need a
     // register immediately, split it and spill the first part of the range.
     if (pos == nullptr) {
@@ -2593,6 +2769,14 @@
     for (TopLevelLiveRange* current : data()->fixed_double_live_ranges()) {
       if (current != nullptr) AddToInactive(current);
     }
+    if (!kSimpleFPAliasing && check_fp_aliasing()) {
+      for (TopLevelLiveRange* current : data()->fixed_float_live_ranges()) {
+        if (current != nullptr) AddToInactive(current);
+      }
+      for (TopLevelLiveRange* current : data()->fixed_simd128_live_ranges()) {
+        if (current != nullptr) AddToInactive(current);
+      }
+    }
   }
 
   while (!unhandled_live_ranges().empty()) {
@@ -2634,14 +2818,30 @@
 
     DCHECK(!current->HasRegisterAssigned() && !current->spilled());
 
-    bool result = TryAllocateFreeReg(current);
-    if (!result) AllocateBlockedReg(current);
-    if (current->HasRegisterAssigned()) {
-      AddToActive(current);
-    }
+    ProcessCurrentRange(current);
   }
 }
 
+bool LinearScanAllocator::TrySplitAndSpillSplinter(LiveRange* range) {
+  DCHECK(range->TopLevel()->IsSplinter());
+  // If we can spill the whole range, great. Otherwise, split above the
+  // first use needing a register and spill the top part.
+  const UsePosition* next_reg = range->NextRegisterPosition(range->Start());
+  if (next_reg == nullptr) {
+    Spill(range);
+    return true;
+  } else if (range->FirstHintPosition() == nullptr) {
+    // If there was no hint, but we have a use position requiring a
+    // register, apply the hot path heuristics.
+    return false;
+  } else if (next_reg->pos().PrevStart() > range->Start()) {
+    LiveRange* tail = SplitRangeAt(range, next_reg->pos().PrevStart());
+    AddToUnhandledSorted(tail);
+    Spill(range);
+    return true;
+  }
+  return false;
+}
 
 void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
                                                        int reg) {
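
TrySplitAndSpillSplinter encodes the splinter policy from the summary further
below as three early outs. A self-contained model of the decision (all names
hypothetical):

    enum class SplinterAction { kSpillWhole, kSplitThenSpill, kUseHotPath };

    // has_register_use: some use strictly requires a register.
    // first_use_after_start: that use begins strictly after the range starts.
    SplinterAction ClassifySplinter(bool has_register_use, bool has_hint,
                                    bool first_use_after_start) {
      if (!has_register_use) return SplinterAction::kSpillWhole;
      if (!has_hint) return SplinterAction::kUseHotPath;
      if (first_use_after_start) return SplinterAction::kSplitThenSpill;
      return SplinterAction::kUseHotPath;
    }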
@@ -2757,35 +2957,133 @@
         range->TopLevel()->vreg(), range->relative_id());
 }
 
+void LinearScanAllocator::GetFPRegisterSet(MachineRepresentation rep,
+                                           int* num_regs, int* num_codes,
+                                           const int** codes) const {
+  DCHECK(!kSimpleFPAliasing);
+  if (rep == MachineRepresentation::kFloat32) {
+    *num_regs = data()->config()->num_float_registers();
+    *num_codes = data()->config()->num_allocatable_float_registers();
+    *codes = data()->config()->allocatable_float_codes();
+  } else if (rep == MachineRepresentation::kSimd128) {
+    *num_regs = data()->config()->num_simd128_registers();
+    *num_codes = data()->config()->num_allocatable_simd128_registers();
+    *codes = data()->config()->allocatable_simd128_codes();
+  } else {
+    UNREACHABLE();
+  }
+}
 
-bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
+void LinearScanAllocator::FindFreeRegistersForRange(
+    LiveRange* range, Vector<LifetimePosition> positions) {
   int num_regs = num_registers();
   int num_codes = num_allocatable_registers();
   const int* codes = allocatable_register_codes();
+  MachineRepresentation rep = range->representation();
+  if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
+                             rep == MachineRepresentation::kSimd128))
+    GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
+  DCHECK_GE(positions.length(), num_regs);
 
-  LifetimePosition free_until_pos[RegisterConfiguration::kMaxFPRegisters];
   for (int i = 0; i < num_regs; i++) {
-    free_until_pos[i] = LifetimePosition::MaxPosition();
+    positions[i] = LifetimePosition::MaxPosition();
   }
 
   for (LiveRange* cur_active : active_live_ranges()) {
     int cur_reg = cur_active->assigned_register();
-    free_until_pos[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
-    TRACE("Register %s is free until pos %d (1)\n", RegisterName(cur_reg),
-          LifetimePosition::GapFromInstructionIndex(0).value());
+    if (kSimpleFPAliasing || !check_fp_aliasing()) {
+      positions[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
+      TRACE("Register %s is free until pos %d (1)\n", RegisterName(cur_reg),
+            LifetimePosition::GapFromInstructionIndex(0).value());
+    } else {
+      int alias_base_index = -1;
+      int aliases = data()->config()->GetAliases(
+          cur_active->representation(), cur_reg, rep, &alias_base_index);
+      DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
+      while (aliases--) {
+        int aliased_reg = alias_base_index + aliases;
+        positions[aliased_reg] = LifetimePosition::GapFromInstructionIndex(0);
+      }
+    }
   }
 
   for (LiveRange* cur_inactive : inactive_live_ranges()) {
-    DCHECK(cur_inactive->End() > current->Start());
-    LifetimePosition next_intersection =
-        cur_inactive->FirstIntersection(current);
+    DCHECK(cur_inactive->End() > range->Start());
+    LifetimePosition next_intersection = cur_inactive->FirstIntersection(range);
     if (!next_intersection.IsValid()) continue;
     int cur_reg = cur_inactive->assigned_register();
-    free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
-    TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
-          Min(free_until_pos[cur_reg], next_intersection).value());
+    if (kSimpleFPAliasing || !check_fp_aliasing()) {
+      positions[cur_reg] = Min(positions[cur_reg], next_intersection);
+      TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
+            Min(positions[cur_reg], next_intersection).value());
+    } else {
+      int alias_base_index = -1;
+      int aliases = data()->config()->GetAliases(
+          cur_inactive->representation(), cur_reg, rep, &alias_base_index);
+      DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
+      while (aliases--) {
+        int aliased_reg = alias_base_index + aliases;
+        positions[aliased_reg] = Min(positions[aliased_reg], next_intersection);
+      }
+    }
   }
+}
 
+// High-level register allocation summary:
+//
+// For regular, or hot (i.e. not splinter), ranges, we attempt to first
+// allocate the preferred (hint) register. If that is not possible,
+// we find a register that's free, and allocate that. If that's not possible,
+// we search for a register to steal from a range that was allocated. The
+// goal is to optimize for throughput by avoiding register-to-memory
+// moves, which are expensive.
+//
+// For splinters, the goal is to minimize the number of moves. First we try
+// to allocate the preferred register (more discussion follows). Failing that,
+// we bail out and spill as far as we can, unless the first use is at start,
+// case in which we apply the same behavior as we do for regular ranges.
+// If there is no hint, we apply the hot-path behavior.
+//
+// For the splinter, the hint register may come from:
+//
+// - the hot path (we set it at splintering time with SetHint). In this case, if
+// we cannot offer the hint register, spilling is better because it's at most
+// 1 move, while trying to find and offer another register is at least 1 move.
+//
+// - a constraint. If we cannot offer that register, it's because there is some
+// interference. So offering the hint register up to the interference would
+// result in a move at the interference, plus a move to satisfy the constraint.
+// This is also the number of moves if we spill, with the potential of the
+// range being already spilled and thus saving a move (the spill).
+// Note that this can only be an input constraint; if it were an output one,
+// the range wouldn't be a splinter because it means it'd be defined in a
+// deferred block, and we don't mark those as splinters (they live in deferred
+// blocks only).
+//
+// - a phi. The same analysis as in the case of the input constraint applies.
+//
+void LinearScanAllocator::ProcessCurrentRange(LiveRange* current) {
+  LifetimePosition free_until_pos_buff[RegisterConfiguration::kMaxFPRegisters];
+  Vector<LifetimePosition> free_until_pos(
+      free_until_pos_buff, RegisterConfiguration::kMaxFPRegisters);
+  FindFreeRegistersForRange(current, free_until_pos);
+  if (!TryAllocatePreferredReg(current, free_until_pos)) {
+    if (current->TopLevel()->IsSplinter()) {
+      if (TrySplitAndSpillSplinter(current)) return;
+    }
+    if (!TryAllocateFreeReg(current, free_until_pos)) {
+      AllocateBlockedReg(current);
+    }
+  }
+  if (current->HasRegisterAssigned()) {
+    AddToActive(current);
+  }
+}
+
+bool LinearScanAllocator::TryAllocatePreferredReg(
+    LiveRange* current, const Vector<LifetimePosition>& free_until_pos) {
   int hint_register;
   if (current->FirstHintPosition(&hint_register) != nullptr) {
     TRACE(
@@ -2803,6 +3101,20 @@
       return true;
     }
   }
+  return false;
+}
+
+bool LinearScanAllocator::TryAllocateFreeReg(
+    LiveRange* current, const Vector<LifetimePosition>& free_until_pos) {
+  int num_regs = 0;  // used only for the call to GetFPRegisterSet.
+  int num_codes = num_allocatable_registers();
+  const int* codes = allocatable_register_codes();
+  MachineRepresentation rep = current->representation();
+  if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
+                             rep == MachineRepresentation::kSimd128))
+    GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
+
+  DCHECK_GE(free_until_pos.length(), num_codes);
 
   // Find the register which stays free for the longest time.
   int reg = codes[0];
@@ -2837,7 +3149,6 @@
   return true;
 }
 
-
 void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
   UsePosition* register_use = current->NextRegisterPosition(current->Start());
   if (register_use == nullptr) {
@@ -2850,6 +3161,10 @@
   int num_regs = num_registers();
   int num_codes = num_allocatable_registers();
   const int* codes = allocatable_register_codes();
+  MachineRepresentation rep = current->representation();
+  if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
+                             rep == MachineRepresentation::kSimd128))
+    GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
 
   LifetimePosition use_pos[RegisterConfiguration::kMaxFPRegisters];
   LifetimePosition block_pos[RegisterConfiguration::kMaxFPRegisters];
@@ -2861,16 +3176,28 @@
     int cur_reg = range->assigned_register();
     bool is_fixed_or_cant_spill =
         range->TopLevel()->IsFixed() || !range->CanBeSpilled(current->Start());
-    if (is_fixed_or_cant_spill) {
-      block_pos[cur_reg] = use_pos[cur_reg] =
-          LifetimePosition::GapFromInstructionIndex(0);
-    } else {
-      UsePosition* next_use =
-          range->NextUsePositionRegisterIsBeneficial(current->Start());
-      if (next_use == nullptr) {
-        use_pos[cur_reg] = range->End();
+    if (kSimpleFPAliasing || !check_fp_aliasing()) {
+      if (is_fixed_or_cant_spill) {
+        block_pos[cur_reg] = use_pos[cur_reg] =
+            LifetimePosition::GapFromInstructionIndex(0);
       } else {
-        use_pos[cur_reg] = next_use->pos();
+        use_pos[cur_reg] =
+            range->NextLifetimePositionRegisterIsBeneficial(current->Start());
+      }
+    } else {
+      int alias_base_index = -1;
+      int aliases = data()->config()->GetAliases(
+          range->representation(), cur_reg, rep, &alias_base_index);
+      DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
+      while (aliases--) {
+        int aliased_reg = alias_base_index + aliases;
+        if (is_fixed_or_cant_spill) {
+          block_pos[aliased_reg] = use_pos[aliased_reg] =
+              LifetimePosition::GapFromInstructionIndex(0);
+        } else {
+          use_pos[aliased_reg] =
+              range->NextLifetimePositionRegisterIsBeneficial(current->Start());
+        }
       }
     }
   }
@@ -2881,11 +3208,29 @@
     if (!next_intersection.IsValid()) continue;
     int cur_reg = range->assigned_register();
     bool is_fixed = range->TopLevel()->IsFixed();
-    if (is_fixed) {
-      block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
-      use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
+    if (kSimpleFPAliasing || !check_fp_aliasing()) {
+      if (is_fixed) {
+        block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
+        use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
+      } else {
+        use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
+      }
     } else {
-      use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
+      int alias_base_index = -1;
+      int aliases = data()->config()->GetAliases(
+          range->representation(), cur_reg, rep, &alias_base_index);
+      DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
+      while (aliases--) {
+        int aliased_reg = alias_base_index + aliases;
+        if (is_fixed) {
+          block_pos[aliased_reg] =
+              Min(block_pos[aliased_reg], next_intersection);
+          use_pos[aliased_reg] =
+              Min(block_pos[aliased_reg], use_pos[aliased_reg]);
+        } else {
+          use_pos[aliased_reg] = Min(use_pos[aliased_reg], next_intersection);
+        }
+      }
     }
   }
 
@@ -2937,7 +3282,15 @@
   LifetimePosition split_pos = current->Start();
   for (size_t i = 0; i < active_live_ranges().size(); ++i) {
     LiveRange* range = active_live_ranges()[i];
-    if (range->assigned_register() != reg) continue;
+    if (kSimpleFPAliasing || !check_fp_aliasing()) {
+      if (range->assigned_register() != reg) continue;
+    } else {
+      if (!data()->config()->AreAliases(current->representation(), reg,
+                                        range->representation(),
+                                        range->assigned_register())) {
+        continue;
+      }
+    }
 
     UsePosition* next_pos = range->NextRegisterPosition(current->Start());
     LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
@@ -2964,7 +3317,14 @@
     LiveRange* range = inactive_live_ranges()[i];
     DCHECK(range->End() > current->Start());
     if (range->TopLevel()->IsFixed()) continue;
-    if (range->assigned_register() != reg) continue;
+    if (kSimpleFPAliasing || !check_fp_aliasing()) {
+      if (range->assigned_register() != reg) continue;
+    } else {
+      if (!data()->config()->AreAliases(current->representation(), reg,
+                                        range->representation(),
+                                        range->assigned_register()))
+        continue;
+    }
 
     LifetimePosition next_intersection = range->FirstIntersection(current);
     if (next_intersection.IsValid()) {
@@ -3455,7 +3815,6 @@
   return gap_index;
 }
 
-
 void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
   DelayedInsertionMap delayed_insertion_map(local_zone);
   for (TopLevelLiveRange* top_range : data()->live_ranges()) {
@@ -3543,9 +3902,8 @@
     // Gather all MoveOperands for a single ParallelMove.
     MoveOperands* move =
         new (code_zone()) MoveOperands(it->first.second, it->second);
-    MoveOperands* eliminate = moves->PrepareInsertAfter(move);
+    moves->PrepareInsertAfter(move, &to_eliminate);
     to_insert.push_back(move);
-    if (eliminate != nullptr) to_eliminate.push_back(eliminate);
   }
 }
 
diff --git a/src/compiler/register-allocator.h b/src/compiler/register-allocator.h
index 2089ea2..7698a90 100644
--- a/src/compiler/register-allocator.h
+++ b/src/compiler/register-allocator.h
@@ -5,7 +5,9 @@
 #ifndef V8_REGISTER_ALLOCATOR_H_
 #define V8_REGISTER_ALLOCATOR_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/compiler/instruction.h"
+#include "src/globals.h"
 #include "src/ostreams.h"
 #include "src/register-configuration.h"
 #include "src/zone/zone-containers.h"
@@ -246,7 +248,8 @@
               "kUnassignedRegister too small");
 
 // Representation of a use position.
-class UsePosition final : public ZoneObject {
+class V8_EXPORT_PRIVATE UsePosition final
+    : public NON_EXPORTED_BASE(ZoneObject) {
  public:
   UsePosition(LifetimePosition pos, InstructionOperand* operand, void* hint,
               UsePositionHintType hint_type);
@@ -275,6 +278,7 @@
   }
   bool HasHint() const;
   bool HintRegister(int* register_code) const;
+  void SetHint(UsePosition* use_pos);
   void ResolveHint(UsePosition* use_pos);
   bool IsResolved() const {
     return hint_type() != UsePositionHintType::kUnresolved;
@@ -304,7 +308,7 @@
 
 // Representation of SSA values' live ranges as a collection of (continuous)
 // intervals over the instruction ordering.
-class LiveRange : public ZoneObject {
+class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
  public:
   UseInterval* first_interval() const { return first_interval_; }
   UsePosition* first_pos() const { return first_pos_; }
@@ -353,6 +357,11 @@
   UsePosition* NextUsePositionRegisterIsBeneficial(
       LifetimePosition start) const;
 
+  // Returns lifetime position for which register is beneficial in this live
+  // range and which follows both start and last processed use position.
+  LifetimePosition NextLifetimePositionRegisterIsBeneficial(
+      const LifetimePosition& start) const;
+
   // Returns use position for which register is beneficial in this live
   // range and which precedes start.
   UsePosition* PreviousUsePositionRegisterIsBeneficial(
@@ -368,8 +377,12 @@
   // live range to the result live range.
   // The current range will terminate at position, while result will start from
   // position.
+  enum HintConnectionOption : bool {
+    DoNotConnectHints = false,
+    ConnectHints = true
+  };
   UsePosition* DetachAt(LifetimePosition position, LiveRange* result,
-                        Zone* zone);
+                        Zone* zone, HintConnectionOption connect_hints);
 
   // Detaches at position, and then links the resulting ranges. Returns the
   // child, which starts at position.
@@ -471,8 +484,7 @@
   DISALLOW_COPY_AND_ASSIGN(LiveRangeGroup);
 };
 
-
-class TopLevelLiveRange final : public LiveRange {
+class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
  public:
   explicit TopLevelLiveRange(int vreg, MachineRepresentation rep);
   int spill_start_index() const { return spill_start_index_; }
@@ -766,12 +778,24 @@
   ZoneVector<TopLevelLiveRange*>& fixed_live_ranges() {
     return fixed_live_ranges_;
   }
+  ZoneVector<TopLevelLiveRange*>& fixed_float_live_ranges() {
+    return fixed_float_live_ranges_;
+  }
+  const ZoneVector<TopLevelLiveRange*>& fixed_float_live_ranges() const {
+    return fixed_float_live_ranges_;
+  }
   ZoneVector<TopLevelLiveRange*>& fixed_double_live_ranges() {
     return fixed_double_live_ranges_;
   }
   const ZoneVector<TopLevelLiveRange*>& fixed_double_live_ranges() const {
     return fixed_double_live_ranges_;
   }
+  ZoneVector<TopLevelLiveRange*>& fixed_simd128_live_ranges() {
+    return fixed_simd128_live_ranges_;
+  }
+  const ZoneVector<TopLevelLiveRange*>& fixed_simd128_live_ranges() const {
+    return fixed_simd128_live_ranges_;
+  }
   ZoneVector<BitVector*>& live_in_sets() { return live_in_sets_; }
   ZoneVector<BitVector*>& live_out_sets() { return live_out_sets_; }
   ZoneVector<SpillRange*>& spill_ranges() { return spill_ranges_; }
@@ -833,7 +857,9 @@
   ZoneVector<BitVector*> live_out_sets_;
   ZoneVector<TopLevelLiveRange*> live_ranges_;
   ZoneVector<TopLevelLiveRange*> fixed_live_ranges_;
+  ZoneVector<TopLevelLiveRange*> fixed_float_live_ranges_;
   ZoneVector<TopLevelLiveRange*> fixed_double_live_ranges_;
+  ZoneVector<TopLevelLiveRange*> fixed_simd128_live_ranges_;
   ZoneVector<SpillRange*> spill_ranges_;
   DelayedReferences delayed_references_;
   BitVector* assigned_registers_;
@@ -956,6 +982,8 @@
   const int* allocatable_register_codes() const {
     return allocatable_register_codes_;
   }
+  // Returns true iff we must check float register aliasing.
+  bool check_fp_aliasing() const { return check_fp_aliasing_; }
 
   // TODO(mtrofin): explain why splitting in gap START is always OK.
   LifetimePosition GetSplitPositionForInstruction(const LiveRange* range,
@@ -1006,6 +1034,7 @@
   const int num_registers_;
   int num_allocatable_registers_;
   const int* allocatable_register_codes_;
+  bool check_fp_aliasing_;
 
  private:
   bool no_combining_;
@@ -1047,8 +1076,17 @@
 
   // Helper methods for allocating registers.
   bool TryReuseSpillForPhi(TopLevelLiveRange* range);
-  bool TryAllocateFreeReg(LiveRange* range);
+  bool TryAllocateFreeReg(LiveRange* range,
+                          const Vector<LifetimePosition>& free_until_pos);
+  bool TryAllocatePreferredReg(LiveRange* range,
+                               const Vector<LifetimePosition>& free_until_pos);
+  void GetFPRegisterSet(MachineRepresentation rep, int* num_regs,
+                        int* num_codes, const int** codes) const;
+  void FindFreeRegistersForRange(LiveRange* range,
+                                 Vector<LifetimePosition> free_until_pos);
+  void ProcessCurrentRange(LiveRange* current);
   void AllocateBlockedReg(LiveRange* range);
+  bool TrySplitAndSpillSplinter(LiveRange* range);
 
   // Spill the given life range after position pos.
   void SpillAfter(LiveRange* range, LifetimePosition pos);
diff --git a/src/compiler/representation-change.cc b/src/compiler/representation-change.cc
index 22d809b..e3e5108 100644
--- a/src/compiler/representation-change.cc
+++ b/src/compiler/representation-change.cc
@@ -24,8 +24,6 @@
       return "truncate-to-word32";
     case TruncationKind::kWord64:
       return "truncate-to-word64";
-    case TruncationKind::kFloat32:
-      return "truncate-to-float32";
     case TruncationKind::kFloat64:
       return "truncate-to-float64";
     case TruncationKind::kAny:
@@ -42,15 +40,15 @@
 //     ^            ^
 //     \            |
 //      \         kFloat64  <--+
-//       \        ^    ^       |
-//        \       /    |       |
-//         kWord32  kFloat32  kBool
-//               ^     ^      ^
-//               \     |      /
-//                \    |     /
-//                 \   |    /
-//                  \  |   /
-//                   \ |  /
+//       \        ^            |
+//        \       /            |
+//         kWord32           kBool
+//               ^            ^
+//               \            /
+//                \          /
+//                 \        /
+//                  \      /
+//                   \    /
 //                   kNone
 
 // static
@@ -87,9 +85,6 @@
              rep2 == TruncationKind::kFloat64 || rep2 == TruncationKind::kAny;
     case TruncationKind::kWord64:
       return rep2 == TruncationKind::kWord64;
-    case TruncationKind::kFloat32:
-      return rep2 == TruncationKind::kFloat32 ||
-             rep2 == TruncationKind::kFloat64 || rep2 == TruncationKind::kAny;
     case TruncationKind::kFloat64:
       return rep2 == TruncationKind::kFloat64 || rep2 == TruncationKind::kAny;
     case TruncationKind::kAny:
@@ -147,8 +142,10 @@
       return GetTaggedSignedRepresentationFor(node, output_rep, output_type,
                                               use_node, use_info);
     case MachineRepresentation::kTaggedPointer:
-      DCHECK(use_info.type_check() == TypeCheckKind::kNone);
-      return GetTaggedPointerRepresentationFor(node, output_rep, output_type);
+      DCHECK(use_info.type_check() == TypeCheckKind::kNone ||
+             use_info.type_check() == TypeCheckKind::kHeapObject);
+      return GetTaggedPointerRepresentationFor(node, output_rep, output_type,
+                                               use_node, use_info);
     case MachineRepresentation::kTagged:
       DCHECK(use_info.type_check() == TypeCheckKind::kNone);
       return GetTaggedRepresentationFor(node, output_rep, output_type,
@@ -255,6 +252,24 @@
       return TypeError(node, output_rep, output_type,
                        MachineRepresentation::kTaggedSigned);
     }
+  } else if (output_rep == MachineRepresentation::kFloat32) {
+    if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
+      op = machine()->ChangeFloat32ToFloat64();
+      node = InsertConversion(node, op, use_node);
+      op = simplified()->CheckedFloat64ToInt32(
+          output_type->Maybe(Type::MinusZero())
+              ? CheckForMinusZeroMode::kCheckForMinusZero
+              : CheckForMinusZeroMode::kDontCheckForMinusZero);
+      node = InsertConversion(node, op, use_node);
+      if (SmiValuesAre32Bits()) {
+        op = simplified()->ChangeInt32ToTagged();
+      } else {
+        op = simplified()->CheckedInt32ToTaggedSigned();
+      }
+    } else {
+      return TypeError(node, output_rep, output_type,
+                       MachineRepresentation::kTaggedSigned);
+    }
   } else if (CanBeTaggedPointer(output_rep) &&
              use_info.type_check() == TypeCheckKind::kSignedSmall) {
     op = simplified()->CheckedTaggedToTaggedSigned();
@@ -272,34 +287,59 @@
 }
 
 Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
-    Node* node, MachineRepresentation output_rep, Type* output_type) {
+    Node* node, MachineRepresentation output_rep, Type* output_type,
+    Node* use_node, UseInfo use_info) {
   // Eagerly fold representation changes for constants.
   switch (node->opcode()) {
     case IrOpcode::kHeapConstant:
       return node;  // No change necessary.
     case IrOpcode::kInt32Constant:
-      if (output_type->Is(Type::Boolean())) {
-        return OpParameter<int32_t>(node) == 0 ? jsgraph()->FalseConstant()
-                                               : jsgraph()->TrueConstant();
-      } else {
-        return TypeError(node, output_rep, output_type,
-                         MachineRepresentation::kTaggedPointer);
-      }
     case IrOpcode::kFloat64Constant:
     case IrOpcode::kFloat32Constant:
-      return TypeError(node, output_rep, output_type,
-                       MachineRepresentation::kTaggedPointer);
+      UNREACHABLE();
     default:
       break;
   }
-  // Select the correct X -> Tagged operator.
+  // Select the correct X -> TaggedPointer operator.
+  Operator const* op;
   if (output_type->Is(Type::None())) {
     // This is an impossible value; it should not be used at runtime.
     // We just provide a dummy value here.
     return jsgraph()->TheHoleConstant();
+  } else if (output_rep == MachineRepresentation::kBit) {
+    return node;
+  } else if (IsWord(output_rep)) {
+    if (output_type->Is(Type::Unsigned32())) {
+      // uint32 -> float64 -> tagged
+      node = InsertChangeUint32ToFloat64(node);
+    } else if (output_type->Is(Type::Signed32())) {
+      // int32 -> float64 -> tagged
+      node = InsertChangeInt32ToFloat64(node);
+    } else {
+      return TypeError(node, output_rep, output_type,
+                       MachineRepresentation::kTaggedPointer);
+    }
+    op = simplified()->ChangeFloat64ToTaggedPointer();
+  } else if (output_rep == MachineRepresentation::kFloat32) {
+    // float32 -> float64 -> tagged
+    node = InsertChangeFloat32ToFloat64(node);
+    op = simplified()->ChangeFloat64ToTaggedPointer();
+  } else if (output_rep == MachineRepresentation::kFloat64) {
+    // float64 -> tagged
+    op = simplified()->ChangeFloat64ToTaggedPointer();
+  } else if (CanBeTaggedSigned(output_rep) &&
+             use_info.type_check() == TypeCheckKind::kHeapObject) {
+    if (!output_type->Maybe(Type::SignedSmall())) {
+      return node;
+    }
+    // TODO(turbofan): Consider adding a Bailout operator that just deopts
+    // for TaggedSigned output representation.
+    op = simplified()->CheckedTaggedToTaggedPointer();
+  } else {
+    return TypeError(node, output_rep, output_type,
+                     MachineRepresentation::kTaggedPointer);
   }
-  return TypeError(node, output_rep, output_type,
-                   MachineRepresentation::kTaggedPointer);
+  return InsertConversion(node, op, use_node);
 }
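The new TypeCheckKind::kHeapObject path only has to rule out Smis: V8 tags Smis with a low bit of 0 and heap-object pointers with a low bit of 1, so CheckedTaggedToTaggedPointer amounts to a single bit test plus a deopt exit. A conceptual sketch (not the emitted code; Deoptimize() is a hypothetical stand-in):

#include <cstdint>

void Deoptimize();  // hypothetical deopt exit

intptr_t CheckedTaggedToTaggedPointerSketch(intptr_t tagged) {
  const intptr_t kHeapObjectTagMask = 1;
  if ((tagged & kHeapObjectTagMask) == 0) Deoptimize();  // Smi: bail out
  return tagged;  // already a heap-object pointer
}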
 
 Node* RepresentationChanger::GetTaggedRepresentationFor(
@@ -311,23 +351,10 @@
     case IrOpcode::kHeapConstant:
       return node;  // No change necessary.
     case IrOpcode::kInt32Constant:
-      if (output_type->Is(Type::Signed32())) {
-        int32_t value = OpParameter<int32_t>(node);
-        return jsgraph()->Constant(value);
-      } else if (output_type->Is(Type::Unsigned32())) {
-        uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
-        return jsgraph()->Constant(static_cast<double>(value));
-      } else if (output_type->Is(Type::Boolean())) {
-        return OpParameter<int32_t>(node) == 0 ? jsgraph()->FalseConstant()
-                                               : jsgraph()->TrueConstant();
-      } else {
-        return TypeError(node, output_rep, output_type,
-                         MachineRepresentation::kTagged);
-      }
     case IrOpcode::kFloat64Constant:
-      return jsgraph()->Constant(OpParameter<double>(node));
     case IrOpcode::kFloat32Constant:
-      return jsgraph()->Constant(OpParameter<float>(node));
+      UNREACHABLE();
+      break;
     default:
       break;
   }
@@ -395,20 +422,14 @@
     Truncation truncation) {
   // Eagerly fold representation changes for constants.
   switch (node->opcode()) {
-    case IrOpcode::kFloat64Constant:
     case IrOpcode::kNumberConstant:
       return jsgraph()->Float32Constant(
           DoubleToFloat32(OpParameter<double>(node)));
     case IrOpcode::kInt32Constant:
-      if (output_type->Is(Type::Unsigned32())) {
-        uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
-        return jsgraph()->Float32Constant(static_cast<float>(value));
-      } else {
-        int32_t value = OpParameter<int32_t>(node);
-        return jsgraph()->Float32Constant(static_cast<float>(value));
-      }
+    case IrOpcode::kFloat64Constant:
     case IrOpcode::kFloat32Constant:
-      return node;  // No change necessary.
+      UNREACHABLE();
+      break;
     default:
       break;
   }
@@ -466,18 +487,10 @@
       case IrOpcode::kNumberConstant:
         return jsgraph()->Float64Constant(OpParameter<double>(node));
       case IrOpcode::kInt32Constant:
-        if (output_type->Is(Type::Signed32())) {
-          int32_t value = OpParameter<int32_t>(node);
-          return jsgraph()->Float64Constant(value);
-        } else {
-          DCHECK(output_type->Is(Type::Unsigned32()));
-          uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
-          return jsgraph()->Float64Constant(static_cast<double>(value));
-        }
       case IrOpcode::kFloat64Constant:
-        return node;  // No change necessary.
       case IrOpcode::kFloat32Constant:
-        return jsgraph()->Float64Constant(OpParameter<float>(node));
+        UNREACHABLE();
+        break;
       default:
         break;
     }
@@ -542,19 +555,11 @@
   // Eagerly fold representation changes for constants.
   switch (node->opcode()) {
     case IrOpcode::kInt32Constant:
-      return node;  // No change necessary.
-    case IrOpcode::kFloat32Constant: {
-      float const fv = OpParameter<float>(node);
-      if (use_info.type_check() == TypeCheckKind::kNone ||
-          ((use_info.type_check() == TypeCheckKind::kSignedSmall ||
-            use_info.type_check() == TypeCheckKind::kSigned32) &&
-           IsInt32Double(fv))) {
-        return MakeTruncatedInt32Constant(fv);
-      }
+    case IrOpcode::kFloat32Constant:
+    case IrOpcode::kFloat64Constant:
+      UNREACHABLE();
       break;
-    }
-    case IrOpcode::kNumberConstant:
-    case IrOpcode::kFloat64Constant: {
+    case IrOpcode::kNumberConstant: {
       double const fv = OpParameter<double>(node);
       if (use_info.type_check() == TypeCheckKind::kNone ||
           ((use_info.type_check() == TypeCheckKind::kSignedSmall ||
@@ -587,7 +592,7 @@
                use_info.type_check() == TypeCheckKind::kSigned32) {
       op = simplified()->CheckedFloat64ToInt32(
           output_type->Maybe(Type::MinusZero())
-              ? CheckForMinusZeroMode::kCheckForMinusZero
+              ? use_info.minus_zero_check()
               : CheckForMinusZeroMode::kDontCheckForMinusZero);
     }
   } else if (output_rep == MachineRepresentation::kFloat32) {
@@ -706,7 +711,7 @@
     }
   } else if (output_rep == MachineRepresentation::kTaggedSigned) {
     node = jsgraph()->graph()->NewNode(machine()->WordEqual(), node,
-                                       jsgraph()->ZeroConstant());
+                                       jsgraph()->IntPtrConstant(0));
     return jsgraph()->graph()->NewNode(machine()->Word32Equal(), node,
                                        jsgraph()->Int32Constant(0));
   } else if (IsWord(output_rep)) {
@@ -983,6 +988,10 @@
   return jsgraph()->graph()->NewNode(machine()->ChangeFloat64ToInt32(), node);
 }
 
+Node* RepresentationChanger::InsertChangeInt32ToFloat64(Node* node) {
+  return jsgraph()->graph()->NewNode(machine()->ChangeInt32ToFloat64(), node);
+}
+
 Node* RepresentationChanger::InsertChangeTaggedSignedToInt32(Node* node) {
   return jsgraph()->graph()->NewNode(simplified()->ChangeTaggedSignedToInt32(),
                                      node);
@@ -993,6 +1002,10 @@
                                      node);
 }
 
+Node* RepresentationChanger::InsertChangeUint32ToFloat64(Node* node) {
+  return jsgraph()->graph()->NewNode(machine()->ChangeUint32ToFloat64(), node);
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/representation-change.h b/src/compiler/representation-change.h
index f27108e..d7895da 100644
--- a/src/compiler/representation-change.h
+++ b/src/compiler/representation-change.h
@@ -19,7 +19,6 @@
   static Truncation Bool() { return Truncation(TruncationKind::kBool); }
   static Truncation Word32() { return Truncation(TruncationKind::kWord32); }
   static Truncation Word64() { return Truncation(TruncationKind::kWord64); }
-  static Truncation Float32() { return Truncation(TruncationKind::kFloat32); }
   static Truncation Float64() { return Truncation(TruncationKind::kFloat64); }
   static Truncation Any() { return Truncation(TruncationKind::kAny); }
 
@@ -63,7 +62,6 @@
     kBool,
     kWord32,
     kWord64,
-    kFloat32,
     kFloat64,
     kAny
   };
@@ -82,7 +80,8 @@
   kSignedSmall,
   kSigned32,
   kNumber,
-  kNumberOrOddball
+  kNumberOrOddball,
+  kHeapObject
 };
 
 inline std::ostream& operator<<(std::ostream& os, TypeCheckKind type_check) {
@@ -97,6 +96,8 @@
       return os << "Number";
     case TypeCheckKind::kNumberOrOddball:
       return os << "NumberOrOddball";
+    case TypeCheckKind::kHeapObject:
+      return os << "HeapObject";
   }
   UNREACHABLE();
   return os;
@@ -108,7 +109,8 @@
 //
 // 1. During propagation, the use info is used to inform the input node
 //    about what part of the input is used (we call this truncation) and what
-//    is the preferred representation.
+//    is the preferred representation. For conversions that will require
+//    checks, we also keep track of whether a minus zero check is needed.
 //
 // 2. During lowering, the use info is used to properly convert the input
 //    to the preferred representation. The preferred representation might be
@@ -117,10 +119,13 @@
 class UseInfo {
  public:
   UseInfo(MachineRepresentation representation, Truncation truncation,
-          TypeCheckKind type_check = TypeCheckKind::kNone)
+          TypeCheckKind type_check = TypeCheckKind::kNone,
+          CheckForMinusZeroMode minus_zero_check =
+              CheckForMinusZeroMode::kCheckForMinusZero)
       : representation_(representation),
         truncation_(truncation),
-        type_check_(type_check) {}
+        type_check_(type_check),
+        minus_zero_check_(minus_zero_check) {}
   static UseInfo TruncatingWord32() {
     return UseInfo(MachineRepresentation::kWord32, Truncation::Word32());
   }
@@ -130,8 +135,8 @@
   static UseInfo Bool() {
     return UseInfo(MachineRepresentation::kBit, Truncation::Bool());
   }
-  static UseInfo TruncatingFloat32() {
-    return UseInfo(MachineRepresentation::kFloat32, Truncation::Float32());
+  static UseInfo Float32() {
+    return UseInfo(MachineRepresentation::kFloat32, Truncation::Any());
   }
   static UseInfo TruncatingFloat64() {
     return UseInfo(MachineRepresentation::kFloat64, Truncation::Float64());
@@ -150,17 +155,25 @@
   }
 
   // Possibly deoptimizing conversions.
+  static UseInfo CheckedHeapObjectAsTaggedPointer() {
+    return UseInfo(MachineRepresentation::kTaggedPointer, Truncation::Any(),
+                   TypeCheckKind::kHeapObject);
+  }
   static UseInfo CheckedSignedSmallAsTaggedSigned() {
     return UseInfo(MachineRepresentation::kTaggedSigned, Truncation::Any(),
                    TypeCheckKind::kSignedSmall);
   }
-  static UseInfo CheckedSignedSmallAsWord32() {
+  static UseInfo CheckedSignedSmallAsWord32(
+      CheckForMinusZeroMode minus_zero_mode =
+          CheckForMinusZeroMode::kCheckForMinusZero) {
     return UseInfo(MachineRepresentation::kWord32, Truncation::Any(),
-                   TypeCheckKind::kSignedSmall);
+                   TypeCheckKind::kSignedSmall, minus_zero_mode);
   }
-  static UseInfo CheckedSigned32AsWord32() {
+  static UseInfo CheckedSigned32AsWord32(
+      CheckForMinusZeroMode minus_zero_mode =
+          CheckForMinusZeroMode::kCheckForMinusZero) {
     return UseInfo(MachineRepresentation::kWord32, Truncation::Any(),
-                   TypeCheckKind::kSigned32);
+                   TypeCheckKind::kSigned32, minus_zero_mode);
   }
   static UseInfo CheckedNumberAsFloat64() {
     return UseInfo(MachineRepresentation::kFloat64, Truncation::Float64(),
@@ -195,11 +208,14 @@
   MachineRepresentation representation() const { return representation_; }
   Truncation truncation() const { return truncation_; }
   TypeCheckKind type_check() const { return type_check_; }
+  CheckForMinusZeroMode minus_zero_check() const { return minus_zero_check_; }
 
  private:
   MachineRepresentation representation_;
   Truncation truncation_;
   TypeCheckKind type_check_;
+  // TODO(jarin) Integrate with truncations.
+  CheckForMinusZeroMode minus_zero_check_;
 };
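The minus-zero mode matters because float64 -0.0 and +0.0 both truncate to int32 0, while JavaScript can observe the difference (1 / -0 === -Infinity), so a checked float64-to-int32 conversion may need to deoptimize on -0.0. A self-contained sketch of the test itself:

#include <cmath>

// -0.0 compares equal to 0.0, so the sign bit must be inspected directly.
bool IsMinusZero(double value) {
  return value == 0.0 && std::signbit(value);
}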
 
 // Contains logic related to changing the representation of values for constants
@@ -251,7 +267,8 @@
                                          UseInfo use_info);
   Node* GetTaggedPointerRepresentationFor(Node* node,
                                           MachineRepresentation output_rep,
-                                          Type* output_type);
+                                          Type* output_type, Node* use_node,
+                                          UseInfo use_info);
   Node* GetTaggedRepresentationFor(Node* node, MachineRepresentation output_rep,
                                    Type* output_type, Truncation truncation);
   Node* GetFloat32RepresentationFor(Node* node,
@@ -275,8 +292,10 @@
   Node* InsertChangeFloat32ToFloat64(Node* node);
   Node* InsertChangeFloat64ToInt32(Node* node);
   Node* InsertChangeFloat64ToUint32(Node* node);
+  Node* InsertChangeInt32ToFloat64(Node* node);
   Node* InsertChangeTaggedSignedToInt32(Node* node);
   Node* InsertChangeTaggedToFloat64(Node* node);
+  Node* InsertChangeUint32ToFloat64(Node* node);
 
   Node* InsertConversion(Node* node, const Operator* op, Node* use_node);
 
diff --git a/src/compiler/s390/code-generator-s390.cc b/src/compiler/s390/code-generator-s390.cc
index 284c3fc..5dcc82f 100644
--- a/src/compiler/s390/code-generator-s390.cc
+++ b/src/compiler/s390/code-generator-s390.cc
@@ -924,8 +924,7 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
-    case kArchTailCallJSFunctionFromJSFunction:
-    case kArchTailCallJSFunction: {
+    case kArchTailCallJSFunctionFromJSFunction: {
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
         // Check the function's context matches the context argument.
@@ -934,11 +933,9 @@
         __ CmpP(cp, kScratchReg);
         __ Assert(eq, kWrongFunctionContext);
       }
-      if (opcode == kArchTailCallJSFunctionFromJSFunction) {
-        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
-                                         i.TempRegister(0), i.TempRegister(1),
-                                         i.TempRegister(2));
-      }
+      AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                       i.TempRegister(0), i.TempRegister(1),
+                                       i.TempRegister(2));
       __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
       __ Jump(ip);
       frame_access_state()->ClearSPDelta();
@@ -995,7 +992,7 @@
       break;
     }
     case kArchRet:
-      AssembleReturn();
+      AssembleReturn(instr->InputAt(0));
       break;
     case kArchStackPointer:
       __ LoadRR(i.OutputRegister(), sp);
@@ -1159,39 +1156,46 @@
       __ lr(i.OutputRegister(0), r1);
       __ srag(i.OutputRegister(1), r1, Operand(32));
       break;
-    case kS390_ShiftLeftPair:
+    case kS390_ShiftLeftPair: {
+      Register second_output =
+          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
       if (instr->InputAt(2)->IsImmediate()) {
-        __ ShiftLeftPair(i.OutputRegister(0), i.OutputRegister(1),
-                         i.InputRegister(0), i.InputRegister(1),
-                         i.InputInt32(2));
+        __ ShiftLeftPair(i.OutputRegister(0), second_output, i.InputRegister(0),
+                         i.InputRegister(1), i.InputInt32(2));
       } else {
-        __ ShiftLeftPair(i.OutputRegister(0), i.OutputRegister(1),
-                         i.InputRegister(0), i.InputRegister(1), kScratchReg,
-                         i.InputRegister(2));
+        __ ShiftLeftPair(i.OutputRegister(0), second_output, i.InputRegister(0),
+                         i.InputRegister(1), kScratchReg, i.InputRegister(2));
       }
       break;
-    case kS390_ShiftRightPair:
+    }
+    case kS390_ShiftRightPair: {
+      Register second_output =
+          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
       if (instr->InputAt(2)->IsImmediate()) {
-        __ ShiftRightPair(i.OutputRegister(0), i.OutputRegister(1),
+        __ ShiftRightPair(i.OutputRegister(0), second_output,
                           i.InputRegister(0), i.InputRegister(1),
                           i.InputInt32(2));
       } else {
-        __ ShiftRightPair(i.OutputRegister(0), i.OutputRegister(1),
+        __ ShiftRightPair(i.OutputRegister(0), second_output,
                           i.InputRegister(0), i.InputRegister(1), kScratchReg,
                           i.InputRegister(2));
       }
       break;
-    case kS390_ShiftRightArithPair:
+    }
+    case kS390_ShiftRightArithPair: {
+      Register second_output =
+          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
       if (instr->InputAt(2)->IsImmediate()) {
-        __ ShiftRightArithPair(i.OutputRegister(0), i.OutputRegister(1),
+        __ ShiftRightArithPair(i.OutputRegister(0), second_output,
                                i.InputRegister(0), i.InputRegister(1),
                                i.InputInt32(2));
       } else {
-        __ ShiftRightArithPair(i.OutputRegister(0), i.OutputRegister(1),
+        __ ShiftRightArithPair(i.OutputRegister(0), second_output,
                                i.InputRegister(0), i.InputRegister(1),
                                kScratchReg, i.InputRegister(2));
       }
       break;
+    }
 #endif
     case kS390_RotRight32:
       if (HasRegisterInput(instr, 1)) {
@@ -1240,7 +1244,21 @@
       break;
 #if V8_TARGET_ARCH_S390X
     case kS390_RotLeftAndClear64:
-      UNIMPLEMENTED();  // Find correct instruction
+      if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+        int shiftAmount = i.InputInt32(1);
+        int endBit = 63 - shiftAmount;
+        int startBit = 63 - i.InputInt32(2);
+        __ risbg(i.OutputRegister(), i.InputRegister(0), Operand(startBit),
+                 Operand(endBit), Operand(shiftAmount), true);
+      } else {
+        int shiftAmount = i.InputInt32(1);
+        int clearBit = 63 - i.InputInt32(2);
+        __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
+        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
+        __ srlg(i.OutputRegister(), i.OutputRegister(),
+                Operand(clearBit + shiftAmount));
+        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(shiftAmount));
+      }
       break;
     case kS390_RotLeftAndClearLeft64:
       if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
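For reference, the four-instruction fallback in kS390_RotLeftAndClear64 above (rllg/sllg/srlg/sllg) computes a rotate-left and then clears the high clearBit bits and the low shiftAmount bits. A plain C++ sketch of the same bit math, assuming 0 < shiftAmount < 64 and 0 <= clearBit with clearBit + shiftAmount < 64:

#include <cstdint>

uint64_t RotLeftAndClearSketch(uint64_t x, int shiftAmount, int clearBit) {
  // rllg: rotate left by shiftAmount.
  uint64_t rot = (x << shiftAmount) | (x >> (64 - shiftAmount));
  rot <<= clearBit;                // sllg: drop the high clearBit bits
  rot >>= clearBit + shiftAmount;  // srlg: also drop the low shiftAmount bits
  rot <<= shiftAmount;             // sllg: move the kept bits back into place
  return rot;
}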
@@ -2202,7 +2220,7 @@
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   DeoptimizeReason deoptimization_reason =
       GetDeoptimizationReason(deoptimization_id);
-  __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
+  __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
 }
@@ -2238,6 +2256,9 @@
       __ LoadRR(fp, sp);
     } else if (descriptor->IsJSFunctionCall()) {
       __ Prologue(this->info()->GeneratePreagedPrologue(), ip);
+      if (descriptor->PushArgumentCount()) {
+        __ Push(kJavaScriptCallArgCountRegister);
+      }
     } else {
       StackFrame::Type type = info()->GetOutputStackFrameType();
       // TODO(mbrandy): Detect cases where ip is the entrypoint (for
@@ -2246,7 +2267,8 @@
     }
   }
 
-  int shrink_slots = frame()->GetSpillSlotCount();
+  int shrink_slots =
+      frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
     __ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -2280,7 +2302,7 @@
   }
 }
 
-void CodeGenerator::AssembleReturn() {
+void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   int pop_count = static_cast<int>(descriptor->StackParameterCount());
 
@@ -2296,19 +2318,32 @@
     __ MultiPopDoubles(double_saves);
   }
 
+  S390OperandConverter g(this, nullptr);
   if (descriptor->IsCFunctionCall()) {
     AssembleDeconstructFrame();
   } else if (frame_access_state()->has_frame()) {
-    // Canonicalize JSFunction return sites for now.
-    if (return_label_.is_bound()) {
-      __ b(&return_label_);
-      return;
+    // Canonicalize JSFunction return sites for now unless they have a
+    // variable number of stack slot pops.
+    if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+      if (return_label_.is_bound()) {
+        __ b(&return_label_);
+        return;
+      } else {
+        __ bind(&return_label_);
+        AssembleDeconstructFrame();
+      }
     } else {
-      __ bind(&return_label_);
       AssembleDeconstructFrame();
     }
   }
-  __ Ret(pop_count);
+  if (pop->IsImmediate()) {
+    DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
+    pop_count += g.ToConstant(pop).ToInt32();
+  } else {
+    __ Drop(g.ToRegister(pop));
+  }
+  __ Drop(pop_count);
+  __ Ret();
 }
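AssembleReturn now handles a variable number of additional stack slots to pop, which is also why the shared return_label_ epilogue is only reused when the extra pop operand is a constant zero: a canonicalized return site cannot serve call sites with differing pop counts. A sketch of the final adjustment, with hypothetical helpers standing in for the macro assembler:

void Drop(int slots);  // hypothetical: pop a constant number of slots
void DropDynamic();    // hypothetical: pop a register-held slot count
void Ret();

void EmitReturnSketch(int stack_param_count, bool pop_is_imm, int pop_imm) {
  if (pop_is_imm) {
    Drop(stack_param_count + pop_imm);  // constant counts fold together
  } else {
    DropDynamic();            // dynamic extra slots first ...
    Drop(stack_param_count);  // ... then the fixed parameters
  }
  Ret();
}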
 
 void CodeGenerator::AssembleMove(InstructionOperand* source,
diff --git a/src/compiler/s390/instruction-selector-s390.cc b/src/compiler/s390/instruction-selector-s390.cc
index f1aa332..eed08a9 100644
--- a/src/compiler/s390/instruction-selector-s390.cc
+++ b/src/compiler/s390/instruction-selector-s390.cc
@@ -343,7 +343,7 @@
   MachineRepresentation rep = store_rep.representation();
 
   if (write_barrier_kind != kNoWriteBarrier) {
-    DCHECK_EQ(MachineRepresentation::kTagged, rep);
+    DCHECK(CanBeTaggedPointer(rep));
     AddressingMode addressing_mode;
     InstructionOperand inputs[3];
     size_t input_count = 0;
@@ -835,48 +835,69 @@
 }
 
 #if !V8_TARGET_ARCH_S390X
-void VisitPairBinop(InstructionSelector* selector, ArchOpcode opcode,
-                    Node* node) {
+void VisitPairBinop(InstructionSelector* selector, InstructionCode opcode,
+                    InstructionCode opcode2, Node* node) {
   S390OperandGenerator g(selector);
 
-  // We use UseUniqueRegister here to avoid register sharing with the output
-  // registers.
-  InstructionOperand inputs[] = {
-      g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
-      g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+  Node* projection1 = NodeProperties::FindProjection(node, 1);
+  if (projection1) {
+    // We use UseUniqueRegister here to avoid register sharing with the output
+    // registers.
+    InstructionOperand inputs[] = {
+        g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+        g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
 
-  InstructionOperand outputs[] = {
-      g.DefineAsRegister(node),
-      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+    InstructionOperand outputs[] = {
+        g.DefineAsRegister(node),
+        g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
 
-  selector->Emit(opcode, 2, outputs, 4, inputs);
+    selector->Emit(opcode, 2, outputs, 4, inputs);
+  } else {
+    // The high word of the result is not used, so we emit the standard 32-bit
+    // instruction.
+    selector->Emit(opcode2, g.DefineSameAsFirst(node),
+                   g.UseRegister(node->InputAt(0)),
+                   g.UseRegister(node->InputAt(2)));
+  }
 }
 
 void InstructionSelector::VisitInt32PairAdd(Node* node) {
-  VisitPairBinop(this, kS390_AddPair, node);
+  VisitPairBinop(this, kS390_AddPair, kS390_Add32, node);
 }
 
 void InstructionSelector::VisitInt32PairSub(Node* node) {
-  VisitPairBinop(this, kS390_SubPair, node);
+  VisitPairBinop(this, kS390_SubPair, kS390_Sub32, node);
 }
 
 void InstructionSelector::VisitInt32PairMul(Node* node) {
   S390OperandGenerator g(this);
-  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
-                                 g.UseUniqueRegister(node->InputAt(1)),
-                                 g.UseUniqueRegister(node->InputAt(2)),
-                                 g.UseUniqueRegister(node->InputAt(3))};
+  Node* projection1 = NodeProperties::FindProjection(node, 1);
+  if (projection1) {
+    InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+                                   g.UseUniqueRegister(node->InputAt(1)),
+                                   g.UseUniqueRegister(node->InputAt(2)),
+                                   g.UseUniqueRegister(node->InputAt(3))};
 
-  InstructionOperand outputs[] = {
-      g.DefineAsRegister(node),
-      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+    InstructionOperand outputs[] = {
+        g.DefineAsRegister(node),
+        g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
 
-  Emit(kS390_MulPair, 2, outputs, 4, inputs);
+    Emit(kS390_MulPair, 2, outputs, 4, inputs);
+  } else {
+    // The high word of the result is not used, so we emit the standard 32-bit
+    // instruction.
+    Emit(kS390_Mul32, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(2)));
+  }
 }
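The projection check works because the low 32 bits of a 64-bit add, subtract, or multiply depend only on the low 32 bits of the operands; when Projection(1) (the high word) has no uses, the pair operation can legally degrade to the ordinary 32-bit instruction. A quick sanity check of that identity for multiplication:

#include <cstdint>

// The low word of the 64-bit product equals the 32-bit product of the low
// words (modulo 2^32 arithmetic); the same holds for add and subtract.
bool LowWordIdentityHolds(uint64_t a, uint64_t b) {
  uint32_t lo = static_cast<uint32_t>(a) * static_cast<uint32_t>(b);
  return lo == static_cast<uint32_t>(a * b);
}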
 
-void VisitPairShift(InstructionSelector* selector, ArchOpcode opcode,
+namespace {
+// Shared routine for multiple shift operations.
+void VisitPairShift(InstructionSelector* selector, InstructionCode opcode,
                     Node* node) {
   S390OperandGenerator g(selector);
+  // We use g.UseUniqueRegister here to guarantee that there is
+  // no register aliasing of input registers with output registers.
   Int32Matcher m(node->InputAt(2));
   InstructionOperand shift_operand;
   if (m.HasValue()) {
@@ -885,16 +906,27 @@
     shift_operand = g.UseUniqueRegister(m.node());
   }
 
-  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
-                                 g.UseRegister(node->InputAt(1)),
+  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+                                 g.UseUniqueRegister(node->InputAt(1)),
                                  shift_operand};
 
-  InstructionOperand outputs[] = {
-      g.DefineSameAsFirst(node),
-      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+  Node* projection1 = NodeProperties::FindProjection(node, 1);
 
-  selector->Emit(opcode, 2, outputs, 3, inputs);
+  InstructionOperand outputs[2];
+  InstructionOperand temps[1];
+  int32_t output_count = 0;
+  int32_t temp_count = 0;
+
+  outputs[output_count++] = g.DefineAsRegister(node);
+  if (projection1) {
+    outputs[output_count++] = g.DefineAsRegister(projection1);
+  } else {
+    temps[temp_count++] = g.TempRegister();
+  }
+
+  selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
 }
+}  // namespace
 
 void InstructionSelector::VisitWord32PairShl(Node* node) {
   VisitPairShift(this, kS390_ShiftLeftPair, node);
@@ -1575,21 +1607,22 @@
 void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                           Node* value, InstructionCode opcode,
                           FlagsContinuation* cont) {
-  while (selector->CanCover(user, value)) {
+  // Try to combine with comparisons against 0 by simply inverting the branch.
+  while (value->opcode() == IrOpcode::kWord32Equal &&
+         selector->CanCover(user, value)) {
+    Int32BinopMatcher m(value);
+    if (!m.right().Is(0)) break;
+
+    user = value;
+    value = m.left().node();
+    cont->Negate();
+  }
+
+  if (selector->CanCover(user, value)) {
     switch (value->opcode()) {
-      case IrOpcode::kWord32Equal: {
-        // Combine with comparisons against 0 by simply inverting the
-        // continuation.
-        Int32BinopMatcher m(value);
-        if (m.right().Is(0)) {
-          user = value;
-          value = m.left().node();
-          cont->Negate();
-          continue;
-        }
+      case IrOpcode::kWord32Equal:
         cont->OverwriteAndNegateIfEqual(kEqual);
         return VisitWord32Compare(selector, value, cont);
-      }
       case IrOpcode::kInt32LessThan:
         cont->OverwriteAndNegateIfEqual(kSignedLessThan);
         return VisitWord32Compare(selector, value, cont);
@@ -1708,7 +1741,6 @@
       default:
         break;
     }
-    break;
   }
 
   // Branch could not be combined with a compare, emit compare against 0.
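The rewritten loop peels Word32Equal(x, 0) wrappers one at a time, flipping the branch continuation for each, so an even number of wrappers cancels out and an odd number leaves a single negation. In effect (sketch, hypothetical naming):

// Each (x == 0) wrapper inverts the branch sense once.
bool FinalBranchIsNegated(int equal_zero_wrappers) {
  bool negated = false;
  while (equal_zero_wrappers-- > 0) negated = !negated;
  return negated;  // true iff the original wrapper count was odd
}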
diff --git a/src/compiler/schedule.cc b/src/compiler/schedule.cc
index 6bd1a17..eb3dda8 100644
--- a/src/compiler/schedule.cc
+++ b/src/compiler/schedule.cc
@@ -344,7 +344,7 @@
       split_edge_block->set_control(BasicBlock::kGoto);
       split_edge_block->successors().push_back(block);
       split_edge_block->predecessors().push_back(pred);
-      split_edge_block->set_deferred(pred->deferred());
+      split_edge_block->set_deferred(block->deferred());
       *current_pred = split_edge_block;
       // Find a corresponding successor in the previous block, replace it
       // with the split edge block... but only do it once, since we only
diff --git a/src/compiler/schedule.h b/src/compiler/schedule.h
index 4fc0d0a..3f9750c 100644
--- a/src/compiler/schedule.h
+++ b/src/compiler/schedule.h
@@ -7,6 +7,8 @@
 
 #include <iosfwd>
 
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
 #include "src/zone/zone-containers.h"
 
 namespace v8 {
@@ -26,7 +28,8 @@
 // A basic block contains an ordered list of nodes and ends with a control
 // node. Note that if a basic block has phis, then all phis must appear as the
 // first nodes in the block.
-class BasicBlock final : public ZoneObject {
+class V8_EXPORT_PRIVATE BasicBlock final
+    : public NON_EXPORTED_BASE(ZoneObject) {
  public:
   // Possible control nodes that can end a block.
   enum Control {
@@ -177,7 +180,7 @@
 // and ordering them within basic blocks. Prior to computing a schedule,
 // a graph has no notion of control flow ordering other than that induced
 // by the graph's dependencies. A schedule is required to generate code.
-class Schedule final : public ZoneObject {
+class V8_EXPORT_PRIVATE Schedule final : public NON_EXPORTED_BASE(ZoneObject) {
  public:
   explicit Schedule(Zone* zone, size_t node_count_hint = 0);
 
@@ -282,7 +285,7 @@
   DISALLOW_COPY_AND_ASSIGN(Schedule);
 };
 
-std::ostream& operator<<(std::ostream&, const Schedule&);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, const Schedule&);
 
 }  // namespace compiler
 }  // namespace internal
diff --git a/src/compiler/scheduler.h b/src/compiler/scheduler.h
index 416ba5c..1a08e4c 100644
--- a/src/compiler/scheduler.h
+++ b/src/compiler/scheduler.h
@@ -9,7 +9,8 @@
 #include "src/compiler/node.h"
 #include "src/compiler/opcodes.h"
 #include "src/compiler/schedule.h"
-#include "src/compiler/zone-pool.h"
+#include "src/compiler/zone-stats.h"
+#include "src/globals.h"
 #include "src/zone/zone-containers.h"
 
 namespace v8 {
@@ -25,7 +26,7 @@
 
 // Computes a schedule from a graph, placing nodes into basic blocks and
 // ordering the basic blocks in the special RPO order.
-class Scheduler {
+class V8_EXPORT_PRIVATE Scheduler {
  public:
   // Flags that control the mode of operation.
   enum Flag { kNoFlags = 0u, kSplitNodes = 1u << 1 };
diff --git a/src/compiler/simd-scalar-lowering.cc b/src/compiler/simd-scalar-lowering.cc
new file mode 100644
index 0000000..c5a94b4
--- /dev/null
+++ b/src/compiler/simd-scalar-lowering.cc
@@ -0,0 +1,410 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/simd-scalar-lowering.h"
+#include "src/compiler/diamond.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+
+#include "src/compiler/node.h"
+#include "src/wasm/wasm-module.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+SimdScalarLowering::SimdScalarLowering(
+    Graph* graph, MachineOperatorBuilder* machine,
+    CommonOperatorBuilder* common, Zone* zone,
+    Signature<MachineRepresentation>* signature)
+    : zone_(zone),
+      graph_(graph),
+      machine_(machine),
+      common_(common),
+      state_(graph, 3),
+      stack_(zone),
+      replacements_(nullptr),
+      signature_(signature),
+      placeholder_(
+          graph->NewNode(common->Parameter(-2, "placeholder"), graph->start())),
+      parameter_count_after_lowering_(-1) {
+  DCHECK_NOT_NULL(graph);
+  DCHECK_NOT_NULL(graph->end());
+  replacements_ = zone->NewArray<Replacement>(graph->NodeCount());
+  memset(replacements_, 0, sizeof(Replacement) * graph->NodeCount());
+}
+
+void SimdScalarLowering::LowerGraph() {
+  stack_.push_back({graph()->end(), 0});
+  state_.Set(graph()->end(), State::kOnStack);
+  replacements_[graph()->end()->id()].type = SimdType::kInt32;
+
+  while (!stack_.empty()) {
+    NodeState& top = stack_.back();
+    if (top.input_index == top.node->InputCount()) {
+      // All inputs of top have already been lowered, now lower top.
+      stack_.pop_back();
+      state_.Set(top.node, State::kVisited);
+      LowerNode(top.node);
+    } else {
+      // Push the next input onto the stack.
+      Node* input = top.node->InputAt(top.input_index++);
+      if (state_.Get(input) == State::kUnvisited) {
+        SetLoweredType(input, top.node);
+        if (input->opcode() == IrOpcode::kPhi) {
+          // To break cycles with phi nodes we push phis to the front of the
+          // stack (a deque), so that they are processed only after all other
+          // nodes.
+          PreparePhiReplacement(input);
+          stack_.push_front({input, 0});
+        } else {
+          stack_.push_back({input, 0});
+        }
+        state_.Set(input, State::kOnStack);
+      }
+    }
+  }
+}
+
+#define FOREACH_INT32X4_OPCODE(V) \
+  V(Int32x4Add)                   \
+  V(Int32x4ExtractLane)           \
+  V(CreateInt32x4)
+
+#define FOREACH_FLOAT32X4_OPCODE(V) \
+  V(Float32x4Add)                   \
+  V(Float32x4ExtractLane)           \
+  V(CreateFloat32x4)
+
+void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
+  switch (node->opcode()) {
+#define CASE_STMT(name) case IrOpcode::k##name:
+    FOREACH_INT32X4_OPCODE(CASE_STMT)
+    case IrOpcode::kReturn:
+    case IrOpcode::kParameter:
+    case IrOpcode::kCall: {
+      replacements_[node->id()].type = SimdType::kInt32;
+      break;
+    }
+      FOREACH_FLOAT32X4_OPCODE(CASE_STMT) {
+        replacements_[node->id()].type = SimdType::kFloat32;
+        break;
+      }
+#undef CASE_STMT
+    default:
+      replacements_[node->id()].type = replacements_[output->id()].type;
+  }
+}
+
+static int GetParameterIndexAfterLowering(
+    Signature<MachineRepresentation>* signature, int old_index) {
+  // In function calls, the simd128 types are passed as 4 Int32 types. The
+  // parameters are bitcast to the expected lane types as needed by the
+  // various operations.
+  int result = old_index;
+  for (int i = 0; i < old_index; i++) {
+    if (signature->GetParam(i) == MachineRepresentation::kSimd128) {
+      result += 3;
+    }
+  }
+  return result;
+}
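For a concrete picture, assume a hypothetical signature (i32, s128, i32): the s128 parameter at old index 1 expands to new indices 1..4, so the trailing i32 at old index 2 shifts to new index 5. A standalone version of the mapping that can be checked directly:

#include <cassert>
#include <vector>

int ParamIndexAfterLoweringSketch(const std::vector<bool>& is_simd128,
                                  int old_index) {
  int result = old_index;
  for (int i = 0; i < old_index; i++) {
    if (is_simd128[i]) result += 3;  // one s128 becomes four i32 slots
  }
  return result;
}

// (i32, s128, i32): old index 0 -> 0, old index 1 -> 1, old index 2 -> 5.
// assert(ParamIndexAfterLoweringSketch({false, true, false}, 2) == 5);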
+
+int SimdScalarLowering::GetParameterCountAfterLowering() {
+  if (parameter_count_after_lowering_ == -1) {
+    // GetParameterIndexAfterLowering(parameter_count) returns the parameter
+    // count after lowering.
+    parameter_count_after_lowering_ = GetParameterIndexAfterLowering(
+        signature(), static_cast<int>(signature()->parameter_count()));
+  }
+  return parameter_count_after_lowering_;
+}
+
+static int GetReturnCountAfterLowering(
+    Signature<MachineRepresentation>* signature) {
+  int result = static_cast<int>(signature->return_count());
+  for (int i = 0; i < static_cast<int>(signature->return_count()); i++) {
+    if (signature->GetReturn(i) == MachineRepresentation::kSimd128) {
+      result += 3;
+    }
+  }
+  return result;
+}
+
+void SimdScalarLowering::LowerNode(Node* node) {
+  SimdType rep_type = ReplacementType(node);
+  switch (node->opcode()) {
+    case IrOpcode::kStart: {
+      int parameter_count = GetParameterCountAfterLowering();
+      // Only exchange the node if the parameter count actually changed.
+      if (parameter_count != static_cast<int>(signature()->parameter_count())) {
+        int delta =
+            parameter_count - static_cast<int>(signature()->parameter_count());
+        int new_output_count = node->op()->ValueOutputCount() + delta;
+        NodeProperties::ChangeOp(node, common()->Start(new_output_count));
+      }
+      break;
+    }
+    case IrOpcode::kParameter: {
+      DCHECK(node->InputCount() == 1);
+      // Only exchange the node if the parameter count actually changed. We do
+      // not even have to do the default lowering because the start node,
+      // the only input of a parameter node, only changes if the parameter count
+      // changes.
+      if (GetParameterCountAfterLowering() !=
+          static_cast<int>(signature()->parameter_count())) {
+        int old_index = ParameterIndexOf(node->op());
+        int new_index = GetParameterIndexAfterLowering(signature(), old_index);
+        if (old_index == new_index) {
+          NodeProperties::ChangeOp(node, common()->Parameter(new_index));
+
+          Node* new_node[kMaxLanes];
+          for (int i = 0; i < kMaxLanes; i++) {
+            new_node[i] = nullptr;
+          }
+          new_node[0] = node;
+          if (signature()->GetParam(old_index) ==
+              MachineRepresentation::kSimd128) {
+            for (int i = 1; i < kMaxLanes; i++) {
+              new_node[i] = graph()->NewNode(common()->Parameter(new_index + i),
+                                             graph()->start());
+            }
+          }
+          ReplaceNode(node, new_node);
+        }
+      }
+      break;
+    }
+    case IrOpcode::kReturn: {
+      DefaultLowering(node);
+      int new_return_count = GetReturnCountAfterLowering(signature());
+      if (static_cast<int>(signature()->return_count()) != new_return_count) {
+        NodeProperties::ChangeOp(node, common()->Return(new_return_count));
+      }
+      break;
+    }
+    case IrOpcode::kCall: {
+      // TODO(turbofan): Make WASM code const-correct wrt. CallDescriptor.
+      CallDescriptor* descriptor =
+          const_cast<CallDescriptor*>(CallDescriptorOf(node->op()));
+      if (DefaultLowering(node) ||
+          (descriptor->ReturnCount() == 1 &&
+           descriptor->GetReturnType(0) == MachineType::Simd128())) {
+        // We have to adjust the call descriptor.
+        const Operator* op =
+            common()->Call(wasm::ModuleEnv::GetI32WasmCallDescriptorForSimd(
+                zone(), descriptor));
+        NodeProperties::ChangeOp(node, op);
+      }
+      if (descriptor->ReturnCount() == 1 &&
+          descriptor->GetReturnType(0) == MachineType::Simd128()) {
+        // We access the additional return values through projections.
+        Node* rep_node[kMaxLanes];
+        for (int i = 0; i < kMaxLanes; i++) {
+          rep_node[i] =
+              graph()->NewNode(common()->Projection(i), node, graph()->start());
+        }
+        ReplaceNode(node, rep_node);
+      }
+      break;
+    }
+    case IrOpcode::kPhi: {
+      MachineRepresentation rep = PhiRepresentationOf(node->op());
+      if (rep == MachineRepresentation::kSimd128) {
+        // The replacement nodes have already been created, we only have to
+        // replace placeholder nodes.
+        Node** rep_node = GetReplacements(node);
+        for (int i = 0; i < node->op()->ValueInputCount(); i++) {
+          Node** rep_input =
+              GetReplacementsWithType(node->InputAt(i), rep_type);
+          for (int j = 0; j < kMaxLanes; j++) {
+            rep_node[j]->ReplaceInput(i, rep_input[j]);
+          }
+        }
+      } else {
+        DefaultLowering(node);
+      }
+      break;
+    }
+
+    case IrOpcode::kInt32x4Add: {
+      DCHECK(node->InputCount() == 2);
+      Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
+      Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type);
+      Node* rep_node[kMaxLanes];
+      for (int i = 0; i < kMaxLanes; i++) {
+        rep_node[i] =
+            graph()->NewNode(machine()->Int32Add(), rep_left[i], rep_right[i]);
+      }
+      ReplaceNode(node, rep_node);
+      break;
+    }
+
+    case IrOpcode::kCreateInt32x4: {
+      Node* rep_node[kMaxLanes];
+      for (int i = 0; i < kMaxLanes; i++) {
+        DCHECK(!HasReplacement(1, node->InputAt(i)));
+        rep_node[i] = node->InputAt(i);
+      }
+      ReplaceNode(node, rep_node);
+      break;
+    }
+
+    case IrOpcode::kInt32x4ExtractLane: {
+      Node* laneNode = node->InputAt(1);
+      DCHECK_EQ(laneNode->opcode(), IrOpcode::kInt32Constant);
+      int32_t lane = OpParameter<int32_t>(laneNode);
+      Node* rep_node[kMaxLanes] = {
+          GetReplacementsWithType(node->InputAt(0), rep_type)[lane], nullptr,
+          nullptr, nullptr};
+      ReplaceNode(node, rep_node);
+      break;
+    }
+
+    case IrOpcode::kFloat32x4Add: {
+      DCHECK(node->InputCount() == 2);
+      Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
+      Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type);
+      Node* rep_node[kMaxLanes];
+      for (int i = 0; i < kMaxLanes; i++) {
+        rep_node[i] = graph()->NewNode(machine()->Float32Add(), rep_left[i],
+                                       rep_right[i]);
+      }
+      ReplaceNode(node, rep_node);
+      break;
+    }
+
+    case IrOpcode::kCreateFloat32x4: {
+      Node* rep_node[kMaxLanes];
+      for (int i = 0; i < kMaxLanes; i++) {
+        DCHECK(!HasReplacement(1, node->InputAt(i)));
+        rep_node[i] = node->InputAt(i);
+      }
+      ReplaceNode(node, rep_node);
+      break;
+    }
+
+    case IrOpcode::kFloat32x4ExtractLane: {
+      Node* laneNode = node->InputAt(1);
+      DCHECK_EQ(laneNode->opcode(), IrOpcode::kInt32Constant);
+      int32_t lane = OpParameter<int32_t>(laneNode);
+      Node* rep_node[kMaxLanes] = {
+          GetReplacementsWithType(node->InputAt(0), rep_type)[lane], nullptr,
+          nullptr, nullptr};
+      ReplaceNode(node, rep_node);
+      break;
+    }
+
+    default: { DefaultLowering(node); }
+  }
+}
+
+bool SimdScalarLowering::DefaultLowering(Node* node) {
+  bool something_changed = false;
+  for (int i = NodeProperties::PastValueIndex(node) - 1; i >= 0; i--) {
+    Node* input = node->InputAt(i);
+    if (HasReplacement(0, input)) {
+      something_changed = true;
+      node->ReplaceInput(i, GetReplacements(input)[0]);
+    }
+    if (HasReplacement(1, input)) {
+      something_changed = true;
+      for (int j = 1; j < kMaxLanes; j++) {
+        node->InsertInput(zone(), i + j, GetReplacements(input)[j]);
+      }
+    }
+  }
+  return something_changed;
+}
+
+void SimdScalarLowering::ReplaceNode(Node* old, Node** new_node) {
+  // If new_node[0] == nullptr, then all other lanes must be nullptr too.
+  DCHECK(new_node[0] != nullptr ||
+         (new_node[1] == nullptr && new_node[2] == nullptr &&
+          new_node[3] == nullptr));
+  for (int i = 0; i < kMaxLanes; i++) {
+    replacements_[old->id()].node[i] = new_node[i];
+  }
+}
+
+bool SimdScalarLowering::HasReplacement(size_t index, Node* node) {
+  return replacements_[node->id()].node[index] != nullptr;
+}
+
+SimdScalarLowering::SimdType SimdScalarLowering::ReplacementType(Node* node) {
+  return replacements_[node->id()].type;
+}
+
+Node** SimdScalarLowering::GetReplacements(Node* node) {
+  Node** result = replacements_[node->id()].node;
+  DCHECK(result);
+  return result;
+}
+
+Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
+  Node** replacements = GetReplacements(node);
+  if (ReplacementType(node) == type) {
+    return GetReplacements(node);
+  }
+  Node** result = zone()->NewArray<Node*>(kMaxLanes);
+  if (ReplacementType(node) == SimdType::kInt32 && type == SimdType::kFloat32) {
+    for (int i = 0; i < kMaxLanes; i++) {
+      if (replacements[i] != nullptr) {
+        result[i] = graph()->NewNode(machine()->BitcastInt32ToFloat32(),
+                                     replacements[i]);
+      } else {
+        result[i] = nullptr;
+      }
+    }
+  } else {
+    for (int i = 0; i < kMaxLanes; i++) {
+      if (replacements[i] != nullptr) {
+        result[i] = graph()->NewNode(machine()->BitcastFloat32ToInt32(),
+                                     replacements[i]);
+      } else {
+        result[i] = nullptr;
+      }
+    }
+  }
+  return result;
+}
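GetReplacementsWithType converts lanes by reinterpreting bits, not by numeric conversion: BitcastInt32ToFloat32 leaves the 32-bit pattern untouched. The portable C++ equivalent of such a bitcast is a memcpy (sketch):

#include <cstdint>
#include <cstring>

float BitcastInt32ToFloat32Sketch(int32_t bits) {
  float result;
  static_assert(sizeof(result) == sizeof(bits), "lane widths must match");
  std::memcpy(&result, &bits, sizeof(result));  // reinterpret, don't convert
  return result;
}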
+
+void SimdScalarLowering::PreparePhiReplacement(Node* phi) {
+  MachineRepresentation rep = PhiRepresentationOf(phi->op());
+  if (rep == MachineRepresentation::kSimd128) {
+    // We have to create the replacements for a phi node before we actually
+    // lower the phi to break potential cycles in the graph. The replacements of
+    // input nodes do not exist yet, so we use a placeholder node to pass the
+    // graph verifier.
+    int value_count = phi->op()->ValueInputCount();
+    SimdType type = ReplacementType(phi);
+    Node** inputs_rep[kMaxLanes];
+    for (int i = 0; i < kMaxLanes; i++) {
+      inputs_rep[i] = zone()->NewArray<Node*>(value_count + 1);
+      inputs_rep[i][value_count] = NodeProperties::GetControlInput(phi, 0);
+    }
+    for (int i = 0; i < value_count; i++) {
+      for (int j = 0; j < kMaxLanes; j++) {
+        inputs_rep[j][i] = placeholder_;
+      }
+    }
+    Node* rep_nodes[kMaxLanes];
+    for (int i = 0; i < kMaxLanes; i++) {
+      if (type == SimdType::kInt32) {
+        rep_nodes[i] = graph()->NewNode(
+            common()->Phi(MachineRepresentation::kWord32, value_count),
+            value_count + 1, inputs_rep[i], false);
+      } else if (type == SimdType::kFloat32) {
+        rep_nodes[i] = graph()->NewNode(
+            common()->Phi(MachineRepresentation::kFloat32, value_count),
+            value_count + 1, inputs_rep[i], false);
+      } else {
+        UNREACHABLE();
+      }
+    }
+    ReplaceNode(phi, rep_nodes);
+  }
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/simd-scalar-lowering.h b/src/compiler/simd-scalar-lowering.h
new file mode 100644
index 0000000..39449f4
--- /dev/null
+++ b/src/compiler/simd-scalar-lowering.h
@@ -0,0 +1,78 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SIMD_SCALAR_LOWERING_H_
+#define V8_COMPILER_SIMD_SCALAR_LOWERING_H_
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-marker.h"
+#include "src/zone/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class SimdScalarLowering {
+ public:
+  SimdScalarLowering(Graph* graph, MachineOperatorBuilder* machine,
+                     CommonOperatorBuilder* common, Zone* zone,
+                     Signature<MachineRepresentation>* signature);
+
+  void LowerGraph();
+
+  int GetParameterCountAfterLowering();
+
+ private:
+  enum class State : uint8_t { kUnvisited, kOnStack, kVisited };
+
+  enum class SimdType : uint8_t { kInt32, kFloat32 };
+
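+  // Number of scalar lanes a 128-bit SIMD value is lowered into.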
+  static const int kMaxLanes = 4;
+
+  struct Replacement {
+    Node* node[kMaxLanes];
+    SimdType type;  // The input type expected for this node.
+  };
+
+  Zone* zone() const { return zone_; }
+  Graph* graph() const { return graph_; }
+  MachineOperatorBuilder* machine() const { return machine_; }
+  CommonOperatorBuilder* common() const { return common_; }
+  Signature<MachineRepresentation>* signature() const { return signature_; }
+
+  void LowerNode(Node* node);
+  bool DefaultLowering(Node* node);
+
+  void ReplaceNode(Node* old, Node** new_nodes);
+  bool HasReplacement(size_t index, Node* node);
+  Node** GetReplacements(Node* node);
+  Node** GetReplacementsWithType(Node* node, SimdType type);
+  SimdType ReplacementType(Node* node);
+  void PreparePhiReplacement(Node* phi);
+  void SetLoweredType(Node* node, Node* output);
+
+  struct NodeState {
+    Node* node;
+    int input_index;
+  };
+
+  Zone* zone_;
+  Graph* const graph_;
+  MachineOperatorBuilder* machine_;
+  CommonOperatorBuilder* common_;
+  NodeMarker<State> state_;
+  ZoneDeque<NodeState> stack_;
+  Replacement* replacements_;
+  Signature<MachineRepresentation>* signature_;
+  Node* placeholder_;
+  int parameter_count_after_lowering_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_SIMD_SCALAR_LOWERING_H_
diff --git a/src/compiler/simplified-lowering.cc b/src/compiler/simplified-lowering.cc
index 97aacd6..c90d743 100644
--- a/src/compiler/simplified-lowering.cc
+++ b/src/compiler/simplified-lowering.cc
@@ -11,6 +11,7 @@
 #include "src/code-factory.h"
 #include "src/compiler/access-builder.h"
 #include "src/compiler/common-operator.h"
+#include "src/compiler/compiler-source-position-table.h"
 #include "src/compiler/diamond.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/node-matchers.h"
@@ -19,7 +20,6 @@
 #include "src/compiler/operator-properties.h"
 #include "src/compiler/representation-change.h"
 #include "src/compiler/simplified-operator.h"
-#include "src/compiler/source-position.h"
 #include "src/compiler/type-cache.h"
 #include "src/conversions-inl.h"
 #include "src/objects.h"
@@ -87,12 +87,14 @@
   return MachineRepresentation::kNone;
 }
 
-UseInfo CheckedUseInfoAsWord32FromHint(NumberOperationHint hint) {
+UseInfo CheckedUseInfoAsWord32FromHint(
+    NumberOperationHint hint, CheckForMinusZeroMode minus_zero_mode =
+                                  CheckForMinusZeroMode::kCheckForMinusZero) {
   switch (hint) {
     case NumberOperationHint::kSignedSmall:
-      return UseInfo::CheckedSignedSmallAsWord32();
+      return UseInfo::CheckedSignedSmallAsWord32(minus_zero_mode);
     case NumberOperationHint::kSigned32:
-      return UseInfo::CheckedSigned32AsWord32();
+      return UseInfo::CheckedSigned32AsWord32(minus_zero_mode);
     case NumberOperationHint::kNumber:
       return UseInfo::CheckedNumberAsWord32();
     case NumberOperationHint::kNumberOrOddball:
@@ -127,7 +129,7 @@
     case MachineRepresentation::kFloat64:
       return UseInfo::TruncatingFloat64();
     case MachineRepresentation::kFloat32:
-      return UseInfo::TruncatingFloat32();
+      return UseInfo::Float32();
     case MachineRepresentation::kWord64:
       return UseInfo::TruncatingWord64();
     case MachineRepresentation::kWord8:
@@ -161,7 +163,8 @@
     } else if (NodeProperties::IsEffectEdge(edge)) {
       edge.UpdateTo(effect);
     } else {
-      DCHECK(NodeProperties::IsValueEdge(edge));
+      DCHECK(NodeProperties::IsValueEdge(edge) ||
+             NodeProperties::IsContextEdge(edge));
     }
   }
 }
@@ -746,6 +749,23 @@
     }
   }
 
+  void VisitReturn(Node* node) {
+    int tagged_limit = node->op()->ValueInputCount() +
+                       OperatorProperties::GetContextInputCount(node->op()) +
+                       OperatorProperties::GetFrameStateInputCount(node->op());
+    // Visit the integer slot count to pop (input 0) as a word32.
+    ProcessInput(node, 0, UseInfo::TruncatingWord32());
+
+    // Visit value, context and frame state inputs as tagged.
+    for (int i = 1; i < tagged_limit; i++) {
+      ProcessInput(node, i, UseInfo::AnyTagged());
+    }
+    // Only enqueue other inputs (effects, control).
+    for (int i = tagged_limit; i < node->InputCount(); i++) {
+      EnqueueInput(node, i);
+    }
+  }
+
   // Helper for an unused node.
   void VisitUnused(Node* node) {
     int value_count = node->op()->ValueInputCount() +
@@ -838,26 +858,8 @@
       return MachineRepresentation::kTagged;
     } else if (type->Is(Type::Number())) {
       return MachineRepresentation::kFloat64;
-    } else if (type->Is(Type::Internal())) {
-      // We mark (u)int64 as Type::Internal.
-      // TODO(jarin) This is a workaround for our lack of (u)int64
-      // types. This can be removed once we can represent (u)int64
-      // unambiguously. (At the moment internal objects, such as the hole,
-      // are also Type::Internal()).
-      bool is_word64 = GetInfo(node->InputAt(0))->representation() ==
-                       MachineRepresentation::kWord64;
-#ifdef DEBUG
-      if (node->opcode() != IrOpcode::kTypeGuard) {
-        // Check that all the inputs agree on being Word64.
-        DCHECK_EQ(IrOpcode::kPhi, node->opcode());  // This only works for phis.
-        for (int i = 1; i < node->op()->ValueInputCount(); i++) {
-          DCHECK_EQ(is_word64, GetInfo(node->InputAt(i))->representation() ==
-                                   MachineRepresentation::kWord64);
-        }
-      }
-#endif
-      return is_word64 ? MachineRepresentation::kWord64
-                       : MachineRepresentation::kTagged;
+    } else if (type->Is(Type::ExternalPointer())) {
+      return MachineType::PointerRepresentation();
     }
     return MachineRepresentation::kTagged;
   }
@@ -992,6 +994,53 @@
     SetOutput(node, MachineRepresentation::kTagged);
   }
 
+  void VisitObjectState(Node* node) {
+    if (propagate()) {
+      for (int i = 0; i < node->InputCount(); i++) {
+        Node* input = node->InputAt(i);
+        Type* input_type = TypeOf(input);
+        // TODO(turbofan): Special treatment for ExternalPointer here,
+        // to avoid incompatible truncations. We really need a story
+        // for the JSFunction::entry field.
+        UseInfo use_info = input_type->Is(Type::ExternalPointer())
+                               ? UseInfo::PointerInt()
+                               : UseInfo::Any();
+        EnqueueInput(node, i, use_info);
+      }
+    } else if (lower()) {
+      Zone* zone = jsgraph_->zone();
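+      // Allocate the MachineType vector in the graph zone via placement new,
+      // so that it lives as long as the graph.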
+      ZoneVector<MachineType>* types =
+          new (zone->New(sizeof(ZoneVector<MachineType>)))
+              ZoneVector<MachineType>(node->InputCount(), zone);
+      for (int i = 0; i < node->InputCount(); i++) {
+        Node* input = node->InputAt(i);
+        NodeInfo* input_info = GetInfo(input);
+        Type* input_type = TypeOf(input);
+        // TODO(turbofan): Special treatment for ExternalPointer here,
+        // to avoid incompatible truncations. We really need a story
+        // for the JSFunction::entry field.
+        if (input_type->Is(Type::ExternalPointer())) {
+          (*types)[i] = MachineType::Pointer();
+        } else {
+          MachineRepresentation rep = input_type->IsInhabited()
+                                          ? input_info->representation()
+                                          : MachineRepresentation::kNone;
+          MachineType machine_type(rep, DeoptValueSemanticOf(input_type));
+          DCHECK(machine_type.representation() !=
+                     MachineRepresentation::kWord32 ||
+                 machine_type.semantic() == MachineSemantic::kInt32 ||
+                 machine_type.semantic() == MachineSemantic::kUint32);
+          DCHECK(machine_type.representation() != MachineRepresentation::kBit ||
+                 input_type->Is(Type::Boolean()));
+          (*types)[i] = machine_type;
+        }
+      }
+      NodeProperties::ChangeOp(node,
+                               jsgraph_->common()->TypedObjectState(types));
+    }
+    SetOutput(node, MachineRepresentation::kTagged);
+  }
+
   const Operator* Int32Op(Node* node) {
     return changer_->Int32OperatorFor(node->opcode());
   }
@@ -1030,10 +1079,8 @@
         // undefined, because these special oddballs are always in the root set.
         return kNoWriteBarrier;
       }
-      if (value_type->IsConstant() &&
-          value_type->AsConstant()->Value()->IsHeapObject()) {
-        Handle<HeapObject> value_object =
-            Handle<HeapObject>::cast(value_type->AsConstant()->Value());
+      if (value_type->IsHeapConstant()) {
+        Handle<HeapObject> value_object = value_type->AsHeapConstant()->Value();
         RootIndexMap root_index_map(jsgraph_->isolate());
         int root_index = root_index_map.Lookup(*value_object);
         if (root_index != RootIndexMap::kInvalidRootIndex &&
@@ -1147,8 +1194,15 @@
 
     if (hint == NumberOperationHint::kSignedSmall ||
         hint == NumberOperationHint::kSigned32) {
-      VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
-                 MachineRepresentation::kWord32, Type::Signed32());
+      UseInfo left_use = CheckedUseInfoAsWord32FromHint(hint);
+      // For CheckedInt32Add and CheckedInt32Sub, we don't need to do
+      // a minus zero check for the right hand side, since we already
+      // know that the left hand side is a proper Signed32 value,
+      // potentially guarded by a check.
+      UseInfo right_use = CheckedUseInfoAsWord32FromHint(
+          hint, CheckForMinusZeroMode::kDontCheckForMinusZero);
+      VisitBinop(node, left_use, right_use, MachineRepresentation::kWord32,
+                 Type::Signed32());
       if (lower()) ChangeToInt32OverflowOp(node);
       return;
     }
@@ -1266,6 +1320,30 @@
     return;
   }
 
+  void VisitOsrGuard(Node* node) {
+    VisitInputs(node);
+
+    // Insert a dynamic check for the OSR value type if necessary.
+    switch (OsrGuardTypeOf(node->op())) {
+      case OsrGuardType::kUninitialized:
+        // At this point, we should always have a type for the OsrValue.
+        UNREACHABLE();
+        break;
+      case OsrGuardType::kSignedSmall:
+        if (lower()) {
+          NodeProperties::ChangeOp(node,
+                                   simplified()->CheckedTaggedToTaggedSigned());
+        }
+        return SetOutput(node, MachineRepresentation::kTaggedSigned);
+      case OsrGuardType::kAny:  // Nothing to check.
+        if (lower()) {
+          DeferReplacement(node, node->InputAt(0));
+        }
+        return SetOutput(node, MachineRepresentation::kTagged);
+    }
+    UNREACHABLE();
+  }
+
   // Dispatching routine for visiting the node {node} with the usage {use}.
   // Depending on the operator, propagate new usage info to the inputs.
   void VisitNode(Node* node, Truncation truncation,
@@ -1304,7 +1382,15 @@
       case IrOpcode::kNumberConstant:
         return VisitLeaf(node, MachineRepresentation::kTagged);
       case IrOpcode::kHeapConstant:
-        return VisitLeaf(node, MachineRepresentation::kTagged);
+        return VisitLeaf(node, MachineRepresentation::kTaggedPointer);
+      case IrOpcode::kPointerConstant: {
+        VisitLeaf(node, MachineType::PointerRepresentation());
+        if (lower()) {
+          intptr_t const value = OpParameter<intptr_t>(node);
+          DeferReplacement(node, lowering->jsgraph()->IntPtrConstant(value));
+        }
+        return;
+      }
 
       case IrOpcode::kBranch:
         ProcessInput(node, 0, UseInfo::Bool());
@@ -1332,7 +1418,7 @@
           if (lower()) DeferReplacement(node, node->InputAt(0));
         } else {
           VisitInputs(node);
-          SetOutput(node, MachineRepresentation::kTagged);
+          SetOutput(node, MachineRepresentation::kTaggedPointer);
         }
         return;
       }
@@ -1362,8 +1448,7 @@
             node->AppendInput(jsgraph_->zone(), jsgraph_->Int32Constant(0));
             NodeProperties::ChangeOp(node, lowering->machine()->Word32Equal());
           } else {
-            DCHECK_EQ(input_info->representation(),
-                      MachineRepresentation::kTagged);
+            DCHECK(CanBeTaggedPointer(input_info->representation()));
             // BooleanNot(x: kRepTagged) => WordEqual(x, #false)
             node->AppendInput(jsgraph_->zone(), jsgraph_->FalseConstant());
             NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
@@ -2033,6 +2118,31 @@
         if (lower()) DeferReplacement(node, node->InputAt(0));
         return;
       }
+      case IrOpcode::kNumberToUint8Clamped: {
+        Type* const input_type = TypeOf(node->InputAt(0));
+        if (input_type->Is(type_cache_.kUint8OrMinusZeroOrNaN)) {
+          VisitUnop(node, UseInfo::TruncatingWord32(),
+                    MachineRepresentation::kWord32);
+          if (lower()) DeferReplacement(node, node->InputAt(0));
+        } else if (input_type->Is(Type::Unsigned32OrMinusZeroOrNaN())) {
+          VisitUnop(node, UseInfo::TruncatingWord32(),
+                    MachineRepresentation::kWord32);
+          if (lower()) lowering->DoUnsigned32ToUint8Clamped(node);
+        } else if (input_type->Is(Type::Signed32OrMinusZeroOrNaN())) {
+          VisitUnop(node, UseInfo::TruncatingWord32(),
+                    MachineRepresentation::kWord32);
+          if (lower()) lowering->DoSigned32ToUint8Clamped(node);
+        } else if (input_type->Is(type_cache_.kIntegerOrMinusZeroOrNaN)) {
+          VisitUnop(node, UseInfo::TruncatingFloat64(),
+                    MachineRepresentation::kFloat64);
+          if (lower()) lowering->DoIntegerToUint8Clamped(node);
+        } else {
+          VisitUnop(node, UseInfo::TruncatingFloat64(),
+                    MachineRepresentation::kFloat64);
+          if (lower()) lowering->DoNumberToUint8Clamped(node);
+        }
+        return;
+      }
       case IrOpcode::kReferenceEqual: {
         VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
         if (lower()) {
@@ -2044,7 +2154,7 @@
       case IrOpcode::kStringLessThan:
       case IrOpcode::kStringLessThanOrEqual: {
         return VisitBinop(node, UseInfo::AnyTagged(),
-                          MachineRepresentation::kTagged);
+                          MachineRepresentation::kTaggedPointer);
       }
       case IrOpcode::kStringCharCodeAt: {
         VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
@@ -2053,12 +2163,12 @@
       }
       case IrOpcode::kStringFromCharCode: {
         VisitUnop(node, UseInfo::TruncatingWord32(),
-                  MachineRepresentation::kTagged);
+                  MachineRepresentation::kTaggedPointer);
         return;
       }
       case IrOpcode::kStringFromCodePoint: {
         VisitUnop(node, UseInfo::TruncatingWord32(),
-                  MachineRepresentation::kTagged);
+                  MachineRepresentation::kTaggedPointer);
         return;
       }
 
@@ -2082,11 +2192,13 @@
       }
       case IrOpcode::kCheckHeapObject: {
         if (InputCannotBe(node, Type::SignedSmall())) {
-          VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
-          if (lower()) DeferReplacement(node, node->InputAt(0));
+          VisitUnop(node, UseInfo::AnyTagged(),
+                    MachineRepresentation::kTaggedPointer);
         } else {
-          VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+          VisitUnop(node, UseInfo::CheckedHeapObjectAsTaggedPointer(),
+                    MachineRepresentation::kTaggedPointer);
         }
+        if (lower()) DeferReplacement(node, node->InputAt(0));
         return;
       }
       case IrOpcode::kCheckIf: {
@@ -2127,10 +2239,12 @@
       }
       case IrOpcode::kCheckString: {
         if (InputIs(node, Type::String())) {
-          VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+          VisitUnop(node, UseInfo::AnyTagged(),
+                    MachineRepresentation::kTaggedPointer);
           if (lower()) DeferReplacement(node, node->InputAt(0));
         } else {
-          VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+          VisitUnop(node, UseInfo::AnyTagged(),
+                    MachineRepresentation::kTaggedPointer);
         }
         return;
       }
@@ -2138,7 +2252,7 @@
       case IrOpcode::kAllocate: {
         ProcessInput(node, 0, UseInfo::TruncatingWord32());
         ProcessRemainingInputs(node, 1);
-        SetOutput(node, MachineRepresentation::kTagged);
+        SetOutput(node, MachineRepresentation::kTaggedPointer);
         return;
       }
       case IrOpcode::kLoadField: {
@@ -2352,7 +2466,8 @@
         return;
       }
       case IrOpcode::kCheckTaggedHole: {
-        VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+        VisitUnop(node, UseInfo::AnyTagged(),
+                  MachineRepresentation::kTaggedPointer);
         return;
       }
       case IrOpcode::kConvertTaggedHoleToUndefined: {
@@ -2385,14 +2500,14 @@
       }
       case IrOpcode::kEnsureWritableFastElements:
         return VisitBinop(node, UseInfo::AnyTagged(),
-                          MachineRepresentation::kTagged);
+                          MachineRepresentation::kTaggedPointer);
       case IrOpcode::kMaybeGrowFastElements: {
         ProcessInput(node, 0, UseInfo::AnyTagged());         // object
         ProcessInput(node, 1, UseInfo::AnyTagged());         // elements
         ProcessInput(node, 2, UseInfo::TruncatingWord32());  // index
         ProcessInput(node, 3, UseInfo::TruncatingWord32());  // length
         ProcessRemainingInputs(node, 4);
-        SetOutput(node, MachineRepresentation::kTagged);
+        SetOutput(node, MachineRepresentation::kTaggedPointer);
         return;
       }
 
@@ -2403,6 +2518,8 @@
         return;
       case IrOpcode::kStateValues:
         return VisitStateValues(node);
+      case IrOpcode::kObjectState:
+        return VisitObjectState(node);
       case IrOpcode::kTypeGuard: {
         // We just get rid of the sigma here. In principle, it should be
         // possible to refine the truncation and representation based on
@@ -2414,10 +2531,22 @@
         return;
       }
 
+      case IrOpcode::kOsrGuard:
+        return VisitOsrGuard(node);
+
+      case IrOpcode::kFinishRegion:
+        VisitInputs(node);
+        // Assume the output is a tagged pointer.
+        return SetOutput(node, MachineRepresentation::kTaggedPointer);
+
+      case IrOpcode::kReturn:
+        VisitReturn(node);
+        // Assume the output is tagged.
+        return SetOutput(node, MachineRepresentation::kTagged);
+
       // Operators with all inputs tagged and no or tagged output have uniform
       // handling.
       case IrOpcode::kEnd:
-      case IrOpcode::kReturn:
       case IrOpcode::kIfSuccess:
       case IrOpcode::kIfException:
       case IrOpcode::kIfTrue:
@@ -2431,10 +2560,8 @@
       case IrOpcode::kMerge:
       case IrOpcode::kThrow:
       case IrOpcode::kBeginRegion:
-      case IrOpcode::kFinishRegion:
-      case IrOpcode::kOsrValue:
       case IrOpcode::kProjection:
-      case IrOpcode::kObjectState:
+      case IrOpcode::kOsrValue:
 // All JavaScript operators except JSToNumber have uniform handling.
 #define OPCODE_CASE(name) case IrOpcode::k##name:
         JS_SIMPLE_BINOP_LIST(OPCODE_CASE)
@@ -3199,6 +3326,71 @@
   NodeProperties::ChangeOp(node, machine()->Float64LessThan());
 }
 
+void SimplifiedLowering::DoIntegerToUint8Clamped(Node* node) {
+  Node* const input = node->InputAt(0);
+  Node* const min = jsgraph()->Float64Constant(0.0);
+  Node* const max = jsgraph()->Float64Constant(255.0);
+
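+  // Lowers to Select(0.0 < x, Select(x < 255.0, x, 255.0), 0.0). For NaN both
+  // comparisons are false, so the result is 0.0 as required for Uint8Clamped.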
+  node->ReplaceInput(
+      0, graph()->NewNode(machine()->Float64LessThan(), min, input));
+  node->AppendInput(
+      graph()->zone(),
+      graph()->NewNode(
+          common()->Select(MachineRepresentation::kFloat64),
+          graph()->NewNode(machine()->Float64LessThan(), input, max), input,
+          max));
+  node->AppendInput(graph()->zone(), min);
+  NodeProperties::ChangeOp(node,
+                           common()->Select(MachineRepresentation::kFloat64));
+}
+
+void SimplifiedLowering::DoNumberToUint8Clamped(Node* node) {
+  Node* const input = node->InputAt(0);
+  Node* const min = jsgraph()->Float64Constant(0.0);
+  Node* const max = jsgraph()->Float64Constant(255.0);
+
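+  // Clamp to [0.0, 255.0] with nested Selects, then let Float64RoundTiesEven
+  // perform the ties-to-even rounding required for Uint8Clamped.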
+  node->ReplaceInput(
+      0, graph()->NewNode(
+             common()->Select(MachineRepresentation::kFloat64),
+             graph()->NewNode(machine()->Float64LessThan(), min, input),
+             graph()->NewNode(
+                 common()->Select(MachineRepresentation::kFloat64),
+                 graph()->NewNode(machine()->Float64LessThan(), input, max),
+                 input, max),
+             min));
+  NodeProperties::ChangeOp(node,
+                           machine()->Float64RoundTiesEven().placeholder());
+}
+
+void SimplifiedLowering::DoSigned32ToUint8Clamped(Node* node) {
+  Node* const input = node->InputAt(0);
+  Node* const min = jsgraph()->Int32Constant(0);
+  Node* const max = jsgraph()->Int32Constant(255);
+
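+  // Lowers to Select(x <= 255, Select(x < 0, 0, x), 255): the inner Select
+  // clamps from below, the outer one from above.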
+  node->ReplaceInput(
+      0, graph()->NewNode(machine()->Int32LessThanOrEqual(), input, max));
+  node->AppendInput(
+      graph()->zone(),
+      graph()->NewNode(common()->Select(MachineRepresentation::kWord32),
+                       graph()->NewNode(machine()->Int32LessThan(), input, min),
+                       min, input));
+  node->AppendInput(graph()->zone(), max);
+  NodeProperties::ChangeOp(node,
+                           common()->Select(MachineRepresentation::kWord32));
+}
+
+void SimplifiedLowering::DoUnsigned32ToUint8Clamped(Node* node) {
+  Node* const input = node->InputAt(0);
+  Node* const max = jsgraph()->Uint32Constant(255u);
+
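+  // Lowers to Select(x <= 255, x, 255); unsigned inputs only need an upper
+  // clamp.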
+  node->ReplaceInput(
+      0, graph()->NewNode(machine()->Uint32LessThanOrEqual(), input, max));
+  node->AppendInput(graph()->zone(), input);
+  node->AppendInput(graph()->zone(), max);
+  NodeProperties::ChangeOp(node,
+                           common()->Select(MachineRepresentation::kWord32));
+}
+
 Node* SimplifiedLowering::ToNumberCode() {
   if (!to_number_code_.is_set()) {
     Callable callable = CodeFactory::ToNumber(isolate());
diff --git a/src/compiler/simplified-lowering.h b/src/compiler/simplified-lowering.h
index 9e2a499..09e58ff 100644
--- a/src/compiler/simplified-lowering.h
+++ b/src/compiler/simplified-lowering.h
@@ -44,6 +44,10 @@
   void DoIntegral32ToBit(Node* node);
   void DoOrderedNumberToBit(Node* node);
   void DoNumberToBit(Node* node);
+  void DoIntegerToUint8Clamped(Node* node);
+  void DoNumberToUint8Clamped(Node* node);
+  void DoSigned32ToUint8Clamped(Node* node);
+  void DoUnsigned32ToUint8Clamped(Node* node);
 
  private:
   JSGraph* const jsgraph_;
diff --git a/src/compiler/simplified-operator-reducer.cc b/src/compiler/simplified-operator-reducer.cc
index d172adc..b8a486d 100644
--- a/src/compiler/simplified-operator-reducer.cc
+++ b/src/compiler/simplified-operator-reducer.cc
@@ -80,7 +80,9 @@
     case IrOpcode::kTruncateTaggedToFloat64: {
       NumberMatcher m(node->InputAt(0));
       if (m.HasValue()) return ReplaceFloat64(m.Value());
-      if (m.IsChangeFloat64ToTagged()) return Replace(m.node()->InputAt(0));
+      if (m.IsChangeFloat64ToTagged() || m.IsChangeFloat64ToTaggedPointer()) {
+        return Replace(m.node()->InputAt(0));
+      }
       if (m.IsChangeInt31ToTaggedSigned() || m.IsChangeInt32ToTagged()) {
         return Change(node, machine()->ChangeInt32ToFloat64(), m.InputAt(0));
       }
@@ -89,10 +91,11 @@
       }
       break;
     }
+    case IrOpcode::kChangeTaggedSignedToInt32:
     case IrOpcode::kChangeTaggedToInt32: {
       NumberMatcher m(node->InputAt(0));
       if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
-      if (m.IsChangeFloat64ToTagged()) {
+      if (m.IsChangeFloat64ToTagged() || m.IsChangeFloat64ToTaggedPointer()) {
         return Change(node, machine()->ChangeFloat64ToInt32(), m.InputAt(0));
       }
       if (m.IsChangeInt31ToTaggedSigned() || m.IsChangeInt32ToTagged()) {
@@ -103,7 +106,7 @@
     case IrOpcode::kChangeTaggedToUint32: {
       NumberMatcher m(node->InputAt(0));
       if (m.HasValue()) return ReplaceUint32(DoubleToUint32(m.Value()));
-      if (m.IsChangeFloat64ToTagged()) {
+      if (m.IsChangeFloat64ToTagged() || m.IsChangeFloat64ToTaggedPointer()) {
         return Change(node, machine()->ChangeFloat64ToUint32(), m.InputAt(0));
       }
       if (m.IsChangeUint32ToTagged()) return Replace(m.InputAt(0));
@@ -121,11 +124,12 @@
           m.IsChangeUint32ToTagged()) {
         return Replace(m.InputAt(0));
       }
-      if (m.IsChangeFloat64ToTagged()) {
+      if (m.IsChangeFloat64ToTagged() || m.IsChangeFloat64ToTaggedPointer()) {
         return Change(node, machine()->TruncateFloat64ToWord32(), m.InputAt(0));
       }
       break;
     }
+    case IrOpcode::kCheckedTaggedToInt32:
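+      // Fall through: the reduction below applies to both checked
+      // tagged-to-int32 conversions.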
     case IrOpcode::kCheckedTaggedSignedToInt32: {
       NodeMatcher m(node->InputAt(0));
       if (m.IsConvertTaggedHoleToUndefined()) {
diff --git a/src/compiler/simplified-operator-reducer.h b/src/compiler/simplified-operator-reducer.h
index 44bfdff..266cb23 100644
--- a/src/compiler/simplified-operator-reducer.h
+++ b/src/compiler/simplified-operator-reducer.h
@@ -5,7 +5,9 @@
 #ifndef V8_COMPILER_SIMPLIFIED_OPERATOR_REDUCER_H_
 #define V8_COMPILER_SIMPLIFIED_OPERATOR_REDUCER_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -21,7 +23,8 @@
 class MachineOperatorBuilder;
 class SimplifiedOperatorBuilder;
 
-class SimplifiedOperatorReducer final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE SimplifiedOperatorReducer final
+    : public NON_EXPORTED_BASE(AdvancedReducer) {
  public:
   SimplifiedOperatorReducer(Editor* editor, JSGraph* jsgraph);
   ~SimplifiedOperatorReducer() final;
diff --git a/src/compiler/simplified-operator.cc b/src/compiler/simplified-operator.cc
index 400db97..345a2c5 100644
--- a/src/compiler/simplified-operator.cc
+++ b/src/compiler/simplified-operator.cc
@@ -393,6 +393,7 @@
   V(NumberToBoolean, Operator::kNoProperties, 1, 0)              \
   V(NumberToInt32, Operator::kNoProperties, 1, 0)                \
   V(NumberToUint32, Operator::kNoProperties, 1, 0)               \
+  V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0)         \
   V(NumberSilenceNaN, Operator::kNoProperties, 1, 0)             \
   V(StringCharCodeAt, Operator::kNoProperties, 2, 1)             \
   V(StringFromCharCode, Operator::kNoProperties, 1, 0)           \
@@ -404,6 +405,7 @@
   V(ChangeTaggedToUint32, Operator::kNoProperties, 1, 0)         \
   V(ChangeTaggedToFloat64, Operator::kNoProperties, 1, 0)        \
   V(ChangeFloat64ToTagged, Operator::kNoProperties, 1, 0)        \
+  V(ChangeFloat64ToTaggedPointer, Operator::kNoProperties, 1, 0) \
   V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0)    \
   V(ChangeInt32ToTagged, Operator::kNoProperties, 1, 0)          \
   V(ChangeUint32ToTagged, Operator::kNoProperties, 1, 0)         \
@@ -430,25 +432,26 @@
   V(SpeculativeNumberLessThan)                \
   V(SpeculativeNumberLessThanOrEqual)
 
-#define CHECKED_OP_LIST(V)             \
-  V(CheckBounds, 2, 1)                 \
-  V(CheckHeapObject, 1, 1)             \
-  V(CheckIf, 1, 0)                     \
-  V(CheckNumber, 1, 1)                 \
-  V(CheckSmi, 1, 1)                    \
-  V(CheckString, 1, 1)                 \
-  V(CheckTaggedHole, 1, 1)             \
-  V(CheckedInt32Add, 2, 1)             \
-  V(CheckedInt32Sub, 2, 1)             \
-  V(CheckedInt32Div, 2, 1)             \
-  V(CheckedInt32Mod, 2, 1)             \
-  V(CheckedUint32Div, 2, 1)            \
-  V(CheckedUint32Mod, 2, 1)            \
-  V(CheckedUint32ToInt32, 1, 1)        \
-  V(CheckedUint32ToTaggedSigned, 1, 1) \
-  V(CheckedInt32ToTaggedSigned, 1, 1)  \
-  V(CheckedTaggedSignedToInt32, 1, 1)  \
-  V(CheckedTaggedToTaggedSigned, 1, 1) \
+#define CHECKED_OP_LIST(V)              \
+  V(CheckBounds, 2, 1)                  \
+  V(CheckHeapObject, 1, 1)              \
+  V(CheckIf, 1, 0)                      \
+  V(CheckNumber, 1, 1)                  \
+  V(CheckSmi, 1, 1)                     \
+  V(CheckString, 1, 1)                  \
+  V(CheckTaggedHole, 1, 1)              \
+  V(CheckedInt32Add, 2, 1)              \
+  V(CheckedInt32Sub, 2, 1)              \
+  V(CheckedInt32Div, 2, 1)              \
+  V(CheckedInt32Mod, 2, 1)              \
+  V(CheckedUint32Div, 2, 1)             \
+  V(CheckedUint32Mod, 2, 1)             \
+  V(CheckedUint32ToInt32, 1, 1)         \
+  V(CheckedUint32ToTaggedSigned, 1, 1)  \
+  V(CheckedInt32ToTaggedSigned, 1, 1)   \
+  V(CheckedTaggedSignedToInt32, 1, 1)   \
+  V(CheckedTaggedToTaggedSigned, 1, 1)  \
+  V(CheckedTaggedToTaggedPointer, 1, 1) \
   V(CheckedTruncateTaggedToWord32, 1, 1)
 
 struct SimplifiedOperatorGlobalCache final {
diff --git a/src/compiler/simplified-operator.h b/src/compiler/simplified-operator.h
index a904391..833a055 100644
--- a/src/compiler/simplified-operator.h
+++ b/src/compiler/simplified-operator.h
@@ -7,8 +7,10 @@
 
 #include <iosfwd>
 
+#include "src/base/compiler-specific.h"
 #include "src/compiler/operator.h"
 #include "src/compiler/types.h"
+#include "src/globals.h"
 #include "src/handles.h"
 #include "src/machine-type.h"
 #include "src/objects.h"
@@ -45,15 +47,15 @@
   ExternalArrayType const external_array_type_;
 };
 
-bool operator==(BufferAccess, BufferAccess);
+V8_EXPORT_PRIVATE bool operator==(BufferAccess, BufferAccess);
 bool operator!=(BufferAccess, BufferAccess);
 
 size_t hash_value(BufferAccess);
 
-std::ostream& operator<<(std::ostream&, BufferAccess);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, BufferAccess);
 
-BufferAccess const BufferAccessOf(const Operator* op) WARN_UNUSED_RESULT;
-
+V8_EXPORT_PRIVATE BufferAccess const BufferAccessOf(const Operator* op)
+    WARN_UNUSED_RESULT;
 
 // An access descriptor for loads/stores of fixed structures like field
 // accesses of heap objects. Accesses from either tagged or untagged base
@@ -69,12 +71,12 @@
   int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
 };
 
-bool operator==(FieldAccess const&, FieldAccess const&);
+V8_EXPORT_PRIVATE bool operator==(FieldAccess const&, FieldAccess const&);
 bool operator!=(FieldAccess const&, FieldAccess const&);
 
 size_t hash_value(FieldAccess const&);
 
-std::ostream& operator<<(std::ostream&, FieldAccess const&);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, FieldAccess const&);
 
 FieldAccess const& FieldAccessOf(const Operator* op) WARN_UNUSED_RESULT;
 
@@ -96,14 +98,15 @@
   int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
 };
 
-bool operator==(ElementAccess const&, ElementAccess const&);
+V8_EXPORT_PRIVATE bool operator==(ElementAccess const&, ElementAccess const&);
 bool operator!=(ElementAccess const&, ElementAccess const&);
 
 size_t hash_value(ElementAccess const&);
 
-std::ostream& operator<<(std::ostream&, ElementAccess const&);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, ElementAccess const&);
 
-ElementAccess const& ElementAccessOf(const Operator* op) WARN_UNUSED_RESULT;
+V8_EXPORT_PRIVATE ElementAccess const& ElementAccessOf(const Operator* op)
+    WARN_UNUSED_RESULT;
 
 ExternalArrayType ExternalArrayTypeOf(const Operator* op) WARN_UNUSED_RESULT;
 
@@ -178,7 +181,7 @@
 
 size_t hash_value(NumberOperationHint);
 
-std::ostream& operator<<(std::ostream&, NumberOperationHint);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, NumberOperationHint);
 
 NumberOperationHint NumberOperationHintOf(const Operator* op)
     WARN_UNUSED_RESULT;
@@ -209,7 +212,8 @@
 //   - Bool: a tagged pointer to either the canonical JS #false or
 //           the canonical JS #true object
 //   - Bit: an untagged integer 0 or 1, but word-sized
-class SimplifiedOperatorBuilder final : public ZoneObject {
+class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
+    : public NON_EXPORTED_BASE(ZoneObject) {
  public:
   explicit SimplifiedOperatorBuilder(Zone* zone);
 
@@ -265,6 +269,7 @@
   const Operator* NumberToBoolean();
   const Operator* NumberToInt32();
   const Operator* NumberToUint32();
+  const Operator* NumberToUint8Clamped();
 
   const Operator* NumberSilenceNaN();
 
@@ -305,6 +310,7 @@
   const Operator* ChangeInt32ToTagged();
   const Operator* ChangeUint32ToTagged();
   const Operator* ChangeFloat64ToTagged();
+  const Operator* ChangeFloat64ToTaggedPointer();
   const Operator* ChangeTaggedToBit();
   const Operator* ChangeBitToTagged();
   const Operator* TruncateTaggedToWord32();
@@ -335,6 +341,7 @@
   const Operator* CheckedTaggedToInt32(CheckForMinusZeroMode);
   const Operator* CheckedTaggedToFloat64(CheckTaggedInputMode);
   const Operator* CheckedTaggedToTaggedSigned();
+  const Operator* CheckedTaggedToTaggedPointer();
   const Operator* CheckedTruncateTaggedToWord32();
 
   const Operator* CheckFloat64Hole(CheckFloat64HoleMode);
diff --git a/src/compiler/state-values-utils.cc b/src/compiler/state-values-utils.cc
index 77cc227..e8310d7 100644
--- a/src/compiler/state-values-utils.cc
+++ b/src/compiler/state-values-utils.cc
@@ -274,8 +274,7 @@
     return MachineType::AnyTagged();
   } else {
     DCHECK_EQ(IrOpcode::kTypedStateValues, state->opcode());
-    const ZoneVector<MachineType>* types =
-        OpParameter<const ZoneVector<MachineType>*>(state);
+    ZoneVector<MachineType> const* types = MachineTypesOf(state->op());
     return (*types)[Top()->index];
   }
 }
diff --git a/src/compiler/state-values-utils.h b/src/compiler/state-values-utils.h
index 704f5f6..14b1b9e 100644
--- a/src/compiler/state-values-utils.h
+++ b/src/compiler/state-values-utils.h
@@ -6,6 +6,7 @@
 #define V8_COMPILER_STATE_VALUES_UTILS_H_
 
 #include "src/compiler/js-graph.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -14,7 +15,7 @@
 
 class Graph;
 
-class StateValuesCache {
+class V8_EXPORT_PRIVATE StateValuesCache {
  public:
   explicit StateValuesCache(JSGraph* js_graph);
 
@@ -60,7 +61,7 @@
   Node* empty_state_values_;
 };
 
-class StateValuesAccess {
+class V8_EXPORT_PRIVATE StateValuesAccess {
  public:
   struct TypedNode {
     Node* node;
@@ -68,7 +69,7 @@
     TypedNode(Node* node, MachineType type) : node(node), type(type) {}
   };
 
-  class iterator {
+  class V8_EXPORT_PRIVATE iterator {
    public:
     // Bare minimum of operators needed for range iteration.
     bool operator!=(iterator& other);
diff --git a/src/compiler/tail-call-optimization.cc b/src/compiler/tail-call-optimization.cc
index 7e1623a..605b0e7 100644
--- a/src/compiler/tail-call-optimization.cc
+++ b/src/compiler/tail-call-optimization.cc
@@ -7,6 +7,7 @@
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/linkage.h"
+#include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
 
 namespace v8 {
@@ -18,12 +19,15 @@
   // The value which is returned must be the result of a potential tail call,
   // there must be no try/catch/finally around the Call, and there must be no
   // other effect between the Call and the Return nodes.
-  Node* const call = NodeProperties::GetValueInput(node, 0);
+  Node* const call = NodeProperties::GetValueInput(node, 1);
   if (call->opcode() == IrOpcode::kCall &&
       CallDescriptorOf(call->op())->SupportsTailCalls() &&
       NodeProperties::GetEffectInput(node) == call &&
       !NodeProperties::IsExceptionalCall(call)) {
     Node* const control = NodeProperties::GetControlInput(node);
+    // Ensure that no additional arguments are being popped other than those in
+    // the CallDescriptor, otherwise the tail call transformation is invalid.
+    DCHECK_EQ(0, Int32Matcher(NodeProperties::GetValueInput(node, 0)).Value());
     if (control->opcode() == IrOpcode::kIfSuccess &&
         call->OwnedBy(node, control) && control->OwnedBy(node)) {
       // Furthermore, control has to flow via an IfSuccess from the Call, so
@@ -62,9 +66,10 @@
       //                 |
 
       DCHECK_EQ(call, NodeProperties::GetControlInput(control, 0));
-      DCHECK_EQ(3, node->InputCount());
+      DCHECK_EQ(4, node->InputCount());
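+      // The Return node has {pop count, value, effect, control} inputs; turn
+      // it into a tail call by replacing them with the Call's value inputs
+      // followed by its effect and control.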
       node->ReplaceInput(0, NodeProperties::GetEffectInput(call));
       node->ReplaceInput(1, NodeProperties::GetControlInput(call));
+      node->RemoveInput(3);
       node->RemoveInput(2);
       for (int index = 0; index < call->op()->ValueInputCount(); ++index) {
         node->InsertInput(graph()->zone(), index,
diff --git a/src/compiler/tail-call-optimization.h b/src/compiler/tail-call-optimization.h
index b5d4f96..d693f36 100644
--- a/src/compiler/tail-call-optimization.h
+++ b/src/compiler/tail-call-optimization.h
@@ -6,6 +6,7 @@
 #define V8_COMPILER_TAIL_CALL_OPTIMIZATION_H_
 
 #include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -18,7 +19,7 @@
 
 // Performs tail call optimization by replacing certain combinations of Return
 // and Call nodes with a single TailCall.
-class TailCallOptimization final : public Reducer {
+class V8_EXPORT_PRIVATE TailCallOptimization final : public Reducer {
  public:
   TailCallOptimization(CommonOperatorBuilder* common, Graph* graph)
       : common_(common), graph_(graph) {}
diff --git a/src/compiler/type-cache.h b/src/compiler/type-cache.h
index aa51dac..69eaf11 100644
--- a/src/compiler/type-cache.h
+++ b/src/compiler/type-cache.h
@@ -21,11 +21,13 @@
  public:
   static TypeCache const& Get();
 
-  TypeCache() : zone_(&allocator) {}
+  TypeCache() : zone_(&allocator, ZONE_NAME) {}
 
   Type* const kInt8 = CreateRange<int8_t>();
   Type* const kUint8 = CreateRange<uint8_t>();
   Type* const kUint8Clamped = kUint8;
+  Type* const kUint8OrMinusZeroOrNaN =
+      Type::Union(kUint8, Type::MinusZeroOrNaN(), zone());
   Type* const kInt16 = CreateRange<int16_t>();
   Type* const kUint16 = CreateRange<uint16_t>();
   Type* const kInt32 = Type::Signed32();
@@ -33,9 +35,8 @@
   Type* const kFloat32 = Type::Number();
   Type* const kFloat64 = Type::Number();
 
-  Type* const kSmi = Type::SignedSmall();
-  Type* const kHoleySmi = Type::Union(kSmi, Type::Hole(), zone());
-  Type* const kHeapNumber = Type::Number();
+  Type* const kHoleySmi =
+      Type::Union(Type::SignedSmall(), Type::Hole(), zone());
 
   Type* const kSingletonZero = CreateRange(0.0, 0.0);
   Type* const kSingletonOne = CreateRange(1.0, 1.0);
diff --git a/src/compiler/type-hint-analyzer.cc b/src/compiler/type-hint-analyzer.cc
index a668a48..da77a0c 100644
--- a/src/compiler/type-hint-analyzer.cc
+++ b/src/compiler/type-hint-analyzer.cc
@@ -92,21 +92,7 @@
   Handle<Code> code = i->second;
   DCHECK_EQ(Code::TO_BOOLEAN_IC, code->kind());
   ToBooleanICStub stub(code->GetIsolate(), code->extra_ic_state());
-// TODO(bmeurer): Replace ToBooleanICStub::Types with ToBooleanHints.
-#define ASSERT_COMPATIBLE(NAME, Name)         \
-  STATIC_ASSERT(1 << ToBooleanICStub::NAME == \
-                static_cast<int>(ToBooleanHint::k##Name))
-  ASSERT_COMPATIBLE(UNDEFINED, Undefined);
-  ASSERT_COMPATIBLE(BOOLEAN, Boolean);
-  ASSERT_COMPATIBLE(NULL_TYPE, Null);
-  ASSERT_COMPATIBLE(SMI, SmallInteger);
-  ASSERT_COMPATIBLE(SPEC_OBJECT, Receiver);
-  ASSERT_COMPATIBLE(STRING, String);
-  ASSERT_COMPATIBLE(SYMBOL, Symbol);
-  ASSERT_COMPATIBLE(HEAP_NUMBER, HeapNumber);
-  ASSERT_COMPATIBLE(SIMD_VALUE, SimdValue);
-#undef ASSERT_COMPATIBLE
-  *hints = ToBooleanHints(stub.types().ToIntegral());
+  *hints = stub.hints();
   return true;
 }
 
diff --git a/src/compiler/typed-optimization.cc b/src/compiler/typed-optimization.cc
index c5e8648..5ebc390 100644
--- a/src/compiler/typed-optimization.cc
+++ b/src/compiler/typed-optimization.cc
@@ -22,8 +22,9 @@
       dependencies_(dependencies),
       flags_(flags),
       jsgraph_(jsgraph),
-      true_type_(Type::Constant(factory()->true_value(), graph()->zone())),
-      false_type_(Type::Constant(factory()->false_value(), graph()->zone())),
+      true_type_(Type::HeapConstant(factory()->true_value(), graph()->zone())),
+      false_type_(
+          Type::HeapConstant(factory()->false_value(), graph()->zone())),
       type_cache_(TypeCache::Get()) {}
 
 TypedOptimization::~TypedOptimization() {}
@@ -43,8 +44,9 @@
     // the Operator::kNoDeopt property).
     Type* upper = NodeProperties::GetType(node);
     if (upper->IsInhabited()) {
-      if (upper->IsConstant()) {
-        Node* replacement = jsgraph()->Constant(upper->AsConstant()->Value());
+      if (upper->IsHeapConstant()) {
+        Node* replacement =
+            jsgraph()->Constant(upper->AsHeapConstant()->Value());
         ReplaceWithValue(node, replacement);
         return Changed(replacement);
       } else if (upper->Is(Type::MinusZero())) {
@@ -72,6 +74,8 @@
     }
   }
   switch (node->opcode()) {
+    case IrOpcode::kCheckHeapObject:
+      return ReduceCheckHeapObject(node);
     case IrOpcode::kCheckMaps:
       return ReduceCheckMaps(node);
     case IrOpcode::kCheckString:
@@ -83,6 +87,8 @@
     case IrOpcode::kNumberRound:
     case IrOpcode::kNumberTrunc:
       return ReduceNumberRoundop(node);
+    case IrOpcode::kNumberToUint8Clamped:
+      return ReduceNumberToUint8Clamped(node);
     case IrOpcode::kPhi:
       return ReducePhi(node);
     case IrOpcode::kSelect:
@@ -96,10 +102,8 @@
 namespace {
 
 MaybeHandle<Map> GetStableMapFromObjectType(Type* object_type) {
-  if (object_type->IsConstant() &&
-      object_type->AsConstant()->Value()->IsHeapObject()) {
-    Handle<Map> object_map(
-        Handle<HeapObject>::cast(object_type->AsConstant()->Value())->map());
+  if (object_type->IsHeapConstant()) {
+    Handle<Map> object_map(object_type->AsHeapConstant()->Value()->map());
     if (object_map->is_stable()) return object_map;
   }
   return MaybeHandle<Map>();
@@ -107,6 +111,16 @@
 
 }  // namespace
 
+Reduction TypedOptimization::ReduceCheckHeapObject(Node* node) {
+  Node* const input = NodeProperties::GetValueInput(node, 0);
+  Type* const input_type = NodeProperties::GetType(input);
+  if (!input_type->Maybe(Type::SignedSmall())) {
+    ReplaceWithValue(node, input);
+    return Replace(input);
+  }
+  return NoChange();
+}
+
 Reduction TypedOptimization::ReduceCheckMaps(Node* node) {
   // The CheckMaps(o, ...map...) can be eliminated if map is stable,
   // o has type Constant(object) and map == object->map, and either
@@ -121,8 +135,8 @@
     for (int i = 1; i < node->op()->ValueInputCount(); ++i) {
       Node* const map = NodeProperties::GetValueInput(node, i);
       Type* const map_type = NodeProperties::GetType(map);
-      if (map_type->IsConstant() &&
-          map_type->AsConstant()->Value().is_identical_to(object_map)) {
+      if (map_type->IsHeapConstant() &&
+          map_type->AsHeapConstant()->Value().is_identical_to(object_map)) {
         if (object_map->CanTransition()) {
           dependencies()->AssumeMapStable(object_map);
         }
@@ -180,6 +194,15 @@
   return NoChange();
 }
 
+Reduction TypedOptimization::ReduceNumberToUint8Clamped(Node* node) {
+  Node* const input = NodeProperties::GetValueInput(node, 0);
+  Type* const input_type = NodeProperties::GetType(input);
+  if (input_type->Is(type_cache_.kUint8)) {
+    return Replace(input);
+  }
+  return NoChange();
+}
+
 Reduction TypedOptimization::ReducePhi(Node* node) {
   // Try to narrow the type of the Phi {node}, which might be more precise now
   // after lowering based on types, i.e. a SpeculativeNumberAdd has a more
diff --git a/src/compiler/typed-optimization.h b/src/compiler/typed-optimization.h
index 54d780c..fb2db72 100644
--- a/src/compiler/typed-optimization.h
+++ b/src/compiler/typed-optimization.h
@@ -5,8 +5,10 @@
 #ifndef V8_COMPILER_TYPED_OPTIMIZATION_H_
 #define V8_COMPILER_TYPED_OPTIMIZATION_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/base/flags.h"
 #include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -23,7 +25,8 @@
 class SimplifiedOperatorBuilder;
 class TypeCache;
 
-class TypedOptimization final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE TypedOptimization final
+    : public NON_EXPORTED_BASE(AdvancedReducer) {
  public:
   // Flags that control the mode of operation.
   enum Flag {
@@ -39,10 +42,12 @@
   Reduction Reduce(Node* node) final;
 
  private:
+  Reduction ReduceCheckHeapObject(Node* node);
   Reduction ReduceCheckMaps(Node* node);
   Reduction ReduceCheckString(Node* node);
   Reduction ReduceLoadField(Node* node);
   Reduction ReduceNumberRoundop(Node* node);
+  Reduction ReduceNumberToUint8Clamped(Node* node);
   Reduction ReducePhi(Node* node);
   Reduction ReduceSelect(Node* node);
 
diff --git a/src/compiler/typer.cc b/src/compiler/typer.cc
index ec1197b..2642a10 100644
--- a/src/compiler/typer.cc
+++ b/src/compiler/typer.cc
@@ -11,6 +11,7 @@
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph-reducer.h"
 #include "src/compiler/js-operator.h"
+#include "src/compiler/linkage.h"
 #include "src/compiler/loop-variable-optimizer.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/node.h"
@@ -32,8 +33,9 @@
   Typer* const typer_;
 };
 
-Typer::Typer(Isolate* isolate, Graph* graph)
+Typer::Typer(Isolate* isolate, Flags flags, Graph* graph)
     : isolate_(isolate),
+      flags_(flags),
       graph_(graph),
       decorator_(nullptr),
       cache_(TypeCache::Get()),
@@ -41,9 +43,9 @@
   Zone* zone = this->zone();
   Factory* const factory = isolate->factory();
 
-  singleton_false_ = Type::Constant(factory->false_value(), zone);
-  singleton_true_ = Type::Constant(factory->true_value(), zone);
-  singleton_the_hole_ = Type::Constant(factory->the_hole_value(), zone);
+  singleton_false_ = Type::HeapConstant(factory->false_value(), zone);
+  singleton_true_ = Type::HeapConstant(factory->true_value(), zone);
+  singleton_the_hole_ = Type::HeapConstant(factory->the_hole_value(), zone);
   falsish_ = Type::Union(
       Type::Undetectable(),
       Type::Union(Type::Union(singleton_false_, cache_.kZeroish, zone),
@@ -290,7 +292,6 @@
   JS_SIMPLE_BINOP_LIST(DECLARE_METHOD)
 #undef DECLARE_METHOD
 
-  static Type* JSTypeOfTyper(Type*, Typer*);
   static Type* JSCallFunctionTyper(Type*, Typer*);
 
   static Type* ReferenceEqualTyper(Type*, Type*, Typer*);
@@ -547,25 +548,63 @@
 
 // Common operators.
 
-Type* Typer::Visitor::TypeParameter(Node* node) { return Type::Any(); }
+Type* Typer::Visitor::TypeParameter(Node* node) {
+  Node* const start = node->InputAt(0);
+  DCHECK_EQ(IrOpcode::kStart, start->opcode());
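+  // Besides the JS parameters, the Start node produces values for the
+  // closure, the new.target, the argument count and the context, hence the
+  // -4 below.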
+  int const parameter_count = start->op()->ValueOutputCount() - 4;
+  DCHECK_LE(1, parameter_count);
+  int const index = ParameterIndexOf(node->op());
+  if (index == Linkage::kJSCallClosureParamIndex) {
+    return Type::Function();
+  } else if (index == 0) {
+    if (typer_->flags() & Typer::kThisIsReceiver) {
+      return Type::Receiver();
+    } else {
+      // Parameter[this] can be the_hole for derived class constructors.
+      return Type::Union(Type::Hole(), Type::NonInternal(), typer_->zone());
+    }
+  } else if (index == Linkage::GetJSCallNewTargetParamIndex(parameter_count)) {
+    if (typer_->flags() & Typer::kNewTargetIsReceiver) {
+      return Type::Receiver();
+    } else {
+      return Type::Union(Type::Receiver(), Type::Undefined(), typer_->zone());
+    }
+  } else if (index == Linkage::GetJSCallArgCountParamIndex(parameter_count)) {
+    return Type::Range(0.0, Code::kMaxArguments, typer_->zone());
+  } else if (index == Linkage::GetJSCallContextParamIndex(parameter_count)) {
+    return Type::OtherInternal();
+  }
+  return Type::NonInternal();
+}
 
 Type* Typer::Visitor::TypeOsrValue(Node* node) { return Type::Any(); }
 
+Type* Typer::Visitor::TypeOsrGuard(Node* node) {
+  switch (OsrGuardTypeOf(node->op())) {
+    case OsrGuardType::kUninitialized:
+      return Type::None();
+    case OsrGuardType::kSignedSmall:
+      return Type::SignedSmall();
+    case OsrGuardType::kAny:
+      return Type::Any();
+  }
+  UNREACHABLE();
+  return nullptr;
+}
+
 Type* Typer::Visitor::TypeRetain(Node* node) {
   UNREACHABLE();
   return nullptr;
 }
 
 Type* Typer::Visitor::TypeInt32Constant(Node* node) {
-  double number = OpParameter<int32_t>(node);
-  return Type::Intersect(Type::Range(number, number, zone()),
-                         Type::Integral32(), zone());
+  UNREACHABLE();
+  return nullptr;
 }
 
-
 Type* Typer::Visitor::TypeInt64Constant(Node* node) {
-  // TODO(rossberg): This actually seems to be a PointerConstant so far...
-  return Type::Internal();  // TODO(rossberg): Add int64 bitset type?
+  UNREACHABLE();
+  return nullptr;
 }
 
 Type* Typer::Visitor::TypeRelocatableInt32Constant(Node* node) {
@@ -583,32 +622,27 @@
   return nullptr;
 }
 
-
 Type* Typer::Visitor::TypeFloat64Constant(Node* node) {
   UNREACHABLE();
   return nullptr;
 }
 
-
 Type* Typer::Visitor::TypeNumberConstant(Node* node) {
-  Factory* f = isolate()->factory();
   double number = OpParameter<double>(node);
-  if (Type::IsInteger(number)) {
-    return Type::Range(number, number, zone());
-  }
-  return Type::Constant(f->NewNumber(number), zone());
+  return Type::NewConstant(number, zone());
 }
 
-
 Type* Typer::Visitor::TypeHeapConstant(Node* node) {
   return TypeConstant(OpParameter<Handle<HeapObject>>(node));
 }
 
-
 Type* Typer::Visitor::TypeExternalConstant(Node* node) {
-  return Type::Internal();
+  return Type::ExternalPointer();
 }
 
+Type* Typer::Visitor::TypePointerConstant(Node* node) {
+  return Type::ExternalPointer();
+}
 
 Type* Typer::Visitor::TypeSelect(Node* node) {
   return Type::Union(Operand(node, 1), Operand(node, 2), zone());
@@ -784,12 +818,15 @@
 
 Type* Typer::Visitor::TypeStateValues(Node* node) { return Type::Internal(); }
 
-Type* Typer::Visitor::TypeObjectState(Node* node) { return Type::Internal(); }
-
 Type* Typer::Visitor::TypeTypedStateValues(Node* node) {
   return Type::Internal();
 }
 
+Type* Typer::Visitor::TypeObjectState(Node* node) { return Type::Internal(); }
+
+Type* Typer::Visitor::TypeTypedObjectState(Node* node) {
+  return Type::Internal();
+}
 
 Type* Typer::Visitor::TypeCall(Node* node) { return Type::Any(); }
 
@@ -823,7 +860,7 @@
       (lhs->Max() < rhs->Min() || lhs->Min() > rhs->Max())) {
     return t->singleton_false_;
   }
-  if (lhs->IsConstant() && rhs->Is(lhs)) {
+  if (lhs->IsHeapConstant() && rhs->Is(lhs)) {
     // Types are equal and are inhabited only by a single semantic value,
     // which is not nan due to the earlier check.
     return t->singleton_true_;
@@ -860,7 +897,7 @@
       !lhs->Maybe(rhs)) {
     return t->singleton_false_;
   }
-  if (lhs->IsConstant() && rhs->Is(lhs)) {
+  if (lhs->IsHeapConstant() && rhs->Is(lhs)) {
     // Types are equal and are inhabited only by a single semantic value,
     // which is not nan due to the earlier check.
     return t->singleton_true_;
@@ -894,7 +931,7 @@
   if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return kComparisonUndefined;
 
   ComparisonOutcome result;
-  if (lhs->IsConstant() && rhs->Is(lhs)) {
+  if (lhs->IsHeapConstant() && rhs->Is(lhs)) {
     // Types are equal and are inhabited only by a single semantic value.
     result = kComparisonFalse;
   } else if (lhs->Min() >= rhs->Max()) {
@@ -1005,33 +1042,8 @@
 // JS unary operators.
 
 
-Type* Typer::Visitor::JSTypeOfTyper(Type* type, Typer* t) {
-  Factory* const f = t->isolate()->factory();
-  if (type->Is(Type::Boolean())) {
-    return Type::Constant(f->boolean_string(), t->zone());
-  } else if (type->Is(Type::Number())) {
-    return Type::Constant(f->number_string(), t->zone());
-  } else if (type->Is(Type::String())) {
-    return Type::Constant(f->string_string(), t->zone());
-  } else if (type->Is(Type::Symbol())) {
-    return Type::Constant(f->symbol_string(), t->zone());
-  } else if (type->Is(Type::Union(Type::Undefined(), Type::OtherUndetectable(),
-                                  t->zone()))) {
-    return Type::Constant(f->undefined_string(), t->zone());
-  } else if (type->Is(Type::Null())) {
-    return Type::Constant(f->object_string(), t->zone());
-  } else if (type->Is(Type::Function())) {
-    return Type::Constant(f->function_string(), t->zone());
-  } else if (type->IsConstant()) {
-    return Type::Constant(
-        Object::TypeOf(t->isolate(), type->AsConstant()->Value()), t->zone());
-  }
-  return Type::InternalizedString();
-}
-
-
 Type* Typer::Visitor::TypeJSTypeOf(Node* node) {
-  return TypeUnaryOp(node, JSTypeOfTyper);
+  return Type::InternalizedString();
 }
 
 
@@ -1091,6 +1103,9 @@
   return Type::OtherObject();
 }
 
+Type* Typer::Visitor::TypeJSCreateKeyValueArray(Node* node) {
+  return Type::OtherObject();
+}
 
 Type* Typer::Visitor::TypeJSCreateLiteralArray(Node* node) {
   return Type::OtherObject();
@@ -1281,11 +1296,10 @@
   return Type::Receiver();
 }
 
-
 Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
-  if (fun->IsConstant() && fun->AsConstant()->Value()->IsJSFunction()) {
+  if (fun->IsHeapConstant() && fun->AsHeapConstant()->Value()->IsJSFunction()) {
     Handle<JSFunction> function =
-        Handle<JSFunction>::cast(fun->AsConstant()->Value());
+        Handle<JSFunction>::cast(fun->AsHeapConstant()->Value());
     if (function->shared()->HasBuiltinFunctionId()) {
       switch (function->shared()->builtin_function_id()) {
         case kMathRandom:
@@ -1355,6 +1369,8 @@
         case kNumberIsNaN:
         case kNumberIsSafeInteger:
           return Type::Boolean();
+        case kNumberParseFloat:
+          return Type::Number();
         case kNumberParseInt:
           return t->cache_.kIntegerOrMinusZeroOrNaN;
         case kNumberToString:
@@ -1371,9 +1387,19 @@
         case kStringToUpperCase:
           return Type::String();
 
+        case kStringIterator:
         case kStringIteratorNext:
           return Type::OtherObject();
 
+        case kArrayEntries:
+        case kArrayKeys:
+        case kArrayValues:
+        case kTypedArrayEntries:
+        case kTypedArrayKeys:
+        case kTypedArrayValues:
+        case kArrayIteratorNext:
+          return Type::OtherObject();
+
         // Array functions.
         case kArrayIndexOf:
         case kArrayLastIndexOf:
@@ -1428,7 +1454,6 @@
     case Runtime::kInlineIsRegExp:
       return Type::Boolean();
     case Runtime::kInlineCreateIterResultObject:
-    case Runtime::kInlineRegExpConstructResult:
       return Type::OtherObject();
     case Runtime::kInlineSubString:
     case Runtime::kInlineStringCharFromCode:
@@ -1468,7 +1493,7 @@
 Type* Typer::Visitor::TypeJSForInPrepare(Node* node) {
   STATIC_ASSERT(Map::EnumLengthBits::kMax <= FixedArray::kMaxLength);
   Type* const cache_type =
-      Type::Union(typer_->cache_.kSmi, Type::OtherInternal(), zone());
+      Type::Union(Type::SignedSmall(), Type::OtherInternal(), zone());
   Type* const cache_array = Type::OtherInternal();
   Type* const cache_length = typer_->cache_.kFixedArrayLengthType;
   return Type::Tuple(cache_type, cache_array, cache_length, zone());
@@ -1483,13 +1508,20 @@
   return nullptr;
 }
 
+Type* Typer::Visitor::TypeJSLoadModule(Node* node) { return Type::Any(); }
+
+Type* Typer::Visitor::TypeJSStoreModule(Node* node) {
+  UNREACHABLE();
+  return nullptr;
+}
+
 Type* Typer::Visitor::TypeJSGeneratorStore(Node* node) {
   UNREACHABLE();
   return nullptr;
 }
 
 Type* Typer::Visitor::TypeJSGeneratorRestoreContinuation(Node* node) {
-  return typer_->cache_.kSmi;
+  return Type::SignedSmall();
 }
 
 Type* Typer::Visitor::TypeJSGeneratorRestoreRegister(Node* node) {
@@ -1536,7 +1568,7 @@
 
 // static
 Type* Typer::Visitor::ReferenceEqualTyper(Type* lhs, Type* rhs, Typer* t) {
-  if (lhs->IsConstant() && rhs->Is(lhs)) {
+  if (lhs->IsHeapConstant() && rhs->Is(lhs)) {
     return t->singleton_true_;
   }
   return Type::Boolean();
@@ -1556,34 +1588,15 @@
 }
 
 Type* Typer::Visitor::StringFromCharCodeTyper(Type* type, Typer* t) {
-  type = NumberToUint32(ToNumber(type, t), t);
-  Factory* f = t->isolate()->factory();
-  double min = type->Min();
-  double max = type->Max();
-  if (min == max) {
-    uint32_t code = static_cast<uint32_t>(min) & String::kMaxUtf16CodeUnitU;
-    Handle<String> string = f->LookupSingleCharacterStringFromCode(code);
-    return Type::Constant(string, t->zone());
-  }
   return Type::String();
 }
 
 Type* Typer::Visitor::StringFromCodePointTyper(Type* type, Typer* t) {
-  type = NumberToUint32(ToNumber(type, t), t);
-  Factory* f = t->isolate()->factory();
-  double min = type->Min();
-  double max = type->Max();
-  if (min == max) {
-    uint32_t code = static_cast<uint32_t>(min) & String::kMaxUtf16CodeUnitU;
-    Handle<String> string = f->LookupSingleCharacterStringFromCode(code);
-    return Type::Constant(string, t->zone());
-  }
   return Type::String();
 }
 
 Type* Typer::Visitor::TypeStringCharCodeAt(Node* node) {
-  // TODO(bmeurer): We could do better here based on inputs.
-  return Type::Range(0, kMaxUInt16, zone());
+  return typer_->cache_.kUint16;
 }
 
 Type* Typer::Visitor::TypeStringFromCharCode(Node* node) {
@@ -1663,8 +1676,6 @@
 }
 
 Type* Typer::Visitor::TypeLoadBuffer(Node* node) {
-  // TODO(bmeurer): This typing is not yet correct. Since we can still access
-  // out of bounds, the type in the general case has to include Undefined.
   switch (BufferAccessOf(node->op()).external_array_type()) {
 #define TYPED_ARRAY_CASE(ElemType, type, TYPE, ctype, size) \
   case kExternal##ElemType##Array:                          \
@@ -1751,7 +1762,7 @@
   if (Type::IsInteger(*value)) {
     return Type::Range(value->Number(), value->Number(), zone());
   }
-  return Type::Constant(value, zone());
+  return Type::NewConstant(value, zone());
 }
 
 }  // namespace compiler
diff --git a/src/compiler/typer.h b/src/compiler/typer.h
index 875b483..7f6f90a 100644
--- a/src/compiler/typer.h
+++ b/src/compiler/typer.h
@@ -7,6 +7,7 @@
 
 #include "src/compiler/graph.h"
 #include "src/compiler/operation-typer.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -15,9 +16,16 @@
 // Forward declarations.
 class LoopVariableOptimizer;
 
-class Typer {
+class V8_EXPORT_PRIVATE Typer {
  public:
-  Typer(Isolate* isolate, Graph* graph);
+  enum Flag : uint8_t {
+    kNoFlags = 0,
+    kThisIsReceiver = 1u << 0,       // Parameter this is an Object.
+    kNewTargetIsReceiver = 1u << 1,  // Parameter new.target is an Object.
+  };
+  typedef base::Flags<Flag> Flags;
+
+  Typer(Isolate* isolate, Flags flags, Graph* graph);
   ~Typer();
 
   void Run();
@@ -29,12 +37,14 @@
   class Visitor;
   class Decorator;
 
+  Flags flags() const { return flags_; }
   Graph* graph() const { return graph_; }
   Zone* zone() const { return graph()->zone(); }
   Isolate* isolate() const { return isolate_; }
   OperationTyper* operation_typer() { return &operation_typer_; }
 
   Isolate* const isolate_;
+  Flags const flags_;
   Graph* const graph_;
   Decorator* decorator_;
   TypeCache const& cache_;
@@ -49,6 +59,8 @@
   DISALLOW_COPY_AND_ASSIGN(Typer);
 };
 
+DEFINE_OPERATORS_FOR_FLAGS(Typer::Flags);
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/types.cc b/src/compiler/types.cc
index 43d2f80..806bd8f 100644
--- a/src/compiler/types.cc
+++ b/src/compiler/types.cc
@@ -56,12 +56,6 @@
   return lhs->Min() <= rhs->Min() && rhs->Max() <= lhs->Max();
 }
 
-bool Type::Contains(RangeType* lhs, ConstantType* rhs) {
-  DisallowHeapAllocation no_allocation;
-  return IsInteger(*rhs->Value()) && lhs->Min() <= rhs->Value()->Number() &&
-         rhs->Value()->Number() <= lhs->Max();
-}
-
 bool Type::Contains(RangeType* range, i::Object* val) {
   DisallowHeapAllocation no_allocation;
   return IsInteger(val) && range->Min() <= val->Number() &&
@@ -82,7 +76,8 @@
     return min;
   }
   if (this->IsRange()) return this->AsRange()->Min();
-  if (this->IsConstant()) return this->AsConstant()->Value()->Number();
+  if (this->IsOtherNumberConstant())
+    return this->AsOtherNumberConstant()->Value();
   UNREACHABLE();
   return 0;
 }
@@ -98,7 +93,8 @@
     return max;
   }
   if (this->IsRange()) return this->AsRange()->Max();
-  if (this->IsConstant()) return this->AsConstant()->Value()->Number();
+  if (this->IsOtherNumberConstant())
+    return this->AsOtherNumberConstant()->Value();
   UNREACHABLE();
   return 0;
 }
@@ -139,7 +135,9 @@
     }
     return bitset;
   }
-  if (type->IsConstant()) return type->AsConstant()->Lub();
+  if (type->IsHeapConstant()) return type->AsHeapConstant()->Lub();
+  if (type->IsOtherNumberConstant())
+    return type->AsOtherNumberConstant()->Lub();
   if (type->IsRange()) return type->AsRange()->Lub();
   if (type->IsTuple()) return kOtherInternal;
   UNREACHABLE();
@@ -205,6 +203,8 @@
     case JS_DATE_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_GENERATOR_OBJECT_TYPE:
+    case JS_MODULE_NAMESPACE_TYPE:
+    case JS_FIXED_ARRAY_ITERATOR_TYPE:
     case JS_ARRAY_BUFFER_TYPE:
     case JS_ARRAY_TYPE:
     case JS_REGEXP_TYPE:  // TODO(rossberg): there should be a RegExp type.
@@ -215,6 +215,43 @@
     case JS_SET_ITERATOR_TYPE:
     case JS_MAP_ITERATOR_TYPE:
     case JS_STRING_ITERATOR_TYPE:
+
+    case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
+    case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
+    case JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE:
+    case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE:
+
     case JS_WEAK_MAP_TYPE:
     case JS_WEAK_SET_TYPE:
     case JS_PROMISE_TYPE:
@@ -231,6 +268,7 @@
     case ALLOCATION_SITE_TYPE:
     case ACCESSOR_INFO_TYPE:
     case SHARED_FUNCTION_INFO_TYPE:
+    case FUNCTION_TEMPLATE_INFO_TYPE:
     case ACCESSOR_PAIR_TYPE:
     case FIXED_ARRAY_TYPE:
     case FIXED_DOUBLE_ARRAY_TYPE:
@@ -242,6 +280,7 @@
     case CODE_TYPE:
     case PROPERTY_CELL_TYPE:
     case MODULE_TYPE:
+    case MODULE_INFO_ENTRY_TYPE:
       return kOtherInternal;
 
     // Remaining instance types are unsupported for now. If any of them do
@@ -257,7 +296,6 @@
     case ACCESS_CHECK_INFO_TYPE:
     case INTERCEPTOR_INFO_TYPE:
     case CALL_HANDLER_INFO_TYPE:
-    case FUNCTION_TEMPLATE_INFO_TYPE:
     case OBJECT_TEMPLATE_INFO_TYPE:
     case SIGNATURE_INFO_TYPE:
     case TYPE_SWITCH_INFO_TYPE:
@@ -265,12 +303,14 @@
     case TYPE_FEEDBACK_INFO_TYPE:
     case ALIASED_ARGUMENTS_ENTRY_TYPE:
     case BOX_TYPE:
-    case PROMISE_CONTAINER_TYPE:
+    case PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE:
+    case PROMISE_REACTION_JOB_INFO_TYPE:
     case DEBUG_INFO_TYPE:
     case BREAK_POINT_INFO_TYPE:
     case CELL_TYPE:
     case WEAK_CELL_TYPE:
     case PROTOTYPE_INFO_TYPE:
+    case TUPLE3_TYPE:
     case CONTEXT_EXTENSION_TYPE:
       UNREACHABLE();
       return kNone;
@@ -390,14 +430,43 @@
   return std::numeric_limits<double>::quiet_NaN();
 }
 
+// static
+bool OtherNumberConstantType::IsOtherNumberConstant(double value) {
+  // Not an integer, not NaN, and not -0.
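+  // E.g. 0.5 qualifies; 1.0 (an integer), NaN and -0.0 do not.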
+  return !std::isnan(value) && !Type::IsInteger(value) &&
+         !i::IsMinusZero(value);
+}
+
+// static
+bool OtherNumberConstantType::IsOtherNumberConstant(Object* value) {
+  return value->IsHeapNumber() &&
+         IsOtherNumberConstant(HeapNumber::cast(value)->value());
+}
+
+HeapConstantType::HeapConstantType(BitsetType::bitset bitset,
+                                   i::Handle<i::HeapObject> object)
+    : TypeBase(kHeapConstant), bitset_(bitset), object_(object) {
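+  // Numbers are represented as Range/OtherNumberConstant types and strings
+  // as bitset types (see Type::NewConstant), never as HeapConstant.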
+  DCHECK(!object->IsHeapNumber());
+  DCHECK(!object->IsString());
+}
+
 // -----------------------------------------------------------------------------
 // Predicates.
 
 bool Type::SimplyEquals(Type* that) {
   DisallowHeapAllocation no_allocation;
-  if (this->IsConstant()) {
-    return that->IsConstant() &&
-           *this->AsConstant()->Value() == *that->AsConstant()->Value();
+  if (this->IsHeapConstant()) {
+    return that->IsHeapConstant() &&
+           this->AsHeapConstant()->Value().address() ==
+               that->AsHeapConstant()->Value().address();
+  }
+  if (this->IsOtherNumberConstant()) {
+    return that->IsOtherNumberConstant() &&
+           this->AsOtherNumberConstant()->Value() ==
+               that->AsOtherNumberConstant()->Value();
+  }
+  if (this->IsRange()) {
+    if (that->IsHeapConstant() || that->IsOtherNumberConstant()) return false;
   }
   if (this->IsTuple()) {
     if (!that->IsTuple()) return false;
@@ -446,9 +515,7 @@
   }
 
   if (that->IsRange()) {
-    return (this->IsRange() && Contains(that->AsRange(), this->AsRange())) ||
-           (this->IsConstant() &&
-            Contains(that->AsRange(), this->AsConstant()));
+    return (this->IsRange() && Contains(that->AsRange(), this->AsRange()));
   }
   if (this->IsRange()) return false;
 
@@ -481,9 +548,6 @@
   if (this->IsBitset() && that->IsBitset()) return true;
 
   if (this->IsRange()) {
-    if (that->IsConstant()) {
-      return Contains(this->AsRange(), that->AsConstant());
-    }
     if (that->IsRange()) {
       return Overlap(this->AsRange(), that->AsRange());
     }
@@ -673,9 +737,6 @@
       }
       return size;
     }
-    if (rhs->IsConstant() && Contains(lhs->AsRange(), rhs->AsConstant())) {
-      return AddToUnion(rhs, result, size, zone);
-    }
     if (rhs->IsRange()) {
       RangeType::Limits lim = RangeType::Limits::Intersect(
           RangeType::Limits(lhs->AsRange()), RangeType::Limits(rhs->AsRange()));
@@ -743,6 +804,40 @@
   return RangeType::New(range_min, range_max, zone);
 }
 
+Type* Type::NewConstant(double value, Zone* zone) {
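+  // E.g. NewConstant(1.0) yields Range(1, 1), NewConstant(-0.0) yields
+  // MinusZero, and NewConstant(0.5) yields OtherNumberConstant(0.5).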
+  if (IsInteger(value)) {
+    return Range(value, value, zone);
+  } else if (i::IsMinusZero(value)) {
+    return Type::MinusZero();
+  } else if (std::isnan(value)) {
+    return Type::NaN();
+  }
+
+  DCHECK(OtherNumberConstantType::IsOtherNumberConstant(value));
+  return OtherNumberConstant(value, zone);
+}
+
+Type* Type::NewConstant(i::Handle<i::Object> value, Zone* zone) {
+  if (IsInteger(*value)) {
+    double v = value->Number();
+    return Range(v, v, zone);
+  } else if (value->IsHeapNumber()) {
+    return NewConstant(value->Number(), zone);
+  } else if (value->IsString()) {
+    bitset b = BitsetType::Lub(*value);
+    DCHECK(b == BitsetType::kInternalizedString ||
+           b == BitsetType::kOtherString);
+    if (b == BitsetType::kInternalizedString) {
+      return Type::InternalizedString();
+    } else if (b == BitsetType::kOtherString) {
+      return Type::OtherString();
+    } else {
+      UNREACHABLE();
+    }
+  }
+  return HeapConstant(i::Handle<i::HeapObject>::cast(value), zone);
+}
+
 Type* Type::Union(Type* type1, Type* type2, Zone* zone) {
   // Fast case: bit sets.
   if (type1->IsBitset() && type2->IsBitset()) {
@@ -833,17 +928,14 @@
   return union_type;
 }
 
-// -----------------------------------------------------------------------------
-// Iteration.
-
 int Type::NumConstants() {
   DisallowHeapAllocation no_allocation;
-  if (this->IsConstant()) {
+  if (this->IsHeapConstant() || this->IsOtherNumberConstant()) {
     return 1;
   } else if (this->IsUnion()) {
     int result = 0;
     for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
-      if (this->AsUnion()->Get(i)->IsConstant()) ++result;
+      if (this->AsUnion()->Get(i)->IsHeapConstant()) ++result;
     }
     return result;
   } else {
@@ -905,8 +997,11 @@
   DisallowHeapAllocation no_allocation;
   if (this->IsBitset()) {
     BitsetType::Print(os, this->AsBitset());
-  } else if (this->IsConstant()) {
-    os << "Constant(" << Brief(*this->AsConstant()->Value()) << ")";
+  } else if (this->IsHeapConstant()) {
+    os << "HeapConstant(" << Brief(*this->AsHeapConstant()->Value()) << ")";
+  } else if (this->IsOtherNumberConstant()) {
+    os << "OtherNumberConstant(" << this->AsOtherNumberConstant()->Value()
+       << ")";
   } else if (this->IsRange()) {
     std::ostream::fmtflags saved_flags = os.setf(std::ios::fixed);
     std::streamsize saved_precision = os.precision(0);
diff --git a/src/compiler/types.h b/src/compiler/types.h
index ef5bec3..e783570 100644
--- a/src/compiler/types.h
+++ b/src/compiler/types.h
@@ -5,7 +5,9 @@
 #ifndef V8_COMPILER_TYPES_H_
 #define V8_COMPILER_TYPES_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/conversions.h"
+#include "src/globals.h"
 #include "src/handles.h"
 #include "src/objects.h"
 #include "src/ostreams.h"
@@ -121,6 +123,7 @@
   V(Function,            1u << 19)  \
   V(Hole,                1u << 20)  \
   V(OtherInternal,       1u << 21)  \
+  V(ExternalPointer,     1u << 22)  \
   \
   V(Signed31,                   kUnsigned30 | kNegative31) \
   V(Signed32,                   kSigned31 | kOtherUnsigned31 | kOtherSigned32) \
@@ -155,10 +158,11 @@
   V(DetectableReceiver,         kFunction | kOtherObject | kProxy) \
   V(Object,                     kFunction | kOtherObject | kOtherUndetectable) \
   V(Receiver,                   kObject | kProxy) \
+  V(ReceiverOrUndefined,        kReceiver | kUndefined) \
   V(StringOrReceiver,           kString | kReceiver) \
   V(Unique,                     kBoolean | kUniqueName | kNull | kUndefined | \
                                 kReceiver) \
-  V(Internal,                   kHole | kOtherInternal) \
+  V(Internal,                   kHole | kExternalPointer | kOtherInternal) \
   V(NonInternal,                kPrimitive | kReceiver) \
   V(NonNumber,                  kUnique | kString | kInternal) \
   V(Any,                        0xfffffffeu)
@@ -190,7 +194,7 @@
 // -----------------------------------------------------------------------------
 // Bitset types (internal).
 
-class BitsetType {
+class V8_EXPORT_PRIVATE BitsetType {
  public:
   typedef uint32_t bitset;  // Internal
 
@@ -263,7 +267,7 @@
  protected:
   friend class Type;
 
-  enum Kind { kConstant, kTuple, kUnion, kRange };
+  enum Kind { kHeapConstant, kOtherNumberConstant, kTuple, kUnion, kRange };
 
   Kind kind() const { return kind_; }
   explicit TypeBase(Kind kind) : kind_(kind) {}
@@ -287,34 +291,63 @@
 // -----------------------------------------------------------------------------
 // Constant types.
 
-class ConstantType : public TypeBase {
+class OtherNumberConstantType : public TypeBase {
  public:
-  i::Handle<i::Object> Value() { return object_; }
+  double Value() { return value_; }
+
+  static bool IsOtherNumberConstant(double value);
+  static bool IsOtherNumberConstant(Object* value);
 
  private:
   friend class Type;
   friend class BitsetType;
 
-  static Type* New(i::Handle<i::Object> value, Zone* zone) {
+  static Type* New(double value, Zone* zone) {
+    return AsType(new (zone->New(sizeof(OtherNumberConstantType)))
+                      OtherNumberConstantType(value));  // NOLINT
+  }
+
+  static OtherNumberConstantType* cast(Type* type) {
+    DCHECK(IsKind(type, kOtherNumberConstant));
+    return static_cast<OtherNumberConstantType*>(FromType(type));
+  }
+
+  explicit OtherNumberConstantType(double value)
+      : TypeBase(kOtherNumberConstant), value_(value) {
+    CHECK(IsOtherNumberConstant(value));
+  }
+
+  BitsetType::bitset Lub() { return BitsetType::kOtherNumber; }
+
+  double value_;
+};
+
+class V8_EXPORT_PRIVATE HeapConstantType : public NON_EXPORTED_BASE(TypeBase) {
+ public:
+  i::Handle<i::HeapObject> Value() { return object_; }
+
+ private:
+  friend class Type;
+  friend class BitsetType;
+
+  static Type* New(i::Handle<i::HeapObject> value, Zone* zone) {
     BitsetType::bitset bitset = BitsetType::Lub(*value);
-    return AsType(new (zone->New(sizeof(ConstantType)))
-                      ConstantType(bitset, value));
+    return AsType(new (zone->New(sizeof(HeapConstantType)))
+                      HeapConstantType(bitset, value));
   }
 
-  static ConstantType* cast(Type* type) {
-    DCHECK(IsKind(type, kConstant));
-    return static_cast<ConstantType*>(FromType(type));
+  static HeapConstantType* cast(Type* type) {
+    DCHECK(IsKind(type, kHeapConstant));
+    return static_cast<HeapConstantType*>(FromType(type));
   }
 
-  ConstantType(BitsetType::bitset bitset, i::Handle<i::Object> object)
-      : TypeBase(kConstant), bitset_(bitset), object_(object) {}
+  HeapConstantType(BitsetType::bitset bitset, i::Handle<i::HeapObject> object);
 
   BitsetType::bitset Lub() { return bitset_; }
 
   BitsetType::bitset bitset_;
-  Handle<i::Object> object_;
+  Handle<i::HeapObject> object_;
 };
-// TODO(neis): Also cache value if numerical.
 
 // -----------------------------------------------------------------------------
 // Range types.
@@ -457,7 +490,7 @@
   bool Wellformed();
 };
 
-class Type {
+class V8_EXPORT_PRIVATE Type {
  public:
   typedef BitsetType::bitset bitset;  // Internal
 
@@ -474,8 +507,11 @@
     return BitsetType::New(BitsetType::UnsignedSmall());
   }
 
-  static Type* Constant(i::Handle<i::Object> value, Zone* zone) {
-    return ConstantType::New(value, zone);
+  static Type* OtherNumberConstant(double value, Zone* zone) {
+    return OtherNumberConstantType::New(value, zone);
+  }
+  static Type* HeapConstant(i::Handle<i::HeapObject> value, Zone* zone) {
+    return HeapConstantType::New(value, zone);
   }
   static Type* Range(double min, double max, Zone* zone) {
     return RangeType::New(min, max, zone);
@@ -488,6 +524,10 @@
     return tuple;
   }
 
+  // NewConstant is a factory that returns a Range, a bitset type, a
+  // HeapConstant or an OtherNumberConstant.
+  static Type* NewConstant(i::Handle<i::Object> value, Zone* zone);
+  static Type* NewConstant(double value, Zone* zone);
+
   static Type* Union(Type* type1, Type* type2, Zone* zone);
   static Type* Intersect(Type* type1, Type* type2, Zone* zone);
 
@@ -515,10 +555,16 @@
 
   // Inspection.
   bool IsRange() { return IsKind(TypeBase::kRange); }
-  bool IsConstant() { return IsKind(TypeBase::kConstant); }
+  bool IsHeapConstant() { return IsKind(TypeBase::kHeapConstant); }
+  bool IsOtherNumberConstant() {
+    return IsKind(TypeBase::kOtherNumberConstant);
+  }
   bool IsTuple() { return IsKind(TypeBase::kTuple); }
 
-  ConstantType* AsConstant() { return ConstantType::cast(this); }
+  HeapConstantType* AsHeapConstant() { return HeapConstantType::cast(this); }
+  OtherNumberConstantType* AsOtherNumberConstant() {
+    return OtherNumberConstantType::cast(this);
+  }
   RangeType* AsRange() { return RangeType::cast(this); }
   TupleType* AsTuple() { return TupleType::cast(this); }
 
@@ -582,7 +628,6 @@
 
   static bool Overlap(RangeType* lhs, RangeType* rhs);
   static bool Contains(RangeType* lhs, RangeType* rhs);
-  static bool Contains(RangeType* range, ConstantType* constant);
   static bool Contains(RangeType* range, i::Object* val);
 
   static int UpdateRange(Type* type, UnionType* result, int size, Zone* zone);
diff --git a/src/compiler/value-numbering-reducer.cc b/src/compiler/value-numbering-reducer.cc
index 4769cb0..30473f2 100644
--- a/src/compiler/value-numbering-reducer.cc
+++ b/src/compiler/value-numbering-reducer.cc
@@ -69,7 +69,7 @@
   }
 
   DCHECK(size_ < capacity_);
-  DCHECK(size_ * kCapacityToSizeRatio < capacity_);
+  DCHECK(size_ + size_ / 4 < capacity_);
 
   const size_t mask = capacity_ - 1;
   size_t dead = capacity_;
@@ -85,10 +85,10 @@
         entries_[i] = node;
         size_++;
 
-        // Resize to keep load factor below 1/kCapacityToSizeRatio.
-        if (size_ * kCapacityToSizeRatio >= capacity_) Grow();
+        // Resize to keep the load factor below 80%.
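+        // (size_ + size_ / 4 >= capacity_ is size_ >= 0.8 * capacity_,
+        // up to integer rounding.)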
+        if (size_ + size_ / 4 >= capacity_) Grow();
       }
-      DCHECK(size_ * kCapacityToSizeRatio < capacity_);
+      DCHECK(size_ + size_ / 4 < capacity_);
       return NoChange();
     }
 
@@ -112,10 +112,31 @@
         if (entry->IsDead()) {
           continue;
         }
+        if (entry == node) {
+          // A collision with ourselves doesn't count as a real collision.
+          // Opportunistically clean up the duplicate entry if we're at the end
+          // of a bucket.
+          if (!entries_[(j + 1) & mask]) {
+            entries_[j] = nullptr;
+            size_--;
+            return NoChange();
+          }
+          // Otherwise, keep searching for another collision.
+          continue;
+        }
         if (Equals(entry, node)) {
-          // Overwrite the colliding entry with the actual entry.
-          entries_[i] = entry;
-          return Replace(entry);
+          Reduction reduction = ReplaceIfTypesMatch(node, entry);
+          if (reduction.Changed()) {
+            // Overwrite the colliding entry with the actual entry.
+            entries_[i] = entry;
+            // Opportunistically clean up the duplicate entry if we're at the
+            // end of a bucket.
+            if (!entries_[(j + 1) & mask]) {
+              entries_[j] = nullptr;
+              size_--;
+            }
+          }
+          return reduction;
         }
       }
     }
@@ -126,37 +147,40 @@
       continue;
     }
     if (Equals(entry, node)) {
-      // Make sure the replacement has at least as good type as the original
-      // node.
-      if (NodeProperties::IsTyped(entry) && NodeProperties::IsTyped(node)) {
-        Type* entry_type = NodeProperties::GetType(entry);
-        Type* node_type = NodeProperties::GetType(node);
-        if (!entry_type->Is(node_type)) {
-          // Ideally, we would set an intersection of {entry_type} and
-          // {node_type} here. However, typing of NumberConstants assigns
-          // different types to constants with the same value (it creates
-          // a fresh heap number), which would make the intersection empty.
-          // To be safe, we use the smaller type if the types are comparable.
-          if (node_type->Is(entry_type)) {
-            NodeProperties::SetType(entry, node_type);
-          } else {
-            // Types are not comparable => do not replace.
-            return NoChange();
-          }
-        }
-      }
-      return Replace(entry);
+      return ReplaceIfTypesMatch(node, entry);
     }
   }
 }
 
+Reduction ValueNumberingReducer::ReplaceIfTypesMatch(Node* node,
+                                                     Node* replacement) {
+  // Make sure the replacement has at least as good a type as the original
+  // node.
+  if (NodeProperties::IsTyped(replacement) && NodeProperties::IsTyped(node)) {
+    Type* replacement_type = NodeProperties::GetType(replacement);
+    Type* node_type = NodeProperties::GetType(node);
+    if (!replacement_type->Is(node_type)) {
+      // Ideally, we would set an intersection of {replacement_type} and
+      // {node_type} here. However, typing of NumberConstants assigns different
+      // types to constants with the same value (it creates a fresh heap
+      // number), which would make the intersection empty. To be safe, we use
+      // the smaller type if the types are comparable.
+      if (node_type->Is(replacement_type)) {
+        NodeProperties::SetType(replacement, node_type);
+      } else {
+        // Types are not comparable => do not replace.
+        return NoChange();
+      }
+    }
+  }
+  return Replace(replacement);
+}
+
 
 void ValueNumberingReducer::Grow() {
-  // Allocate a new block of entries kCapacityToSizeRatio times the previous
-  // capacity.
+  // Allocate a new block of entries with double the previous capacity.
   Node** const old_entries = entries_;
   size_t const old_capacity = capacity_;
-  capacity_ *= kCapacityToSizeRatio;
+  capacity_ *= 2;
   entries_ = temp_zone()->NewArray<Node*>(capacity_);
   memset(entries_, 0, sizeof(*entries_) * capacity_);
   size_ = 0;
diff --git a/src/compiler/value-numbering-reducer.h b/src/compiler/value-numbering-reducer.h
index f700c85..521ce59 100644
--- a/src/compiler/value-numbering-reducer.h
+++ b/src/compiler/value-numbering-reducer.h
@@ -5,13 +5,16 @@
 #ifndef V8_COMPILER_VALUE_NUMBERING_REDUCER_H_
 #define V8_COMPILER_VALUE_NUMBERING_REDUCER_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-class ValueNumberingReducer final : public Reducer {
+class V8_EXPORT_PRIVATE ValueNumberingReducer final
+    : public NON_EXPORTED_BASE(Reducer) {
  public:
   explicit ValueNumberingReducer(Zone* temp_zone, Zone* graph_zone);
   ~ValueNumberingReducer();
@@ -19,8 +22,9 @@
   Reduction Reduce(Node* node) override;
 
  private:
-  enum { kInitialCapacity = 256u, kCapacityToSizeRatio = 2u };
+  enum { kInitialCapacity = 256u };
 
+  Reduction ReplaceIfTypesMatch(Node* node, Node* replacement);
   void Grow();
   Zone* temp_zone() const { return temp_zone_; }
   Zone* graph_zone() const { return graph_zone_; }
diff --git a/src/compiler/verifier.cc b/src/compiler/verifier.cc
index b9faeee..872305b 100644
--- a/src/compiler/verifier.cc
+++ b/src/compiler/verifier.cc
@@ -333,40 +333,35 @@
       CheckTypeIs(node, Type::Any());
       break;
     }
-    case IrOpcode::kInt32Constant:  // TODO(rossberg): rename Word32Constant?
-      // Constants have no inputs.
-      CHECK_EQ(0, input_count);
-      // Type is a 32 bit integer, signed or unsigned.
-      CheckTypeIs(node, Type::Integral32());
-      break;
-    case IrOpcode::kInt64Constant:
-      // Constants have no inputs.
-      CHECK_EQ(0, input_count);
-      // Type is internal.
-      // TODO(rossberg): Introduce proper Int64 type.
-      CheckTypeIs(node, Type::Internal());
-      break;
+    case IrOpcode::kInt32Constant:  // TODO(turbofan): rename Word32Constant?
+    case IrOpcode::kInt64Constant:  // TODO(turbofan): rename Word64Constant?
     case IrOpcode::kFloat32Constant:
     case IrOpcode::kFloat64Constant:
+    case IrOpcode::kRelocatableInt32Constant:
+    case IrOpcode::kRelocatableInt64Constant:
+      // Constants have no inputs.
+      CHECK_EQ(0, input_count);
+      // Type is empty.
+      CheckNotTyped(node);
+      break;
     case IrOpcode::kNumberConstant:
       // Constants have no inputs.
       CHECK_EQ(0, input_count);
       // Type is a number.
       CheckTypeIs(node, Type::Number());
       break;
-    case IrOpcode::kRelocatableInt32Constant:
-    case IrOpcode::kRelocatableInt64Constant:
-      CHECK_EQ(0, input_count);
-      break;
     case IrOpcode::kHeapConstant:
       // Constants have no inputs.
       CHECK_EQ(0, input_count);
+      // Type is anything.
+      CheckTypeIs(node, Type::Any());
       break;
     case IrOpcode::kExternalConstant:
+    case IrOpcode::kPointerConstant:
       // Constants have no inputs.
       CHECK_EQ(0, input_count);
-      // Type is considered internal.
-      CheckTypeIs(node, Type::Internal());
+      // Type is an external pointer.
+      CheckTypeIs(node, Type::ExternalPointer());
       break;
     case IrOpcode::kOsrValue:
       // OSR values have a value and a control input.
@@ -375,6 +370,23 @@
       // Type is merged from other values in the graph and could be any.
       CheckTypeIs(node, Type::Any());
       break;
+    case IrOpcode::kOsrGuard:
+      // OSR values have a value and a control input.
+      CHECK_EQ(1, value_count);
+      CHECK_EQ(1, effect_count);
+      CHECK_EQ(1, control_count);
+      switch (OsrGuardTypeOf(node->op())) {
+        case OsrGuardType::kUninitialized:
+          CheckTypeIs(node, Type::None());
+          break;
+        case OsrGuardType::kSignedSmall:
+          CheckTypeIs(node, Type::SignedSmall());
+          break;
+        case OsrGuardType::kAny:
+          CheckTypeIs(node, Type::Any());
+          break;
+      }
+      break;
     case IrOpcode::kProjection: {
       // Projection has an input that produces enough values.
       int index = static_cast<int>(ProjectionIndexOf(node->op()));
@@ -471,8 +483,9 @@
       break;
     }
     case IrOpcode::kStateValues:
-    case IrOpcode::kObjectState:
     case IrOpcode::kTypedStateValues:
+    case IrOpcode::kObjectState:
+    case IrOpcode::kTypedObjectState:
       // TODO(jarin): what are the constraints on these?
       break;
     case IrOpcode::kCall:
@@ -566,6 +579,10 @@
       // Type is OtherObject.
       CheckTypeIs(node, Type::OtherObject());
       break;
+    case IrOpcode::kJSCreateKeyValueArray:
+      // Type is OtherObject.
+      CheckTypeIs(node, Type::OtherObject());
+      break;
     case IrOpcode::kJSCreateLiteralArray:
     case IrOpcode::kJSCreateLiteralObject:
     case IrOpcode::kJSCreateLiteralRegExp:
@@ -643,6 +660,13 @@
     case IrOpcode::kJSStoreMessage:
       break;
 
+    case IrOpcode::kJSLoadModule:
+      CheckTypeIs(node, Type::Any());
+      break;
+    case IrOpcode::kJSStoreModule:
+      CheckNotTyped(node);
+      break;
+
     case IrOpcode::kJSGeneratorStore:
       CheckNotTyped(node);
       break;
@@ -809,6 +833,7 @@
       CheckTypeIs(node, Type::Signed32());
       break;
     case IrOpcode::kNumberToUint32:
+    case IrOpcode::kNumberToUint8Clamped:
       // Number -> Unsigned32
       CheckValueInputIs(node, 0, Type::Number());
       CheckTypeIs(node, Type::Unsigned32());
@@ -972,6 +997,8 @@
       // CheckTypeIs(node, to));
       break;
     }
+    case IrOpcode::kChangeFloat64ToTaggedPointer:
+      break;
     case IrOpcode::kChangeTaggedToBit: {
       // Boolean /\ TaggedPtr -> Boolean /\ UntaggedInt1
       // TODO(neis): Activate once ChangeRepresentation works in typer.
@@ -1049,6 +1076,7 @@
     case IrOpcode::kCheckedTaggedToInt32:
     case IrOpcode::kCheckedTaggedToFloat64:
     case IrOpcode::kCheckedTaggedToTaggedSigned:
+    case IrOpcode::kCheckedTaggedToTaggedPointer:
     case IrOpcode::kCheckedTruncateTaggedToWord32:
       break;
 
@@ -1288,7 +1316,7 @@
 void Verifier::Run(Graph* graph, Typing typing, CheckInputs check_inputs) {
   CHECK_NOT_NULL(graph->start());
   CHECK_NOT_NULL(graph->end());
-  Zone zone(graph->zone()->allocator());
+  Zone zone(graph->zone()->allocator(), ZONE_NAME);
   Visitor visitor(&zone, typing, check_inputs);
   AllNodes all(&zone, graph);
   for (Node* node : all.reachable) visitor.Check(node);
@@ -1378,7 +1406,7 @@
 
 void ScheduleVerifier::Run(Schedule* schedule) {
   const size_t count = schedule->BasicBlockCount();
-  Zone tmp_zone(schedule->zone()->allocator());
+  Zone tmp_zone(schedule->zone()->allocator(), ZONE_NAME);
   Zone* zone = &tmp_zone;
   BasicBlock* start = schedule->start();
   BasicBlockVector* rpo_order = schedule->rpo_order();
diff --git a/src/compiler/verifier.h b/src/compiler/verifier.h
index 60849e0..db0f453 100644
--- a/src/compiler/verifier.h
+++ b/src/compiler/verifier.h
@@ -6,6 +6,7 @@
 #define V8_COMPILER_VERIFIER_H_
 
 #include "src/base/macros.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -54,7 +55,7 @@
 };
 
 // Verifies properties of a schedule, such as dominance, phi placement, etc.
-class ScheduleVerifier {
+class V8_EXPORT_PRIVATE ScheduleVerifier {
  public:
   static void Run(Schedule* schedule);
 };
diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc
index b003e99..1b61c15 100644
--- a/src/compiler/wasm-compiler.cc
+++ b/src/compiler/wasm-compiler.cc
@@ -13,6 +13,7 @@
 
 #include "src/compiler/access-builder.h"
 #include "src/compiler/common-operator.h"
+#include "src/compiler/compiler-source-position-table.h"
 #include "src/compiler/diamond.h"
 #include "src/compiler/graph-visualizer.h"
 #include "src/compiler/graph.h"
@@ -24,8 +25,8 @@
 #include "src/compiler/machine-operator.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/pipeline.h"
-#include "src/compiler/source-position.h"
-#include "src/compiler/zone-pool.h"
+#include "src/compiler/simd-scalar-lowering.h"
+#include "src/compiler/zone-stats.h"
 
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
@@ -206,6 +207,9 @@
       case wasm::kAstF64:
         return jsgraph()->Float64Constant(bit_cast<double>(0xdeadbeefdeadbeef));
         break;
+      case wasm::kAstS128:
+        return builder_->CreateS128Value(0xdeadbeef);
+        break;
       default:
         UNREACHABLE();
         return nullptr;
@@ -277,7 +281,8 @@
     } else {
       // End the control flow by returning 0xdeadbeef.
       Node* ret_value = GetTrapValue(builder_->GetFunctionSignature());
-      end = graph()->NewNode(jsgraph()->common()->Return(), ret_value,
+      end = graph()->NewNode(jsgraph()->common()->Return(),
+                             jsgraph()->Int32Constant(0), ret_value,
                              *effect_ptr, *control_ptr);
     }
 
@@ -294,6 +299,7 @@
       mem_buffer_(nullptr),
       mem_size_(nullptr),
       function_tables_(zone),
+      function_table_sizes_(zone),
       control_(nullptr),
       effect_(nullptr),
       cur_buffer_(def_buffer_),
@@ -404,37 +410,44 @@
   return jsgraph()->Int64Constant(value);
 }
 
-void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position) {
+void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position,
+                                  Node** effect, Node** control) {
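+  // Fall back to the builder's current effect/control when the caller does
+  // not provide explicit chains.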
+  if (effect == nullptr) {
+    effect = effect_;
+  }
+  if (control == nullptr) {
+    control = control_;
+  }
   // We do not generate stack checks for cctests.
   if (module_ && !module_->instance->context.is_null()) {
     Node* limit = graph()->NewNode(
         jsgraph()->machine()->Load(MachineType::Pointer()),
         jsgraph()->ExternalConstant(
             ExternalReference::address_of_stack_limit(jsgraph()->isolate())),
-        jsgraph()->IntPtrConstant(0), *effect_, *control_);
+        jsgraph()->IntPtrConstant(0), *effect, *control);
     Node* pointer = graph()->NewNode(jsgraph()->machine()->LoadStackPointer());
 
     Node* check =
         graph()->NewNode(jsgraph()->machine()->UintLessThan(), limit, pointer);
 
     Diamond stack_check(graph(), jsgraph()->common(), check, BranchHint::kTrue);
-
-    Node* effect_true = *effect_;
+    stack_check.Chain(*control);
+    Node* effect_true = *effect;
 
     Node* effect_false;
     // Generate a call to the runtime if there is a stack check failure.
     {
       Node* node = BuildCallToRuntime(Runtime::kStackGuard, jsgraph(),
                                       module_->instance->context, nullptr, 0,
-                                      effect_, stack_check.if_false);
+                                      effect, stack_check.if_false);
       effect_false = node;
     }
 
     Node* ephi = graph()->NewNode(jsgraph()->common()->EffectPhi(2),
                                   effect_true, effect_false, stack_check.merge);
 
-    *control_ = stack_check.merge;
-    *effect_ = ephi;
+    *control = stack_check.merge;
+    *effect = ephi;
   }
 }
 
@@ -980,16 +993,36 @@
   return jsgraph()->HeapConstant(value);
 }
 
-Node* WasmGraphBuilder::Branch(Node* cond, Node** true_node,
-                               Node** false_node) {
+namespace {
+Node* Branch(JSGraph* jsgraph, Node* cond, Node** true_node, Node** false_node,
+             Node* control, BranchHint hint) {
   DCHECK_NOT_NULL(cond);
-  DCHECK_NOT_NULL(*control_);
+  DCHECK_NOT_NULL(control);
   Node* branch =
-      graph()->NewNode(jsgraph()->common()->Branch(), cond, *control_);
-  *true_node = graph()->NewNode(jsgraph()->common()->IfTrue(), branch);
-  *false_node = graph()->NewNode(jsgraph()->common()->IfFalse(), branch);
+      jsgraph->graph()->NewNode(jsgraph->common()->Branch(hint), cond, control);
+  *true_node = jsgraph->graph()->NewNode(jsgraph->common()->IfTrue(), branch);
+  *false_node = jsgraph->graph()->NewNode(jsgraph->common()->IfFalse(), branch);
   return branch;
 }
+}  // namespace
+
+Node* WasmGraphBuilder::BranchNoHint(Node* cond, Node** true_node,
+                                     Node** false_node) {
+  return Branch(jsgraph(), cond, true_node, false_node, *control_,
+                BranchHint::kNone);
+}
+
+Node* WasmGraphBuilder::BranchExpectTrue(Node* cond, Node** true_node,
+                                         Node** false_node) {
+  return Branch(jsgraph(), cond, true_node, false_node, *control_,
+                BranchHint::kTrue);
+}
+
+Node* WasmGraphBuilder::BranchExpectFalse(Node* cond, Node** true_node,
+                                          Node** false_node) {
+  return Branch(jsgraph(), cond, true_node, false_node, *control_,
+                BranchHint::kFalse);
+}
 
 Node* WasmGraphBuilder::Switch(unsigned count, Node* key) {
   return graph()->NewNode(jsgraph()->common()->Switch(count), key, *control_);
@@ -1009,11 +1042,13 @@
   DCHECK_NOT_NULL(*control_);
   DCHECK_NOT_NULL(*effect_);
 
-  Node** buf = Realloc(vals, count, count + 2);
-  buf[count] = *effect_;
-  buf[count + 1] = *control_;
+  Node** buf = Realloc(vals, count, count + 3);
+  memmove(buf + 1, buf, sizeof(void*) * count);
+  buf[0] = jsgraph()->Int32Constant(0);
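+  // The first value input to Return is now the pop count (0 stack slots).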
+  buf[count + 1] = *effect_;
+  buf[count + 2] = *control_;
   Node* ret =
-      graph()->NewNode(jsgraph()->common()->Return(count), count + 2, vals);
+      graph()->NewNode(jsgraph()->common()->Return(count), count + 3, buf);
 
   MergeControlToEnd(jsgraph(), ret);
   return ret;
@@ -1681,7 +1716,7 @@
       graph(), jsgraph()->common(),
       graph()->NewNode(
           jsgraph()->machine()->Uint32LessThanOrEqual(), input,
-          jsgraph()->Uint32Constant(wasm::WasmModule::kMaxMemPages)),
+          jsgraph()->Uint32Constant(wasm::WasmModule::kV8MaxPages)),
       BranchHint::kTrue);
 
   check_input_range.Chain(*control_);
@@ -1748,7 +1783,7 @@
 
   Node* is_smi;
   Node* is_heap;
-  Branch(BuildTestNotSmi(value), &is_heap, &is_smi);
+  BranchExpectFalse(BuildTestNotSmi(value), &is_heap, &is_smi);
 
   // is_smi
   Node* smi_i32 = BuildChangeSmiToInt32(value);
@@ -1788,7 +1823,7 @@
   Node* before = *control_;
   Node* denom_is_m1;
   Node* denom_is_not_m1;
-  Branch(
+  BranchExpectFalse(
       graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
       &denom_is_m1, &denom_is_not_m1);
   *control_ = denom_is_m1;
@@ -1836,6 +1871,18 @@
 
 Node* WasmGraphBuilder::BuildI32AsmjsDivS(Node* left, Node* right) {
   MachineOperatorBuilder* m = jsgraph()->machine();
+
+  Int32Matcher mr(right);
+  if (mr.HasValue()) {
+    if (mr.Value() == 0) {
+      return jsgraph()->Int32Constant(0);
+    } else if (mr.Value() == -1) {
+      // The result is the negation of the left input.
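+      // 0 - left wraps for left == kMinInt, matching asm.js, where
+      // kMinInt / -1 is kMinInt again after ToInt32.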
+      return graph()->NewNode(m->Int32Sub(), jsgraph()->Int32Constant(0), left);
+    }
+    return graph()->NewNode(m->Int32Div(), left, right, *control_);
+  }
+
   // asm.js semantics return 0 on divide or mod by zero.
   if (m->Int32DivIsSafe()) {
     // The hardware instruction does the right thing (e.g. arm).
@@ -1865,6 +1912,17 @@
 
 Node* WasmGraphBuilder::BuildI32AsmjsRemS(Node* left, Node* right) {
   MachineOperatorBuilder* m = jsgraph()->machine();
+
+  Int32Matcher mr(right);
+  if (mr.HasValue()) {
+    if (mr.Value() == 0) {
+      return jsgraph()->Int32Constant(0);
+    } else if (mr.Value() == -1) {
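+      // x % -1 == 0 for every x, including kMinInt.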
+      return jsgraph()->Int32Constant(0);
+    }
+    return graph()->NewNode(m->Int32Mod(), left, right, *control_);
+  }
+
   // asm.js semantics return 0 on divide or mod by zero.
   // Explicit check for x % 0.
   Diamond z(
@@ -1930,9 +1988,9 @@
   Node* before = *control_;
   Node* denom_is_m1;
   Node* denom_is_not_m1;
-  Branch(graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
-                          jsgraph()->Int64Constant(-1)),
-         &denom_is_m1, &denom_is_not_m1);
+  BranchExpectFalse(graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
+                                     jsgraph()->Int64Constant(-1)),
+                    &denom_is_m1, &denom_is_not_m1);
   *control_ = denom_is_m1;
   trap_->TrapIfEq64(wasm::kTrapDivUnrepresentable, left,
                     std::numeric_limits<int64_t>::min(), position);
@@ -2100,37 +2158,27 @@
   return BuildWasmCall(sig, args, rets, position);
 }
 
-Node* WasmGraphBuilder::CallIndirect(uint32_t index, Node** args, Node*** rets,
+Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
+                                     Node*** rets,
                                      wasm::WasmCodePosition position) {
   DCHECK_NOT_NULL(args[0]);
   DCHECK(module_ && module_->instance);
 
-  MachineOperatorBuilder* machine = jsgraph()->machine();
+  // Assume only one table for now.
+  uint32_t table_index = 0;
+  wasm::FunctionSig* sig = module_->GetSignature(sig_index);
 
-  // Compute the code object by loading it from the function table.
+  DCHECK(module_->IsValidTable(table_index));
+
+  EnsureFunctionTableNodes();
+  MachineOperatorBuilder* machine = jsgraph()->machine();
   Node* key = args[0];
 
-  // Assume only one table for now.
-  DCHECK_LE(module_->instance->function_tables.size(), 1u);
-  // Bounds check the index.
-  uint32_t table_size =
-      module_->IsValidTable(0) ? module_->GetTable(0)->max_size : 0;
-  wasm::FunctionSig* sig = module_->GetSignature(index);
-  if (table_size > 0) {
-    // Bounds check against the table size.
-    Node* size = Uint32Constant(table_size);
-    Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
-    trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, in_bounds, position);
-  } else {
-    // No function table. Generate a trap and return a constant.
-    trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, Int32Constant(0), position);
-    (*rets) = Buffer(sig->return_count());
-    for (size_t i = 0; i < sig->return_count(); i++) {
-      (*rets)[i] = trap_->GetTrapValue(sig->GetReturn(i));
-    }
-    return trap_->GetTrapValue(sig);
-  }
-  Node* table = FunctionTable(0);
+  // Bounds check against the table size.
+  Node* size = function_table_sizes_[table_index];
+  Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
+  trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, in_bounds, position);
+  Node* table = function_tables_[table_index];
 
   // Load signature from the table and check.
   // The table is a FixedArray; signatures are encoded as SMIs.
@@ -2145,13 +2193,16 @@
                                           Int32Constant(kPointerSizeLog2)),
                          Int32Constant(fixed_offset)),
         *effect_, *control_);
-    Node* sig_match =
-        graph()->NewNode(machine->Word32Equal(),
-                         BuildChangeSmiToInt32(load_sig), Int32Constant(index));
+    auto map = const_cast<wasm::SignatureMap&>(
+        module_->module->function_tables[0].map);
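+    // Compare the signature stored in the table against the canonical
+    // index of {sig} in the table's signature map.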
+    Node* sig_match = graph()->NewNode(
+        machine->WordEqual(), load_sig,
+        jsgraph()->SmiConstant(static_cast<int>(map.FindOrInsert(sig))));
     trap_->AddTrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
   }
 
   // Load code object from the table.
+  uint32_t table_size = module_->module->function_tables[table_index].min_size;
   uint32_t offset = fixed_offset + kPointerSize * table_size;
   Node* load_code = graph()->NewNode(
       machine->Load(MachineType::AnyTagged()), table,
@@ -2295,6 +2346,7 @@
   switch (type) {
     case wasm::kAstI32:
       return BuildChangeInt32ToTagged(node);
+    case wasm::kAstS128:
     case wasm::kAstI64:
       // Throw a TypeError. The native context is good enough here because we
       // only throw a TypeError.
@@ -2457,6 +2509,7 @@
                              num);
       break;
     }
+    case wasm::kAstS128:
     case wasm::kAstI64:
       // Throw a TypeError. The native context is good enough here because we
       // only throw a TypeError.
@@ -2617,8 +2670,8 @@
   }
   Node* jsval = ToJS(
       retval, sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
-  Node* ret =
-      graph()->NewNode(jsgraph()->common()->Return(), jsval, call, start);
+  Node* ret = graph()->NewNode(jsgraph()->common()->Return(),
+                               jsgraph()->Int32Constant(0), jsval, call, start);
 
   MergeControlToEnd(jsgraph(), ret);
 }
@@ -2729,14 +2782,16 @@
   Node* val =
       FromJS(call, HeapConstant(isolate->native_context()),
              sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
+  Node* pop_size = jsgraph()->Int32Constant(0);
   if (jsgraph()->machine()->Is32() && sig->return_count() > 0 &&
       sig->GetReturn() == wasm::kAstI64) {
-    ret = graph()->NewNode(jsgraph()->common()->Return(), val,
+    ret = graph()->NewNode(jsgraph()->common()->Return(), pop_size, val,
                            graph()->NewNode(jsgraph()->machine()->Word32Sar(),
                                             val, jsgraph()->Int32Constant(31)),
                            call, start);
   } else {
-    ret = graph()->NewNode(jsgraph()->common()->Return(), val, call, start);
+    ret = graph()->NewNode(jsgraph()->common()->Return(), pop_size, val, call,
+                           start);
   }
 
   MergeControlToEnd(jsgraph(), ret);
@@ -2796,17 +2851,15 @@
   }
 }
 
-Node* WasmGraphBuilder::FunctionTable(uint32_t index) {
-  DCHECK(module_ && module_->instance &&
-         index < module_->instance->function_tables.size());
-  if (!function_tables_.size()) {
-    for (size_t i = 0; i < module_->instance->function_tables.size(); ++i) {
-      DCHECK(!module_->instance->function_tables[i].is_null());
-      function_tables_.push_back(
-          HeapConstant(module_->instance->function_tables[i]));
-    }
+void WasmGraphBuilder::EnsureFunctionTableNodes() {
+  if (function_tables_.size() > 0) return;
+  for (size_t i = 0; i < module_->instance->function_tables.size(); ++i) {
+    auto handle = module_->instance->function_tables[i];
+    DCHECK(!handle.is_null());
+    function_tables_.push_back(HeapConstant(handle));
+    uint32_t table_size = module_->module->function_tables[i].min_size;
+    function_table_sizes_.push_back(Uint32Constant(table_size));
   }
-  return function_tables_[index];
 }
 
 Node* WasmGraphBuilder::GetGlobal(uint32_t index) {
@@ -2845,25 +2898,32 @@
   uint32_t size = module_->instance->mem_size;
   byte memsize = wasm::WasmOpcodes::MemSize(memtype);
 
-  // Check against the effective size.
   size_t effective_size;
-  if (size == 0) {
-    effective_size = 0;
-  } else if (offset >= size ||
-             (static_cast<uint64_t>(offset) + memsize) > size) {
+  if (size <= offset || size < (static_cast<uint64_t>(offset) + memsize)) {
     // Two checks are needed in the case where the offset is statically out
     // of bounds: one check that the offset itself is in bounds, and another
     // that offset + index is in bounds, so that the code can be patched
     // correctly on relocation.
-    effective_size = size - memsize + 1;
+
+    // Check for overflows.
+    if ((std::numeric_limits<uint32_t>::max() - memsize) + 1 < offset) {
+      // Always trap. Do not use TrapAlways because it does not create a valid
+      // graph here.
+      trap_->TrapIfEq32(wasm::kTrapMemOutOfBounds, jsgraph()->Int32Constant(0),
+                        0, position);
+      return;
+    }
+    size_t effective_offset = (offset - 1) + memsize;
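+    // effective_offset is the last byte touched by an access at index 0;
+    // the static part of the access is in bounds iff effective_offset < size.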
+
     Node* cond = graph()->NewNode(jsgraph()->machine()->Uint32LessThan(),
-                                  jsgraph()->IntPtrConstant(offset),
+                                  jsgraph()->IntPtrConstant(effective_offset),
                                   jsgraph()->RelocatableInt32Constant(
-                                      static_cast<uint32_t>(effective_size),
+                                      static_cast<uint32_t>(size),
                                       RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
     trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
-    DCHECK(offset >= effective_size);
-    effective_size = offset - effective_size;
+    // For offset > effective size, this relies on the check above to fail;
+    // the effective size can be negative here, relying on wraparound.
+    effective_size = size - offset - memsize + 1;
   } else {
     effective_size = size - offset - memsize + 1;
     CHECK(effective_size <= kMaxUInt32);
@@ -3016,20 +3076,26 @@
   }
 }
 
+void WasmGraphBuilder::SimdScalarLoweringForTesting() {
+  SimdScalarLowering(jsgraph()->graph(), jsgraph()->machine(),
+                     jsgraph()->common(), jsgraph()->zone(),
+                     function_signature_)
+      .LowerGraph();
+}
+
 void WasmGraphBuilder::SetSourcePosition(Node* node,
                                          wasm::WasmCodePosition position) {
   DCHECK_NE(position, wasm::kNoCodePosition);
-  compiler::SourcePosition pos(position);
   if (source_position_table_)
-    source_position_table_->SetSourcePosition(node, pos);
+    source_position_table_->SetSourcePosition(node, SourcePosition(position));
 }
 
-Node* WasmGraphBuilder::DefaultS128Value() {
+Node* WasmGraphBuilder::CreateS128Value(int32_t value) {
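+  // Splats {value} into all four lanes of an Int32x4.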
   // TODO(gdeepti): Introduce Simd128Constant to common-operator.h and use it
   // instead of creating a SIMD Value.
   return graph()->NewNode(jsgraph()->machine()->CreateInt32x4(),
-                          Int32Constant(0), Int32Constant(0), Int32Constant(0),
-                          Int32Constant(0));
+                          Int32Constant(value), Int32Constant(value),
+                          Int32Constant(value), Int32Constant(value));
 }
 
 Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
@@ -3038,6 +3104,18 @@
     case wasm::kExprI32x4Splat:
       return graph()->NewNode(jsgraph()->machine()->CreateInt32x4(), inputs[0],
                               inputs[0], inputs[0], inputs[0]);
+    case wasm::kExprI32x4Add:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4Add(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprF32x4ExtractLane:
+      return graph()->NewNode(jsgraph()->machine()->Float32x4ExtractLane(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprF32x4Splat:
+      return graph()->NewNode(jsgraph()->machine()->CreateFloat32x4(),
+                              inputs[0], inputs[0], inputs[0], inputs[0]);
+    case wasm::kExprF32x4Add:
+      return graph()->NewNode(jsgraph()->machine()->Float32x4Add(), inputs[0],
+                              inputs[1]);
     default:
       return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
   }
@@ -3049,6 +3127,9 @@
     case wasm::kExprI32x4ExtractLane:
       return graph()->NewNode(jsgraph()->machine()->Int32x4ExtractLane(), input,
                               Int32Constant(lane));
+    case wasm::kExprF32x4ExtractLane:
+      return graph()->NewNode(jsgraph()->machine()->Float32x4ExtractLane(),
+                              input, Int32Constant(lane));
     default:
       return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
   }
@@ -3082,7 +3163,7 @@
   //----------------------------------------------------------------------------
   // Create the Graph
   //----------------------------------------------------------------------------
-  Zone zone(isolate->allocator());
+  Zone zone(isolate->allocator(), ZONE_NAME);
   Graph graph(&zone);
   CommonOperatorBuilder common(&zone);
   MachineOperatorBuilder machine(&zone);
@@ -3151,12 +3232,12 @@
 
 Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
                                     wasm::FunctionSig* sig, uint32_t index,
-                                    Handle<String> import_module,
-                                    MaybeHandle<String> import_function) {
+                                    Handle<String> module_name,
+                                    MaybeHandle<String> import_name) {
   //----------------------------------------------------------------------------
   // Create the Graph
   //----------------------------------------------------------------------------
-  Zone zone(isolate->allocator());
+  Zone zone(isolate->allocator(), ZONE_NAME);
   Graph graph(&zone);
   CommonOperatorBuilder common(&zone);
   MachineOperatorBuilder machine(&zone);
@@ -3215,14 +3296,14 @@
   if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
     const char* function_name = nullptr;
     int function_name_size = 0;
-    if (!import_function.is_null()) {
-      Handle<String> handle = import_function.ToHandleChecked();
+    if (!import_name.is_null()) {
+      Handle<String> handle = import_name.ToHandleChecked();
       function_name = handle->ToCString().get();
       function_name_size = handle->length();
     }
     RecordFunctionCompilation(
         CodeEventListener::FUNCTION_TAG, isolate, code, "wasm-to-js", index,
-        {import_module->ToCString().get(), import_module->length()},
+        {module_name->ToCString().get(), module_name->length()},
         {function_name, function_name_size});
   }
 
@@ -3264,6 +3345,9 @@
     r.LowerGraph();
   }
 
+  SimdScalarLowering(graph, machine, common, jsgraph_->zone(), function_->sig)
+      .LowerGraph();
+
   int index = static_cast<int>(function_->func_index);
 
   if (index >= FLAG_trace_wasm_ast_start && index < FLAG_trace_wasm_ast_end) {
@@ -3285,7 +3369,7 @@
       isolate_(isolate),
       module_env_(module_env),
       function_(function),
-      graph_zone_(new Zone(isolate->allocator())),
+      graph_zone_(new Zone(isolate->allocator(), ZONE_NAME)),
       jsgraph_(new (graph_zone()) JSGraph(
           isolate, new (graph_zone()) Graph(graph_zone()),
           new (graph_zone()) CommonOperatorBuilder(graph_zone()), nullptr,
@@ -3293,7 +3377,7 @@
                        graph_zone(), MachineType::PointerRepresentation(),
                        InstructionSelector::SupportedMachineOperatorFlags(),
                        InstructionSelector::AlignmentRequirements()))),
-      compilation_zone_(isolate->allocator()),
+      compilation_zone_(isolate->allocator(), ZONE_NAME),
       info_(function->name_length != 0
                 ? module_env->module->GetNameOrNull(function->name_offset,
                                                     function->name_length)
@@ -3371,7 +3455,7 @@
           function_->name_offset, function_->name_length);
       SNPrintF(buffer, "Compiling WASM function #%d:%.*s failed:",
                function_->func_index, name.length(), name.start());
-      thrower_->Failed(buffer.start(), graph_construction_result_);
+      thrower_->CompileFailed(buffer.start(), graph_construction_result_);
     }
 
     return Handle<Code>::null();
diff --git a/src/compiler/wasm-compiler.h b/src/compiler/wasm-compiler.h
index c980a87..b4bc350 100644
--- a/src/compiler/wasm-compiler.h
+++ b/src/compiler/wasm-compiler.h
@@ -84,8 +84,8 @@
 // Wraps a JS function, producing a code object that can be called from WASM.
 Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
                                     wasm::FunctionSig* sig, uint32_t index,
-                                    Handle<String> import_module,
-                                    MaybeHandle<String> import_function);
+                                    Handle<String> module_name,
+                                    MaybeHandle<String> import_name);
 
 // Wraps a given wasm code object, producing a code object.
 Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::ModuleEnv* module,
@@ -142,12 +142,16 @@
   void AppendToMerge(Node* merge, Node* from);
   void AppendToPhi(Node* phi, Node* from);
 
-  void StackCheck(wasm::WasmCodePosition position);
+  void StackCheck(wasm::WasmCodePosition position, Node** effect = nullptr,
+                  Node** control = nullptr);
 
   //-----------------------------------------------------------------------
   // Operations that read and/or write {control} and {effect}.
   //-----------------------------------------------------------------------
-  Node* Branch(Node* cond, Node** true_node, Node** false_node);
+  Node* BranchNoHint(Node* cond, Node** true_node, Node** false_node);
+  Node* BranchExpectTrue(Node* cond, Node** true_node, Node** false_node);
+  Node* BranchExpectFalse(Node* cond, Node** true_node, Node** false_node);
+
   Node* Switch(unsigned count, Node* key);
   Node* IfValue(int32_t value, Node* sw);
   Node* IfDefault(Node* sw);
@@ -166,7 +170,7 @@
   Node* ToJS(Node* node, wasm::LocalType type);
   Node* FromJS(Node* node, Node* context, wasm::LocalType type);
   Node* Invert(Node* node);
-  Node* FunctionTable(uint32_t index);
+  void EnsureFunctionTableNodes();
 
   //-----------------------------------------------------------------------
   // Operations that concern the linear memory.
@@ -196,9 +200,11 @@
 
   void Int64LoweringForTesting();
 
+  void SimdScalarLoweringForTesting();
+
   void SetSourcePosition(Node* node, wasm::WasmCodePosition position);
 
-  Node* DefaultS128Value();
+  Node* CreateS128Value(int32_t value);
 
   Node* SimdOp(wasm::WasmOpcode opcode, const NodeVector& inputs);
   Node* SimdExtractLane(wasm::WasmOpcode opcode, uint8_t lane, Node* input);
@@ -213,6 +219,7 @@
   Node* mem_buffer_;
   Node* mem_size_;
   NodeVector function_tables_;
+  NodeVector function_table_sizes_;
   Node** control_;
   Node** effect_;
   Node** cur_buffer_;
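Splitting Branch into BranchNoHint/BranchExpectTrue/BranchExpectFalse lets the graph builder attach a static prediction to each branch; a stack check, for example, is expected not to fail. The same idea in plain C++, using the GCC/Clang builtin (a sketch, not the V8 API):

    #define UNLIKELY(cond) __builtin_expect(!!(cond), 0)

    // The unlikely arm is laid out out of line so the hot path falls
    // through; this is the effect BranchExpectFalse requests for the
    // stack-overflow arm of StackCheck.
    int DoWork(int stack_remaining) {
      if (UNLIKELY(stack_remaining < 0)) {
        return -1;  // cold path: trigger stack-overflow handling
      }
      return 0;  // hot path
    }
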
diff --git a/src/compiler/wasm-linkage.cc b/src/compiler/wasm-linkage.cc
index 574db1c..a41c93c 100644
--- a/src/compiler/wasm-linkage.cc
+++ b/src/compiler/wasm-linkage.cc
@@ -178,6 +178,17 @@
       // Allocate a floating point register/stack location.
       if (fp_offset < fp_count) {
         DoubleRegister reg = fp_regs[fp_offset++];
+#if V8_TARGET_ARCH_ARM
+        // Allocate floats using a double register, but modify the code to
+        // reflect how ARM FP registers alias.
+        // TODO(bbudge) Modify wasm linkage to allow use of all float regs.
+        if (type == kAstF32) {
+          int float_reg_code = reg.code() * 2;
+          DCHECK(float_reg_code < RegisterConfiguration::kMaxFPRegisters);
+          return regloc(DoubleRegister::from_code(float_reg_code),
+                        MachineTypeFor(type));
+        }
+#endif
         return regloc(reg, MachineTypeFor(type));
       } else {
         int offset = -1 - stack_offset;
@@ -307,26 +318,23 @@
       "wasm-call");
 }
 
-CallDescriptor* ModuleEnv::GetI32WasmCallDescriptor(
-    Zone* zone, CallDescriptor* descriptor) {
+CallDescriptor* ReplaceTypeInCallDescriptorWith(
+    Zone* zone, CallDescriptor* descriptor, size_t num_replacements,
+    MachineType input_type, MachineRepresentation output_type) {
   size_t parameter_count = descriptor->ParameterCount();
   size_t return_count = descriptor->ReturnCount();
   for (size_t i = 0; i < descriptor->ParameterCount(); i++) {
-    if (descriptor->GetParameterType(i) == MachineType::Int64()) {
-      // For each int64 input we get two int32 inputs.
-      parameter_count++;
+    if (descriptor->GetParameterType(i) == input_type) {
+      parameter_count += num_replacements - 1;
     }
   }
   for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
-    if (descriptor->GetReturnType(i) == MachineType::Int64()) {
-      // For each int64 return we get two int32 returns.
-      return_count++;
+    if (descriptor->GetReturnType(i) == input_type) {
+      return_count += num_replacements - 1;
     }
   }
   if (parameter_count == descriptor->ParameterCount() &&
       return_count == descriptor->ReturnCount()) {
-    // If there is no int64 parameter or return value, we can just return the
-    // original descriptor.
     return descriptor;
   }
 
@@ -335,10 +343,10 @@
   Allocator rets = return_registers.Get();
 
   for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
-    if (descriptor->GetReturnType(i) == MachineType::Int64()) {
-      // For each int64 return we get two int32 returns.
-      locations.AddReturn(rets.Next(MachineRepresentation::kWord32));
-      locations.AddReturn(rets.Next(MachineRepresentation::kWord32));
+    if (descriptor->GetReturnType(i) == input_type) {
+      for (size_t j = 0; j < num_replacements; j++) {
+        locations.AddReturn(rets.Next(output_type));
+      }
     } else {
       locations.AddReturn(
           rets.Next(descriptor->GetReturnType(i).representation()));
@@ -348,10 +356,10 @@
   Allocator params = parameter_registers.Get();
 
   for (size_t i = 0; i < descriptor->ParameterCount(); i++) {
-    if (descriptor->GetParameterType(i) == MachineType::Int64()) {
-      // For each int64 input we get two int32 inputs.
-      locations.AddParam(params.Next(MachineRepresentation::kWord32));
-      locations.AddParam(params.Next(MachineRepresentation::kWord32));
+    if (descriptor->GetParameterType(i) == input_type) {
+      for (size_t j = 0; j < num_replacements; j++) {
+        locations.AddParam(params.Next(output_type));
+      }
     } else {
       locations.AddParam(
           params.Next(descriptor->GetParameterType(i).representation()));
@@ -369,8 +377,20 @@
       descriptor->CalleeSavedFPRegisters(),  // callee-saved fp regs
       descriptor->flags(),                   // flags
       descriptor->debug_name());
+}
 
-  return descriptor;
+CallDescriptor* ModuleEnv::GetI32WasmCallDescriptor(
+    Zone* zone, CallDescriptor* descriptor) {
+  return ReplaceTypeInCallDescriptorWith(zone, descriptor, 2,
+                                         MachineType::Int64(),
+                                         MachineRepresentation::kWord32);
+}
+
+CallDescriptor* ModuleEnv::GetI32WasmCallDescriptorForSimd(
+    Zone* zone, CallDescriptor* descriptor) {
+  return ReplaceTypeInCallDescriptorWith(zone, descriptor, 4,
+                                         MachineType::Simd128(),
+                                         MachineRepresentation::kWord32);
 }
 
 }  // namespace wasm
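ReplaceTypeInCallDescriptorWith generalizes the old int64-to-two-int32 descriptor rewrite so the same routine also serves the SIMD lowering, where one Simd128 slot becomes four word32 slots. The slot-expansion step in miniature (illustrative types, not the V8 ones):

    #include <cstddef>
    #include <vector>

    enum class Rep { kWord32, kWord64, kSimd128 };

    // Every slot matching `from` expands into `num_replacements` word32
    // slots; all other slots are kept as-is.
    std::vector<Rep> ReplaceType(const std::vector<Rep>& slots, Rep from,
                                 size_t num_replacements) {
      std::vector<Rep> result;
      for (Rep r : slots) {
        if (r == from) {
          result.insert(result.end(), num_replacements, Rep::kWord32);
        } else {
          result.push_back(r);
        }
      }
      return result;
    }
    // GetI32WasmCallDescriptor uses num_replacements == 2 (int64);
    // GetI32WasmCallDescriptorForSimd uses num_replacements == 4 (simd128).
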
diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc
index 4d63e9a..745ac50 100644
--- a/src/compiler/x64/code-generator-x64.cc
+++ b/src/compiler/x64/code-generator-x64.cc
@@ -4,6 +4,8 @@
 
 #include "src/compiler/code-generator.h"
 
+#include <limits>
+
 #include "src/compilation-info.h"
 #include "src/compiler/code-generator-impl.h"
 #include "src/compiler/gap-resolver.h"
@@ -445,7 +447,7 @@
     OutOfLineCode* ool;                                                      \
     if (instr->InputAt(3)->IsRegister()) {                                   \
       auto length = i.InputRegister(3);                                      \
-      DCHECK_EQ(0, index2);                                                  \
+      DCHECK_EQ(0u, index2);                                                 \
       __ cmpl(index1, length);                                               \
       ool = new (zone()) OutOfLineLoadNaN(this, result);                     \
     } else {                                                                 \
@@ -500,7 +502,7 @@
     OutOfLineCode* ool;                                                        \
     if (instr->InputAt(3)->IsRegister()) {                                     \
       auto length = i.InputRegister(3);                                        \
-      DCHECK_EQ(0, index2);                                                    \
+      DCHECK_EQ(0u, index2);                                                   \
       __ cmpl(index1, length);                                                 \
       ool = new (zone()) OutOfLineLoadZero(this, result);                      \
     } else {                                                                   \
@@ -557,7 +559,7 @@
     auto value = i.InputDoubleRegister(4);                                   \
     if (instr->InputAt(3)->IsRegister()) {                                   \
       auto length = i.InputRegister(3);                                      \
-      DCHECK_EQ(0, index2);                                                  \
+      DCHECK_EQ(0u, index2);                                                 \
       Label done;                                                            \
       __ cmpl(index1, length);                                               \
       __ j(above_equal, &done, Label::kNear);                                \
@@ -612,7 +614,7 @@
     auto index2 = i.InputUint32(2);                                            \
     if (instr->InputAt(3)->IsRegister()) {                                     \
       auto length = i.InputRegister(3);                                        \
-      DCHECK_EQ(0, index2);                                                    \
+      DCHECK_EQ(0u, index2);                                                   \
       Label done;                                                              \
       __ cmpl(index1, length);                                                 \
       __ j(above_equal, &done, Label::kNear);                                  \
@@ -848,19 +850,16 @@
       RecordCallPosition(instr);
       break;
     }
-    case kArchTailCallJSFunctionFromJSFunction:
-    case kArchTailCallJSFunction: {
+    case kArchTailCallJSFunctionFromJSFunction: {
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
         // Check the function's context matches the context argument.
         __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
         __ Assert(equal, kWrongFunctionContext);
       }
-      if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
-        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
-                                         i.TempRegister(0), i.TempRegister(1),
-                                         i.TempRegister(2));
-      }
+      AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                       i.TempRegister(0), i.TempRegister(1),
+                                       i.TempRegister(2));
       __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
       frame_access_state()->ClearSPDelta();
       frame_access_state()->SetFrameAccessToDefault();
@@ -921,7 +920,7 @@
       break;
     }
     case kArchRet:
-      AssembleReturn();
+      AssembleReturn(instr->InputAt(0));
       break;
     case kArchStackPointer:
       __ movq(i.OutputRegister(), rsp);
@@ -1997,7 +1996,7 @@
           if (i.InputRegister(1).is(i.OutputRegister())) {
             __ shll(i.OutputRegister(), Immediate(1));
           } else {
-            __ leal(i.OutputRegister(), i.MemoryOperand());
+            __ addl(i.OutputRegister(), i.InputRegister(1));
           }
         } else if (mode == kMode_M2) {
           __ shll(i.OutputRegister(), Immediate(1));
@@ -2008,15 +2007,51 @@
         } else {
           __ leal(i.OutputRegister(), i.MemoryOperand());
         }
+      } else if (mode == kMode_MR1 &&
+                 i.InputRegister(1).is(i.OutputRegister())) {
+        __ addl(i.OutputRegister(), i.InputRegister(0));
       } else {
         __ leal(i.OutputRegister(), i.MemoryOperand());
       }
       __ AssertZeroExtended(i.OutputRegister());
       break;
     }
-    case kX64Lea:
-      __ leaq(i.OutputRegister(), i.MemoryOperand());
+    case kX64Lea: {
+      AddressingMode mode = AddressingModeField::decode(instr->opcode());
+      // Shorten "leaq" to "addq", "subq" or "shlq" if the register allocation
+      // and addressing mode just happen to work out. The "addq"/"subq" forms
+      // in these cases are faster based on measurements.
+      if (i.InputRegister(0).is(i.OutputRegister())) {
+        if (mode == kMode_MRI) {
+          int32_t constant_summand = i.InputInt32(1);
+          if (constant_summand > 0) {
+            __ addq(i.OutputRegister(), Immediate(constant_summand));
+          } else if (constant_summand < 0) {
+            __ subq(i.OutputRegister(), Immediate(-constant_summand));
+          }
+        } else if (mode == kMode_MR1) {
+          if (i.InputRegister(1).is(i.OutputRegister())) {
+            __ shlq(i.OutputRegister(), Immediate(1));
+          } else {
+            __ addq(i.OutputRegister(), i.InputRegister(1));
+          }
+        } else if (mode == kMode_M2) {
+          __ shlq(i.OutputRegister(), Immediate(1));
+        } else if (mode == kMode_M4) {
+          __ shlq(i.OutputRegister(), Immediate(2));
+        } else if (mode == kMode_M8) {
+          __ shlq(i.OutputRegister(), Immediate(3));
+        } else {
+          __ leaq(i.OutputRegister(), i.MemoryOperand());
+        }
+      } else if (mode == kMode_MR1 &&
+                 i.InputRegister(1).is(i.OutputRegister())) {
+        __ addq(i.OutputRegister(), i.InputRegister(0));
+      } else {
+        __ leaq(i.OutputRegister(), i.MemoryOperand());
+      }
       break;
+    }
     case kX64Dec32:
       __ decl(i.OutputRegister());
       break;
@@ -2316,7 +2351,7 @@
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   DeoptimizeReason deoptimization_reason =
       GetDeoptimizationReason(deoptimization_id);
-  __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
+  __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
 }
@@ -2362,6 +2397,9 @@
       __ movq(rbp, rsp);
     } else if (descriptor->IsJSFunctionCall()) {
       __ Prologue(this->info()->GeneratePreagedPrologue());
+      if (descriptor->PushArgumentCount()) {
+        __ pushq(kJavaScriptCallArgCountRegister);
+      }
     } else {
       __ StubPrologue(info()->GetOutputStackFrameType());
     }
@@ -2370,7 +2408,8 @@
       unwinding_info_writer_.MarkFrameConstructed(pc_base);
     }
   }
-  int shrink_slots = frame()->GetSpillSlotCount();
+  int shrink_slots =
+      frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
 
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
@@ -2414,8 +2453,7 @@
   }
 }
 
-
-void CodeGenerator::AssembleReturn() {
+void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
 
   // Restore registers.
@@ -2444,22 +2482,41 @@
 
   unwinding_info_writer_.MarkBlockWillExit();
 
+  // Might need rcx for scratch if pop_size is too big or if there is a variable
+  // pop count.
+  DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & rcx.bit());
+  DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & rdx.bit());
+  size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
+  X64OperandConverter g(this, nullptr);
   if (descriptor->IsCFunctionCall()) {
     AssembleDeconstructFrame();
   } else if (frame_access_state()->has_frame()) {
-    // Canonicalize JSFunction return sites for now.
-    if (return_label_.is_bound()) {
-      __ jmp(&return_label_);
-      return;
+    if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+      // Canonicalize JSFunction return sites for now.
+      if (return_label_.is_bound()) {
+        __ jmp(&return_label_);
+        return;
+      } else {
+        __ bind(&return_label_);
+        AssembleDeconstructFrame();
+      }
     } else {
-      __ bind(&return_label_);
       AssembleDeconstructFrame();
     }
   }
-  size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
-  // Might need rcx for scratch if pop_size is too big.
-  DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & rcx.bit());
-  __ Ret(static_cast<int>(pop_size), rcx);
+
+  if (pop->IsImmediate()) {
+    DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
+    pop_size += g.ToConstant(pop).ToInt32() * kPointerSize;
+    CHECK_LT(pop_size, static_cast<size_t>(std::numeric_limits<int>::max()));
+    __ Ret(static_cast<int>(pop_size), rcx);
+  } else {
+    Register pop_reg = g.ToRegister(pop);
+    Register scratch_reg = pop_reg.is(rcx) ? rdx : rcx;
+    __ popq(scratch_reg);
+    __ leaq(rsp, Operand(rsp, pop_reg, times_8, static_cast<int>(pop_size)));
+    __ jmp(scratch_reg);
+  }
 }
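The kX64Lea32/kX64Lea rewrite above picks cheaper ALU forms whenever the output register already holds one of the address inputs. The equivalences it exploits, restated as scalar arithmetic (illustrative only; the win comes from addq/subq/shlq being faster than leaq in the cited measurements):

    #include <cstdint>

    uint64_t LeaEquivalents(uint64_t r, uint64_t r2) {
      r = r + 8;   // leaq r, [r + 8]        ==> addq r, 8
      r = r - 8;   // leaq r, [r - 8]        ==> subq r, 8
      r = r + r2;  // leaq r, [r + r2]       ==> addq r, r2
      r = r << 1;  // leaq r, [r + r]/[r*2]  ==> shlq r, 1
      r = r << 2;  // leaq r, [r*4]          ==> shlq r, 2
      r = r << 3;  // leaq r, [r*8]          ==> shlq r, 3
      return r;
    }
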
 
 
diff --git a/src/compiler/x64/instruction-scheduler-x64.cc b/src/compiler/x64/instruction-scheduler-x64.cc
index 4208d8a..ef0c3ad 100644
--- a/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/src/compiler/x64/instruction-scheduler-x64.cc
@@ -194,8 +194,77 @@
 
 
 int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
-  // TODO(all): Add instruction cost modeling.
-  return 1;
+  // Basic latency modeling for x64 instructions. The latencies below were
+  // determined empirically.
+  switch (instr->arch_opcode()) {
+    case kCheckedLoadInt8:
+    case kCheckedLoadUint8:
+    case kCheckedLoadInt16:
+    case kCheckedLoadUint16:
+    case kCheckedLoadWord32:
+    case kCheckedLoadWord64:
+    case kCheckedLoadFloat32:
+    case kCheckedLoadFloat64:
+    case kCheckedStoreWord8:
+    case kCheckedStoreWord16:
+    case kCheckedStoreWord32:
+    case kCheckedStoreWord64:
+    case kCheckedStoreFloat32:
+    case kCheckedStoreFloat64:
+    case kSSEFloat64Mul:
+      return 5;
+    case kX64Imul:
+    case kX64Imul32:
+    case kX64ImulHigh32:
+    case kX64UmulHigh32:
+    case kSSEFloat32Cmp:
+    case kSSEFloat32Add:
+    case kSSEFloat32Sub:
+    case kSSEFloat32Abs:
+    case kSSEFloat32Neg:
+    case kSSEFloat64Cmp:
+    case kSSEFloat64Add:
+    case kSSEFloat64Sub:
+    case kSSEFloat64Max:
+    case kSSEFloat64Min:
+    case kSSEFloat64Abs:
+    case kSSEFloat64Neg:
+      return 3;
+    case kSSEFloat32Mul:
+    case kSSEFloat32ToFloat64:
+    case kSSEFloat64ToFloat32:
+    case kSSEFloat32Round:
+    case kSSEFloat64Round:
+    case kSSEFloat32ToInt32:
+    case kSSEFloat32ToUint32:
+    case kSSEFloat64ToInt32:
+    case kSSEFloat64ToUint32:
+      return 4;
+    case kX64Idiv:
+      return 49;
+    case kX64Idiv32:
+      return 35;
+    case kX64Udiv:
+      return 38;
+    case kX64Udiv32:
+      return 26;
+    case kSSEFloat32Div:
+    case kSSEFloat64Div:
+    case kSSEFloat32Sqrt:
+    case kSSEFloat64Sqrt:
+      return 13;
+    case kSSEFloat32ToInt64:
+    case kSSEFloat64ToInt64:
+    case kSSEFloat32ToUint64:
+    case kSSEFloat64ToUint64:
+      return 10;
+    case kSSEFloat64Mod:
+      return 50;
+    case kArchTruncateDoubleToI:
+      return 6;
+    default:
+      return 1;
+  }
 }
 
 }  // namespace compiler
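These latencies feed the scheduler's cost model: a high-latency instruction (e.g. kX64Idiv at 49 cycles) pushes its dependents later, so independent work can be hoisted in between. A toy illustration of the dependence-chain cost the numbers encode (assumed scheduler behavior, not the actual V8 scheduler):

    #include <vector>

    // For a straight-line chain where each instruction consumes the previous
    // result, the earliest completion time is the sum of the latencies.
    int ChainCompletionCycles(const std::vector<int>& latencies) {
      int ready_at = 0;
      for (int latency : latencies) {
        ready_at += latency;
      }
      return ready_at;
    }
    // E.g. {35 /* kX64Idiv32 */, 5 /* kSSEFloat64Mul */} completes at cycle
    // 40, leaving 35 cycles in which independent instructions can run.
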
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
index 9a7657e..878e778 100644
--- a/src/compiler/x64/instruction-selector-x64.cc
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -1250,6 +1250,19 @@
           return false;
       }
     }
+    case IrOpcode::kLoad: {
+      // The movzxbl/movsxbl/movzxwl/movsxwl operations implicitly
+      // zero-extend to 64-bit on x64, so the zero-extension is a no-op.
+      LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+      switch (load_rep.representation()) {
+        case MachineRepresentation::kWord8:
+        case MachineRepresentation::kWord16:
+          return true;
+        default:
+          return false;
+      }
+    }
     default:
       return false;
   }
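The new IrOpcode::kLoad case relies on an x64 architectural rule: any instruction that writes a 32-bit register clears the upper 32 bits of the full 64-bit register, so a narrow load emitted as movzxbl/movzxwl already yields a zero-extended 64-bit value and the explicit ChangeUint32ToUint64 on top of it can be folded away. Illustration in plain C++ (a compiler emits no extra instruction for the widening):

    #include <cstdint>

    uint64_t LoadByteZeroExtended(const uint8_t* p) {
      uint32_t v = *p;                  // movzxbl: upper 32 bits already zero
      return static_cast<uint64_t>(v);  // free; no additional instruction
    }
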
@@ -1775,6 +1788,29 @@
 void VisitWord64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
   X64OperandGenerator g(selector);
+  if (selector->CanUseRootsRegister()) {
+    Heap* const heap = selector->isolate()->heap();
+    Heap::RootListIndex root_index;
+    HeapObjectBinopMatcher m(node);
+    if (m.right().HasValue() &&
+        heap->IsRootHandle(m.right().Value(), &root_index)) {
+      if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+      InstructionCode opcode =
+          kX64Cmp | AddressingModeField::encode(kMode_Root);
+      return VisitCompare(
+          selector, opcode,
+          g.TempImmediate((root_index * kPointerSize) - kRootRegisterBias),
+          g.UseRegister(m.left().node()), cont);
+    } else if (m.left().HasValue() &&
+               heap->IsRootHandle(m.left().Value(), &root_index)) {
+      InstructionCode opcode =
+          kX64Cmp | AddressingModeField::encode(kMode_Root);
+      return VisitCompare(
+          selector, opcode,
+          g.TempImmediate((root_index * kPointerSize) - kRootRegisterBias),
+          g.UseRegister(m.right().node()), cont);
+    }
+  }
   Int64BinopMatcher m(node);
   if (m.left().IsLoad() && m.right().IsLoadStackPointer()) {
     LoadMatcher<ExternalReferenceMatcher> mleft(m.left().node());
@@ -1833,21 +1869,22 @@
 // Shared routine for word comparison against zero.
 void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                           Node* value, FlagsContinuation* cont) {
-  while (selector->CanCover(user, value)) {
+  // Try to combine with comparisons against 0 by simply inverting the branch.
+  while (value->opcode() == IrOpcode::kWord32Equal &&
+         selector->CanCover(user, value)) {
+    Int32BinopMatcher m(value);
+    if (!m.right().Is(0)) break;
+
+    user = value;
+    value = m.left().node();
+    cont->Negate();
+  }
+
+  if (selector->CanCover(user, value)) {
     switch (value->opcode()) {
-      case IrOpcode::kWord32Equal: {
-        // Combine with comparisons against 0 by simply inverting the
-        // continuation.
-        Int32BinopMatcher m(value);
-        if (m.right().Is(0)) {
-          user = value;
-          value = m.left().node();
-          cont->Negate();
-          continue;
-        }
+      case IrOpcode::kWord32Equal:
         cont->OverwriteAndNegateIfEqual(kEqual);
         return VisitWordCompare(selector, value, kX64Cmp32, cont);
-      }
       case IrOpcode::kInt32LessThan:
         cont->OverwriteAndNegateIfEqual(kSignedLessThan);
         return VisitWordCompare(selector, value, kX64Cmp32, cont);
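The restructured VisitWordCompareZero first peels off any chain of Word32Equal(x, 0) wrappers, flipping the continuation once per layer, and only then tries to fuse the remaining comparison. The underlying identity in miniature: branching on x == 0 is branching on x with the targets swapped.

    #include <cstdint>

    int BranchOnEqualZero(int32_t x, int if_true, int if_false) {
      return (x == 0) ? if_true : if_false;
    }

    // Same result with no materialized comparison against zero; the selector
    // records the swap by negating the FlagsContinuation.
    int BranchInverted(int32_t x, int if_true, int if_false) {
      return x ? if_false : if_true;
    }
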
@@ -1905,9 +1942,26 @@
       case IrOpcode::kFloat64Equal:
         cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
         return VisitFloat64Compare(selector, value, cont);
-      case IrOpcode::kFloat64LessThan:
+      case IrOpcode::kFloat64LessThan: {
+        Float64BinopMatcher m(value);
+        if (m.left().Is(0.0) && m.right().IsFloat64Abs()) {
+          // This matches the pattern
+          //
+          //   Float64LessThan(#0.0, Float64Abs(x))
+          //
+          // which TurboFan generates for NumberToBoolean in the general case,
+          // and which evaluates to false if x is 0, -0 or NaN. We can compile
+          // this to a simple (v)ucomisd using the not_equal flags condition,
+          // which avoids the costly Float64Abs.
+          cont->OverwriteAndNegateIfEqual(kNotEqual);
+          InstructionCode const opcode =
+              selector->IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
+          return VisitCompare(selector, opcode, m.left().node(),
+                              m.right().InputAt(0), cont, false);
+        }
         cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
         return VisitFloat64Compare(selector, value, cont);
+      }
       case IrOpcode::kFloat64LessThanOrEqual:
         cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
         return VisitFloat64Compare(selector, value, cont);
@@ -1956,7 +2010,6 @@
       default:
         break;
     }
-    break;
   }
 
   // Branch could not be combined with a compare, emit compare against 0.
@@ -2169,14 +2222,28 @@
   VisitFloat64Compare(this, node, &cont);
 }
 
-
 void InstructionSelector::VisitFloat64LessThan(Node* node) {
+  Float64BinopMatcher m(node);
+  if (m.left().Is(0.0) && m.right().IsFloat64Abs()) {
+    // This matches the pattern
+    //
+    //   Float64LessThan(#0.0, Float64Abs(x))
+    //
+    // which TurboFan generates for NumberToBoolean in the general case,
+    // and which evaluates to false if x is 0, -0 or NaN. We can compile
+    // this to a simple (v)ucomisd using the not_equal flags condition,
+    // which avoids the costly Float64Abs.
+    FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, node);
+    InstructionCode const opcode =
+        IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
+    return VisitCompare(this, opcode, m.left().node(), m.right().InputAt(0),
+                        &cont, false);
+  }
   FlagsContinuation cont =
       FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
-
 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
   FlagsContinuation cont =
       FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
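The pattern match above is sound because 0.0 < fabs(x) is true exactly when x is ordered and non-zero, which is what a single (v)ucomisd of x against zero reports via the not_equal condition (ucomisd sets ZF both on equality and on an unordered NaN operand). A plain C++ check of the equivalence (illustrative only):

    #include <cassert>
    #include <cmath>

    bool ViaAbs(double x) { return 0.0 < std::fabs(x); }

    // not_equal after ucomisd x, 0.0 means: ordered and x != 0.0.
    bool ViaUcomisd(double x) { return !std::isnan(x) && x != 0.0; }

    int main() {
      for (double x : {0.0, -0.0, 1.5, -2.5, std::nan("")}) {
        assert(ViaAbs(x) == ViaUcomisd(x));
      }
      return 0;
    }
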
diff --git a/src/compiler/x64/unwinding-info-writer-x64.cc b/src/compiler/x64/unwinding-info-writer-x64.cc
index 4efba32..31338bd 100644
--- a/src/compiler/x64/unwinding-info-writer-x64.cc
+++ b/src/compiler/x64/unwinding-info-writer-x64.cc
@@ -15,7 +15,8 @@
 
   block_will_exit_ = false;
 
-  DCHECK_LT(block->rpo_number().ToInt(), block_initial_states_.size());
+  DCHECK_LT(block->rpo_number().ToInt(),
+            static_cast<int>(block_initial_states_.size()));
   const BlockInitialState* initial_state =
       block_initial_states_[block->rpo_number().ToInt()];
   if (initial_state) {
@@ -47,7 +48,7 @@
 
   for (const RpoNumber& successor : block->successors()) {
     int successor_index = successor.ToInt();
-    DCHECK_LT(successor_index, block_initial_states_.size());
+    DCHECK_LT(successor_index, static_cast<int>(block_initial_states_.size()));
     const BlockInitialState* existing_state =
         block_initial_states_[successor_index];
     // If we already had an entry for this BB, check that the values are the
diff --git a/src/compiler/x87/code-generator-x87.cc b/src/compiler/x87/code-generator-x87.cc
index f5e6634..d2f64e8 100644
--- a/src/compiler/x87/code-generator-x87.cc
+++ b/src/compiler/x87/code-generator-x87.cc
@@ -637,8 +637,7 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
-    case kArchTailCallJSFunctionFromJSFunction:
-    case kArchTailCallJSFunction: {
+    case kArchTailCallJSFunctionFromJSFunction: {
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
         // Check the function's context matches the context argument.
@@ -649,10 +648,8 @@
         __ VerifyX87StackDepth(1);
       }
       __ fstp(0);
-      if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
-        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
-                                         no_reg, no_reg, no_reg);
-      }
+      AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister, no_reg,
+                                       no_reg, no_reg);
       __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
       frame_access_state()->ClearSPDelta();
       frame_access_state()->SetFrameAccessToDefault();
@@ -749,7 +746,7 @@
       break;
     }
     case kArchRet:
-      AssembleReturn();
+      AssembleReturn(instr->InputAt(0));
       break;
     case kArchFramePointer:
       __ mov(i.OutputRegister(), ebp);
@@ -1870,7 +1867,7 @@
           if (i.InputRegister(1).is(i.OutputRegister())) {
             __ shl(i.OutputRegister(), 1);
           } else {
-            __ lea(i.OutputRegister(), i.MemoryOperand());
+            __ add(i.OutputRegister(), i.InputRegister(1));
           }
         } else if (mode == kMode_M2) {
           __ shl(i.OutputRegister(), 1);
@@ -1881,6 +1878,9 @@
         } else {
           __ lea(i.OutputRegister(), i.MemoryOperand());
         }
+      } else if (mode == kMode_MR1 &&
+                 i.InputRegister(1).is(i.OutputRegister())) {
+        __ add(i.OutputRegister(), i.InputRegister(0));
       } else {
         __ lea(i.OutputRegister(), i.MemoryOperand());
       }
@@ -2245,7 +2245,7 @@
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   DeoptimizeReason deoptimization_reason =
       GetDeoptimizationReason(deoptimization_id);
-  __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
+  __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
 }
@@ -2404,12 +2404,16 @@
       __ mov(ebp, esp);
     } else if (descriptor->IsJSFunctionCall()) {
       __ Prologue(this->info()->GeneratePreagedPrologue());
+      if (descriptor->PushArgumentCount()) {
+        __ push(kJavaScriptCallArgCountRegister);
+      }
     } else {
       __ StubPrologue(info()->GetOutputStackFrameType());
     }
   }
 
-  int shrink_slots = frame()->GetSpillSlotCount();
+  int shrink_slots =
+      frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
 
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
@@ -2444,8 +2448,7 @@
   }
 }
 
-
-void CodeGenerator::AssembleReturn() {
+void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
 
   // Clear the FPU stack only if there is no return value in the stack.
@@ -2453,7 +2456,7 @@
     __ VerifyX87StackDepth(1);
   }
   bool clear_stack = true;
-  for (int i = 0; i < descriptor->ReturnCount(); i++) {
+  for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
     MachineRepresentation rep = descriptor->GetReturnType(i).representation();
     LinkageLocation loc = descriptor->GetReturnLocation(i);
     if (IsFloatingPoint(rep) && loc == LinkageLocation::ForRegister(0)) {
@@ -2463,7 +2466,6 @@
   }
   if (clear_stack) __ fstp(0);
 
-  int pop_count = static_cast<int>(descriptor->StackParameterCount());
   const RegList saves = descriptor->CalleeSavedRegisters();
   // Restore registers.
   if (saves != 0) {
@@ -2473,22 +2475,40 @@
     }
   }
 
+  // Might need ecx for scratch if pop_size is too big or if there is a variable
+  // pop count.
+  DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & ecx.bit());
+  size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
+  X87OperandConverter g(this, nullptr);
   if (descriptor->IsCFunctionCall()) {
     AssembleDeconstructFrame();
   } else if (frame_access_state()->has_frame()) {
-    // Canonicalize JSFunction return sites for now.
-    if (return_label_.is_bound()) {
-      __ jmp(&return_label_);
-      return;
+    // Canonicalize JSFunction return sites for now, but only if they always
+    // pop the same number of arguments.
+    if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+      if (return_label_.is_bound()) {
+        __ jmp(&return_label_);
+        return;
+      } else {
+        __ bind(&return_label_);
+        AssembleDeconstructFrame();
+      }
     } else {
-      __ bind(&return_label_);
       AssembleDeconstructFrame();
     }
   }
-  if (pop_count == 0) {
-    __ ret(0);
+  DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & edx.bit());
+  DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & ecx.bit());
+  if (pop->IsImmediate()) {
+    DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
+    pop_size += g.ToConstant(pop).ToInt32() * kPointerSize;
+    __ Ret(static_cast<int>(pop_size), ecx);
   } else {
-    __ Ret(pop_count * kPointerSize, ebx);
+    Register pop_reg = g.ToRegister(pop);
+    Register scratch_reg = pop_reg.is(ecx) ? edx : ecx;
+    __ pop(scratch_reg);
+    __ lea(esp, Operand(esp, pop_reg, times_4, static_cast<int>(pop_size)));
+    __ jmp(scratch_reg);
   }
 }
 
diff --git a/src/compiler/x87/instruction-selector-x87.cc b/src/compiler/x87/instruction-selector-x87.cc
index 757eee9..a737d1e 100644
--- a/src/compiler/x87/instruction-selector-x87.cc
+++ b/src/compiler/x87/instruction-selector-x87.cc
@@ -607,55 +607,78 @@
 void InstructionSelector::VisitInt32PairAdd(Node* node) {
   X87OperandGenerator g(this);
 
-  // We use UseUniqueRegister here to avoid register sharing with the temp
-  // register.
-  InstructionOperand inputs[] = {
-      g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
-      g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+  Node* projection1 = NodeProperties::FindProjection(node, 1);
+  if (projection1) {
+    // We use UseUniqueRegister here to avoid register sharing with the temp
+    // register.
+    InstructionOperand inputs[] = {
+        g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+        g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
 
-  InstructionOperand outputs[] = {
-      g.DefineSameAsFirst(node),
-      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+    InstructionOperand outputs[] = {g.DefineSameAsFirst(node),
+                                    g.DefineAsRegister(projection1)};
 
-  InstructionOperand temps[] = {g.TempRegister()};
+    InstructionOperand temps[] = {g.TempRegister()};
 
-  Emit(kX87AddPair, 2, outputs, 4, inputs, 1, temps);
+    Emit(kX87AddPair, 2, outputs, 4, inputs, 1, temps);
+  } else {
+    // The high word of the result is not used, so we emit the standard 32 bit
+    // instruction.
+    Emit(kX87Add, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+         g.Use(node->InputAt(2)));
+  }
 }
 
 void InstructionSelector::VisitInt32PairSub(Node* node) {
   X87OperandGenerator g(this);
 
-  // We use UseUniqueRegister here to avoid register sharing with the temp
-  // register.
-  InstructionOperand inputs[] = {
-      g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
-      g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+  Node* projection1 = NodeProperties::FindProjection(node, 1);
+  if (projection1) {
+    // We use UseUniqueRegister here to avoid register sharing with the temp
+    // register.
+    InstructionOperand inputs[] = {
+        g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+        g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
 
-  InstructionOperand outputs[] = {
-      g.DefineSameAsFirst(node),
-      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+    InstructionOperand outputs[] = {g.DefineSameAsFirst(node),
+                                    g.DefineAsRegister(projection1)};
 
-  InstructionOperand temps[] = {g.TempRegister()};
+    InstructionOperand temps[] = {g.TempRegister()};
 
-  Emit(kX87SubPair, 2, outputs, 4, inputs, 1, temps);
+    Emit(kX87SubPair, 2, outputs, 4, inputs, 1, temps);
+  } else {
+    // The high word of the result is not used, so we emit the standard
+    // 32-bit instruction.
+    Emit(kX87Sub, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+         g.Use(node->InputAt(2)));
+  }
 }
 
 void InstructionSelector::VisitInt32PairMul(Node* node) {
   X87OperandGenerator g(this);
 
-  // InputAt(3) explicitly shares ecx with OutputRegister(1) to save one
-  // register and one mov instruction.
-  InstructionOperand inputs[] = {
-      g.UseUnique(node->InputAt(0)), g.UseUnique(node->InputAt(1)),
-      g.UseUniqueRegister(node->InputAt(2)), g.UseFixed(node->InputAt(3), ecx)};
+  Node* projection1 = NodeProperties::FindProjection(node, 1);
+  if (projection1) {
+    // InputAt(3) explicitly shares ecx with OutputRegister(1) to save one
+    // register and one mov instruction.
+    InstructionOperand inputs[] = {g.UseUnique(node->InputAt(0)),
+                                   g.UseUnique(node->InputAt(1)),
+                                   g.UseUniqueRegister(node->InputAt(2)),
+                                   g.UseFixed(node->InputAt(3), ecx)};
 
-  InstructionOperand outputs[] = {
-      g.DefineAsFixed(node, eax),
-      g.DefineAsFixed(NodeProperties::FindProjection(node, 1), ecx)};
+    InstructionOperand outputs[] = {
+        g.DefineAsFixed(node, eax),
+        g.DefineAsFixed(NodeProperties::FindProjection(node, 1), ecx)};
 
-  InstructionOperand temps[] = {g.TempRegister(edx)};
+    InstructionOperand temps[] = {g.TempRegister(edx)};
 
-  Emit(kX87MulPair, 2, outputs, 4, inputs, 1, temps);
+    Emit(kX87MulPair, 2, outputs, 4, inputs, 1, temps);
+  } else {
+    // The high word of the result is not used, so we emit the standard
+    // 32-bit instruction.
+    Emit(kX87Imul, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+         g.Use(node->InputAt(2)));
+  }
 }
 
 void VisitWord32PairShift(InstructionSelector* selector, InstructionCode opcode,
@@ -673,11 +696,19 @@
                                  g.UseFixed(node->InputAt(1), edx),
                                  shift_operand};
 
-  InstructionOperand outputs[] = {
-      g.DefineAsFixed(node, eax),
-      g.DefineAsFixed(NodeProperties::FindProjection(node, 1), edx)};
+  InstructionOperand outputs[2];
+  InstructionOperand temps[1];
+  int32_t output_count = 0;
+  int32_t temp_count = 0;
+  outputs[output_count++] = g.DefineAsFixed(node, eax);
+  Node* projection1 = NodeProperties::FindProjection(node, 1);
+  if (projection1) {
+    outputs[output_count++] = g.DefineAsFixed(projection1, edx);
+  } else {
+    temps[temp_count++] = g.TempRegister(edx);
+  }
 
-  selector->Emit(opcode, 2, outputs, 3, inputs);
+  selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
 }
 
 void InstructionSelector::VisitWord32PairShl(Node* node) {
@@ -1402,22 +1433,22 @@
 // Shared routine for word comparison with zero.
 void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                           Node* value, FlagsContinuation* cont) {
-  // Try to combine the branch with a comparison.
-  while (selector->CanCover(user, value)) {
+  // Try to combine with comparisons against 0 by simply inverting the branch.
+  while (value->opcode() == IrOpcode::kWord32Equal &&
+         selector->CanCover(user, value)) {
+    Int32BinopMatcher m(value);
+    if (!m.right().Is(0)) break;
+
+    user = value;
+    value = m.left().node();
+    cont->Negate();
+  }
+
+  if (selector->CanCover(user, value)) {
     switch (value->opcode()) {
-      case IrOpcode::kWord32Equal: {
-        // Try to combine with comparisons against 0 by simply inverting the
-        // continuation.
-        Int32BinopMatcher m(value);
-        if (m.right().Is(0)) {
-          user = value;
-          value = m.left().node();
-          cont->Negate();
-          continue;
-        }
+      case IrOpcode::kWord32Equal:
         cont->OverwriteAndNegateIfEqual(kEqual);
         return VisitWordCompare(selector, value, cont);
-      }
       case IrOpcode::kInt32LessThan:
         cont->OverwriteAndNegateIfEqual(kSignedLessThan);
         return VisitWordCompare(selector, value, cont);
@@ -1483,7 +1514,6 @@
       default:
         break;
     }
-    break;
   }
 
   // Continuation could not be combined with a compare, emit compare against 0.
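The Int32Pair* changes in this file query NodeProperties::FindProjection(node, 1) to learn whether the high word of the 64-bit result has any consumer; if not, a single 32-bit instruction replaces the pair instruction and frees a register. The semantic justification, sketched in C++ (hypothetical helpers, not the V8 API):

    #include <cstdint>
    #include <utility>

    // Full pair add: the carry out of the low word feeds the high word.
    std::pair<uint32_t, uint32_t> PairAdd(uint32_t a_lo, uint32_t a_hi,
                                          uint32_t b_lo, uint32_t b_hi) {
      uint64_t sum = ((uint64_t{a_hi} << 32) | a_lo) +
                     ((uint64_t{b_hi} << 32) | b_lo);
      return {static_cast<uint32_t>(sum), static_cast<uint32_t>(sum >> 32)};
    }

    // If nothing consumes the high word, the low word of the pair add equals
    // a plain 32-bit add, so kX87Add can be emitted instead of kX87AddPair.
    uint32_t PairAddLowOnly(uint32_t a_lo, uint32_t b_lo) { return a_lo + b_lo; }
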
diff --git a/src/compiler/zone-pool.cc b/src/compiler/zone-pool.cc
deleted file mode 100644
index 7681eeb..0000000
--- a/src/compiler/zone-pool.cc
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/zone-pool.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-ZonePool::StatsScope::StatsScope(ZonePool* zone_pool)
-    : zone_pool_(zone_pool),
-      total_allocated_bytes_at_start_(zone_pool->GetTotalAllocatedBytes()),
-      max_allocated_bytes_(0) {
-  zone_pool_->stats_.push_back(this);
-  for (Zone* zone : zone_pool_->used_) {
-    size_t size = static_cast<size_t>(zone->allocation_size());
-    std::pair<InitialValues::iterator, bool> res =
-        initial_values_.insert(std::make_pair(zone, size));
-    USE(res);
-    DCHECK(res.second);
-  }
-}
-
-
-ZonePool::StatsScope::~StatsScope() {
-  DCHECK_EQ(zone_pool_->stats_.back(), this);
-  zone_pool_->stats_.pop_back();
-}
-
-
-size_t ZonePool::StatsScope::GetMaxAllocatedBytes() {
-  return std::max(max_allocated_bytes_, GetCurrentAllocatedBytes());
-}
-
-
-size_t ZonePool::StatsScope::GetCurrentAllocatedBytes() {
-  size_t total = 0;
-  for (Zone* zone : zone_pool_->used_) {
-    total += static_cast<size_t>(zone->allocation_size());
-    // Adjust for initial values.
-    InitialValues::iterator it = initial_values_.find(zone);
-    if (it != initial_values_.end()) {
-      total -= it->second;
-    }
-  }
-  return total;
-}
-
-
-size_t ZonePool::StatsScope::GetTotalAllocatedBytes() {
-  return zone_pool_->GetTotalAllocatedBytes() - total_allocated_bytes_at_start_;
-}
-
-
-void ZonePool::StatsScope::ZoneReturned(Zone* zone) {
-  size_t current_total = GetCurrentAllocatedBytes();
-  // Update max.
-  max_allocated_bytes_ = std::max(max_allocated_bytes_, current_total);
-  // Drop zone from initial value map.
-  InitialValues::iterator it = initial_values_.find(zone);
-  if (it != initial_values_.end()) {
-    initial_values_.erase(it);
-  }
-}
-
-ZonePool::ZonePool(AccountingAllocator* allocator)
-    : max_allocated_bytes_(0), total_deleted_bytes_(0), allocator_(allocator) {}
-
-ZonePool::~ZonePool() {
-  DCHECK(used_.empty());
-  DCHECK(stats_.empty());
-  for (Zone* zone : unused_) {
-    delete zone;
-  }
-}
-
-
-size_t ZonePool::GetMaxAllocatedBytes() {
-  return std::max(max_allocated_bytes_, GetCurrentAllocatedBytes());
-}
-
-
-size_t ZonePool::GetCurrentAllocatedBytes() {
-  size_t total = 0;
-  for (Zone* zone : used_) {
-    total += static_cast<size_t>(zone->allocation_size());
-  }
-  return total;
-}
-
-
-size_t ZonePool::GetTotalAllocatedBytes() {
-  return total_deleted_bytes_ + GetCurrentAllocatedBytes();
-}
-
-
-Zone* ZonePool::NewEmptyZone() {
-  Zone* zone;
-  // Grab a zone from pool if possible.
-  if (!unused_.empty()) {
-    zone = unused_.back();
-    unused_.pop_back();
-  } else {
-    zone = new Zone(allocator_);
-  }
-  used_.push_back(zone);
-  DCHECK_EQ(0u, zone->allocation_size());
-  return zone;
-}
-
-
-void ZonePool::ReturnZone(Zone* zone) {
-  size_t current_total = GetCurrentAllocatedBytes();
-  // Update max.
-  max_allocated_bytes_ = std::max(max_allocated_bytes_, current_total);
-  // Update stats.
-  for (StatsScope* stat_scope : stats_) {
-    stat_scope->ZoneReturned(zone);
-  }
-  // Remove from used.
-  Used::iterator it = std::find(used_.begin(), used_.end(), zone);
-  DCHECK(it != used_.end());
-  used_.erase(it);
-  total_deleted_bytes_ += static_cast<size_t>(zone->allocation_size());
-  // Delete zone or clear and stash on unused_.
-  if (unused_.size() >= kMaxUnusedSize) {
-    delete zone;
-  } else {
-    zone->DeleteAll();
-    DCHECK_EQ(0u, zone->allocation_size());
-    unused_.push_back(zone);
-  }
-}
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/zone-stats.cc b/src/compiler/zone-stats.cc
new file mode 100644
index 0000000..8942df5
--- /dev/null
+++ b/src/compiler/zone-stats.cc
@@ -0,0 +1,111 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/zone-stats.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+ZoneStats::StatsScope::StatsScope(ZoneStats* zone_stats)
+    : zone_stats_(zone_stats),
+      total_allocated_bytes_at_start_(zone_stats->GetTotalAllocatedBytes()),
+      max_allocated_bytes_(0) {
+  zone_stats_->stats_.push_back(this);
+  for (Zone* zone : zone_stats_->zones_) {
+    size_t size = static_cast<size_t>(zone->allocation_size());
+    std::pair<InitialValues::iterator, bool> res =
+        initial_values_.insert(std::make_pair(zone, size));
+    USE(res);
+    DCHECK(res.second);
+  }
+}
+
+ZoneStats::StatsScope::~StatsScope() {
+  DCHECK_EQ(zone_stats_->stats_.back(), this);
+  zone_stats_->stats_.pop_back();
+}
+
+size_t ZoneStats::StatsScope::GetMaxAllocatedBytes() {
+  return std::max(max_allocated_bytes_, GetCurrentAllocatedBytes());
+}
+
+size_t ZoneStats::StatsScope::GetCurrentAllocatedBytes() {
+  size_t total = 0;
+  for (Zone* zone : zone_stats_->zones_) {
+    total += static_cast<size_t>(zone->allocation_size());
+    // Adjust for initial values.
+    InitialValues::iterator it = initial_values_.find(zone);
+    if (it != initial_values_.end()) {
+      total -= it->second;
+    }
+  }
+  return total;
+}
+
+size_t ZoneStats::StatsScope::GetTotalAllocatedBytes() {
+  return zone_stats_->GetTotalAllocatedBytes() -
+         total_allocated_bytes_at_start_;
+}
+
+void ZoneStats::StatsScope::ZoneReturned(Zone* zone) {
+  size_t current_total = GetCurrentAllocatedBytes();
+  // Update max.
+  max_allocated_bytes_ = std::max(max_allocated_bytes_, current_total);
+  // Drop zone from initial value map.
+  InitialValues::iterator it = initial_values_.find(zone);
+  if (it != initial_values_.end()) {
+    initial_values_.erase(it);
+  }
+}
+
+ZoneStats::ZoneStats(AccountingAllocator* allocator)
+    : max_allocated_bytes_(0), total_deleted_bytes_(0), allocator_(allocator) {}
+
+ZoneStats::~ZoneStats() {
+  DCHECK(zones_.empty());
+  DCHECK(stats_.empty());
+}
+
+size_t ZoneStats::GetMaxAllocatedBytes() {
+  return std::max(max_allocated_bytes_, GetCurrentAllocatedBytes());
+}
+
+size_t ZoneStats::GetCurrentAllocatedBytes() {
+  size_t total = 0;
+  for (Zone* zone : zones_) {
+    total += static_cast<size_t>(zone->allocation_size());
+  }
+  return total;
+}
+
+size_t ZoneStats::GetTotalAllocatedBytes() {
+  return total_deleted_bytes_ + GetCurrentAllocatedBytes();
+}
+
+Zone* ZoneStats::NewEmptyZone(const char* zone_name) {
+  Zone* zone = new Zone(allocator_, zone_name);
+  zones_.push_back(zone);
+  return zone;
+}
+
+void ZoneStats::ReturnZone(Zone* zone) {
+  size_t current_total = GetCurrentAllocatedBytes();
+  // Update max.
+  max_allocated_bytes_ = std::max(max_allocated_bytes_, current_total);
+  // Update stats.
+  for (StatsScope* stat_scope : stats_) {
+    stat_scope->ZoneReturned(zone);
+  }
+  // Remove from used.
+  Zones::iterator it = std::find(zones_.begin(), zones_.end(), zone);
+  DCHECK(it != zones_.end());
+  zones_.erase(it);
+  total_deleted_bytes_ += static_cast<size_t>(zone->allocation_size());
+  delete zone;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/zone-pool.h b/src/compiler/zone-stats.h
similarity index 61%
rename from src/compiler/zone-pool.h
rename to src/compiler/zone-stats.h
index 7a3fe75..39adca3 100644
--- a/src/compiler/zone-pool.h
+++ b/src/compiler/zone-stats.h
@@ -2,45 +2,47 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_COMPILER_ZONE_POOL_H_
-#define V8_COMPILER_ZONE_POOL_H_
+#ifndef V8_COMPILER_ZONE_STATS_H_
+#define V8_COMPILER_ZONE_STATS_H_
 
 #include <map>
 #include <set>
 #include <vector>
 
+#include "src/globals.h"
 #include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-class ZonePool final {
+class V8_EXPORT_PRIVATE ZoneStats final {
  public:
   class Scope final {
    public:
-    explicit Scope(ZonePool* zone_pool)
-        : zone_pool_(zone_pool), zone_(nullptr) {}
+    explicit Scope(ZoneStats* zone_stats, const char* zone_name)
+        : zone_name_(zone_name), zone_stats_(zone_stats), zone_(nullptr) {}
     ~Scope() { Destroy(); }
 
     Zone* zone() {
-      if (zone_ == nullptr) zone_ = zone_pool_->NewEmptyZone();
+      if (zone_ == nullptr) zone_ = zone_stats_->NewEmptyZone(zone_name_);
       return zone_;
     }
     void Destroy() {
-      if (zone_ != nullptr) zone_pool_->ReturnZone(zone_);
+      if (zone_ != nullptr) zone_stats_->ReturnZone(zone_);
       zone_ = nullptr;
     }
 
    private:
-    ZonePool* const zone_pool_;
+    const char* zone_name_;
+    ZoneStats* const zone_stats_;
     Zone* zone_;
     DISALLOW_COPY_AND_ASSIGN(Scope);
   };
 
-  class StatsScope final {
+  class V8_EXPORT_PRIVATE StatsScope final {
    public:
-    explicit StatsScope(ZonePool* zone_pool);
+    explicit StatsScope(ZoneStats* zone_stats);
     ~StatsScope();
 
     size_t GetMaxAllocatedBytes();
@@ -48,12 +50,12 @@
     size_t GetTotalAllocatedBytes();
 
    private:
-    friend class ZonePool;
+    friend class ZoneStats;
     void ZoneReturned(Zone* zone);
 
     typedef std::map<Zone*, size_t> InitialValues;
 
-    ZonePool* const zone_pool_;
+    ZoneStats* const zone_stats_;
     InitialValues initial_values_;
     size_t total_allocated_bytes_at_start_;
     size_t max_allocated_bytes_;
@@ -61,34 +63,32 @@
     DISALLOW_COPY_AND_ASSIGN(StatsScope);
   };
 
-  explicit ZonePool(AccountingAllocator* allocator);
-  ~ZonePool();
+  explicit ZoneStats(AccountingAllocator* allocator);
+  ~ZoneStats();
 
   size_t GetMaxAllocatedBytes();
   size_t GetTotalAllocatedBytes();
   size_t GetCurrentAllocatedBytes();
 
  private:
-  Zone* NewEmptyZone();
+  Zone* NewEmptyZone(const char* zone_name);
   void ReturnZone(Zone* zone);
 
   static const size_t kMaxUnusedSize = 3;
-  typedef std::vector<Zone*> Unused;
-  typedef std::vector<Zone*> Used;
+  typedef std::vector<Zone*> Zones;
   typedef std::vector<StatsScope*> Stats;
 
-  Unused unused_;
-  Used used_;
+  Zones zones_;
   Stats stats_;
   size_t max_allocated_bytes_;
   size_t total_deleted_bytes_;
   AccountingAllocator* allocator_;
 
-  DISALLOW_COPY_AND_ASSIGN(ZonePool);
+  DISALLOW_COPY_AND_ASSIGN(ZoneStats);
 };
 
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
 
-#endif
+#endif  // V8_COMPILER_ZONE_STATS_H_
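Usage of the renamed interface, based only on the declarations above. Unlike the old ZonePool, ReturnZone now deletes the zone instead of recycling it onto an unused list, so ZoneStats is purely an accounting wrapper. A sketch (the "sketch" literal stands in for V8's ZONE_NAME macro):

    // Inside namespace v8::internal::compiler.
    void RunSomePhase(ZoneStats* zone_stats) {
      ZoneStats::StatsScope stats(zone_stats);       // per-phase allocation stats
      ZoneStats::Scope scope(zone_stats, "sketch");  // lazily creates a named Zone
      Zone* zone = scope.zone();                     // first call allocates
      static_cast<void>(zone);  // phase-local allocations would go into `zone`
      size_t peak = stats.GetMaxAllocatedBytes();    // includes `zone`'s usage
      static_cast<void>(peak);
    }  // ~Scope returns the zone; ZoneStats deletes it and updates the totals.
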
diff --git a/src/contexts.cc b/src/contexts.cc
index 4fb3c83..012944e 100644
--- a/src/contexts.cc
+++ b/src/contexts.cc
@@ -82,7 +82,7 @@
 Context* Context::closure_context() {
   Context* current = this;
   while (!current->IsFunctionContext() && !current->IsScriptContext() &&
-         !current->IsNativeContext()) {
+         !current->IsModuleContext() && !current->IsNativeContext()) {
     current = current->previous();
     DCHECK(current->closure() == closure());
   }
diff --git a/src/contexts.h b/src/contexts.h
index b927d05..b0b7195 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -51,6 +51,7 @@
   V(OBJECT_IS_FROZEN, JSFunction, object_is_frozen)                     \
   V(OBJECT_IS_SEALED, JSFunction, object_is_sealed)                     \
   V(OBJECT_KEYS, JSFunction, object_keys)                               \
+  V(REGEXP_INTERNAL_MATCH, JSFunction, regexp_internal_match)           \
   V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply)                     \
   V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct)             \
   V(REFLECT_DEFINE_PROPERTY_INDEX, JSFunction, reflect_define_property) \
@@ -58,7 +59,8 @@
   V(SPREAD_ARGUMENTS_INDEX, JSFunction, spread_arguments)               \
   V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable)                 \
   V(MATH_FLOOR_INDEX, JSFunction, math_floor)                           \
-  V(MATH_POW_INDEX, JSFunction, math_pow)
+  V(MATH_POW_INDEX, JSFunction, math_pow)                               \
+  V(CREATE_RESOLVING_FUNCTION_INDEX, JSFunction, create_resolving_functions)
 
 #define NATIVE_CONTEXT_IMPORTED_FIELDS(V)                                 \
   V(ARRAY_CONCAT_INDEX, JSFunction, array_concat)                         \
@@ -92,13 +94,15 @@
   V(PROMISE_CATCH_INDEX, JSFunction, promise_catch)                       \
   V(PROMISE_CREATE_INDEX, JSFunction, promise_create)                     \
   V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function)                 \
+  V(PROMISE_HANDLE_INDEX, JSFunction, promise_handle)                     \
   V(PROMISE_HAS_USER_DEFINED_REJECT_HANDLER_INDEX, JSFunction,            \
     promise_has_user_defined_reject_handler)                              \
+  V(PROMISE_DEBUG_GET_INFO_INDEX, JSFunction, promise_debug_get_info)     \
   V(PROMISE_REJECT_INDEX, JSFunction, promise_reject)                     \
+  V(PROMISE_INTERNAL_REJECT_INDEX, JSFunction, promise_internal_reject)   \
   V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve)                   \
   V(PROMISE_THEN_INDEX, JSFunction, promise_then)                         \
   V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function)         \
-  V(REGEXP_LAST_MATCH_INFO_INDEX, JSObject, regexp_last_match_info)       \
   V(REJECT_PROMISE_NO_DEBUG_EVENT_INDEX, JSFunction,                      \
     reject_promise_no_debug_event)                                        \
   V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \
@@ -107,7 +111,77 @@
   V(SET_HAS_METHOD_INDEX, JSFunction, set_has)                            \
   V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function)       \
   V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function)           \
-  V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function)
+  V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function)             \
+  V(WASM_COMPILE_ERROR_FUNCTION_INDEX, JSFunction,                        \
+    wasm_compile_error_function)                                          \
+  V(WASM_RUNTIME_ERROR_FUNCTION_INDEX, JSFunction, wasm_runtime_error_function)
+
+#define NATIVE_CONTEXT_JS_ARRAY_ITERATOR_MAPS(V)                               \
+  V(TYPED_ARRAY_KEY_ITERATOR_MAP_INDEX, Map, typed_array_key_iterator_map)     \
+  V(FAST_ARRAY_KEY_ITERATOR_MAP_INDEX, Map, fast_array_key_iterator_map)       \
+  V(GENERIC_ARRAY_KEY_ITERATOR_MAP_INDEX, Map, array_key_iterator_map)         \
+                                                                               \
+  V(UINT8_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map,                             \
+    uint8_array_key_value_iterator_map)                                        \
+  V(INT8_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map,                              \
+    int8_array_key_value_iterator_map)                                         \
+  V(UINT16_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map,                            \
+    uint16_array_key_value_iterator_map)                                       \
+  V(INT16_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map,                             \
+    int16_array_key_value_iterator_map)                                        \
+  V(UINT32_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map,                            \
+    uint32_array_key_value_iterator_map)                                       \
+  V(INT32_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map,                             \
+    int32_array_key_value_iterator_map)                                        \
+  V(FLOAT32_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map,                           \
+    float32_array_key_value_iterator_map)                                      \
+  V(FLOAT64_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map,                           \
+    float64_array_key_value_iterator_map)                                      \
+  V(UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map,                     \
+    uint8_clamped_array_key_value_iterator_map)                                \
+                                                                               \
+  V(FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map,                          \
+    fast_smi_array_key_value_iterator_map)                                     \
+  V(FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map,                    \
+    fast_holey_smi_array_key_value_iterator_map)                               \
+  V(FAST_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map,                              \
+    fast_array_key_value_iterator_map)                                         \
+  V(FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map,                        \
+    fast_holey_array_key_value_iterator_map)                                   \
+  V(FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map,                       \
+    fast_double_array_key_value_iterator_map)                                  \
+  V(FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map,                 \
+    fast_holey_double_array_key_value_iterator_map)                            \
+  V(GENERIC_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map,                           \
+    array_key_value_iterator_map)                                              \
+                                                                               \
+  V(UINT8_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, uint8_array_value_iterator_map) \
+  V(INT8_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, int8_array_value_iterator_map)   \
+  V(UINT16_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map,                                \
+    uint16_array_value_iterator_map)                                           \
+  V(INT16_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, int16_array_value_iterator_map) \
+  V(UINT32_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map,                                \
+    uint32_array_value_iterator_map)                                           \
+  V(INT32_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, int32_array_value_iterator_map) \
+  V(FLOAT32_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map,                               \
+    float32_array_value_iterator_map)                                          \
+  V(FLOAT64_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map,                               \
+    float64_array_value_iterator_map)                                          \
+  V(UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map,                         \
+    uint8_clamped_array_value_iterator_map)                                    \
+                                                                               \
+  V(FAST_SMI_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map,                              \
+    fast_smi_array_value_iterator_map)                                         \
+  V(FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map,                        \
+    fast_holey_smi_array_value_iterator_map)                                   \
+  V(FAST_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, fast_array_value_iterator_map)   \
+  V(FAST_HOLEY_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map,                            \
+    fast_holey_array_value_iterator_map)                                       \
+  V(FAST_DOUBLE_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map,                           \
+    fast_double_array_value_iterator_map)                                      \
+  V(FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map,                     \
+    fast_holey_double_array_value_iterator_map)                                \
+  V(GENERIC_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, array_value_iterator_map)
 
 #define NATIVE_CONTEXT_FIELDS(V)                                               \
   V(GLOBAL_PROXY_INDEX, JSObject, global_proxy_object)                         \
@@ -153,6 +227,10 @@
   V(GENERATOR_FUNCTION_FUNCTION_INDEX, JSFunction,                             \
     generator_function_function)                                               \
   V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, generator_object_prototype_map) \
+  V(INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX, JSObject,                          \
+    initial_array_iterator_prototype)                                          \
+  V(INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX, Map,                           \
+    initial_array_iterator_prototype_map)                                      \
   V(INITIAL_ARRAY_PROTOTYPE_INDEX, JSObject, initial_array_prototype)          \
   V(INITIAL_GENERATOR_PROTOTYPE_INDEX, JSObject, initial_generator_prototype)  \
   V(INITIAL_ITERATOR_PROTOTYPE_INDEX, JSObject, initial_iterator_prototype)    \
@@ -178,6 +256,7 @@
     js_array_fast_holey_double_elements_map_index)                             \
   V(JS_MAP_FUN_INDEX, JSFunction, js_map_fun)                                  \
   V(JS_MAP_MAP_INDEX, Map, js_map_map)                                         \
+  V(JS_MODULE_NAMESPACE_MAP, Map, js_module_namespace_map)                     \
   V(JS_SET_FUN_INDEX, JSFunction, js_set_fun)                                  \
   V(JS_SET_MAP_INDEX, Map, js_set_map)                                         \
   V(JS_WEAK_MAP_FUN_INDEX, JSFunction, js_weak_map_fun)                        \
@@ -185,12 +264,15 @@
   V(MAP_CACHE_INDEX, Object, map_cache)                                        \
   V(MAP_ITERATOR_MAP_INDEX, Map, map_iterator_map)                             \
   V(STRING_ITERATOR_MAP_INDEX, Map, string_iterator_map)                       \
+  V(MATH_RANDOM_INDEX_INDEX, Smi, math_random_index)                           \
+  V(MATH_RANDOM_CACHE_INDEX, Object, math_random_cache)                        \
   V(MESSAGE_LISTENERS_INDEX, TemplateList, message_listeners)                  \
   V(NATIVES_UTILS_OBJECT_INDEX, Object, natives_utils_object)                  \
   V(NORMALIZED_MAP_CACHE_INDEX, Object, normalized_map_cache)                  \
   V(NUMBER_FUNCTION_INDEX, JSFunction, number_function)                        \
   V(OBJECT_FUNCTION_INDEX, JSFunction, object_function)                        \
-  V(OBJECT_WITH_NULL_PROTOTYPE_MAP, Map, object_with_null_prototype_map)       \
+  V(SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP, Map,                                  \
+    slow_object_with_null_prototype_map)                                       \
   V(OBJECT_FUNCTION_PROTOTYPE_MAP_INDEX, Map, object_function_prototype_map)   \
   V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function)    \
   V(PROXY_CALLABLE_MAP_INDEX, Map, proxy_callable_map)                         \
@@ -198,13 +280,22 @@
   V(PROXY_FUNCTION_INDEX, JSFunction, proxy_function)                          \
   V(PROXY_FUNCTION_MAP_INDEX, Map, proxy_function_map)                         \
   V(PROXY_MAP_INDEX, Map, proxy_map)                                           \
+  V(PROMISE_RESOLVE_SHARED_FUN, SharedFunctionInfo,                            \
+    promise_resolve_shared_fun)                                                \
+  V(PROMISE_REJECT_SHARED_FUN, SharedFunctionInfo, promise_reject_shared_fun)  \
+  V(REGEXP_EXEC_FUNCTION_INDEX, JSFunction, regexp_exec_function)              \
   V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function)                        \
+  V(REGEXP_LAST_MATCH_INFO_INDEX, RegExpMatchInfo, regexp_last_match_info)     \
+  V(REGEXP_INTERNAL_MATCH_INFO_INDEX, RegExpMatchInfo,                         \
+    regexp_internal_match_info)                                                \
+  V(REGEXP_PROTOTYPE_MAP_INDEX, Map, regexp_prototype_map)                     \
   V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map)                           \
   V(SCRIPT_CONTEXT_TABLE_INDEX, ScriptContextTable, script_context_table)      \
   V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function)                        \
   V(SECURITY_TOKEN_INDEX, Object, security_token)                              \
   V(SELF_WEAK_CELL_INDEX, WeakCell, self_weak_cell)                            \
   V(SET_ITERATOR_MAP_INDEX, Map, set_iterator_map)                             \
+  V(FIXED_ARRAY_ITERATOR_MAP_INDEX, Map, fixed_array_iterator_map)             \
   V(SHARED_ARRAY_BUFFER_FUN_INDEX, JSFunction, shared_array_buffer_fun)        \
   V(SLOPPY_ARGUMENTS_MAP_INDEX, Map, sloppy_arguments_map)                     \
   V(SLOPPY_FUNCTION_MAP_INDEX, Map, sloppy_function_map)                       \
@@ -244,7 +335,8 @@
   V(UINT8X16_FUNCTION_INDEX, JSFunction, uint8x16_function)                    \
   V(CURRENT_MODULE_INDEX, Module, current_module)                              \
   NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V)                                        \
-  NATIVE_CONTEXT_IMPORTED_FIELDS(V)
+  NATIVE_CONTEXT_IMPORTED_FIELDS(V)                                            \
+  NATIVE_CONTEXT_JS_ARRAY_ITERATOR_MAPS(V)
 
 // A table of all script contexts. Every loaded top-level script with top-level
 // lexical declarations contributes its ScriptContext into this table.
@@ -357,7 +449,7 @@
   static inline Context* cast(Object* context);
 
   // The default context slot layout; indices are FixedArray slot indices.
-  enum {
+  enum Field {
     // These slots are in all contexts.
     CLOSURE_INDEX,
     PREVIOUS_INDEX,
@@ -563,6 +655,8 @@
   STATIC_ASSERT(EMBEDDER_DATA_INDEX == Internals::kContextEmbedderDataIndex);
 };
 
+typedef Context::Field ContextField;
+
 }  // namespace internal
 }  // namespace v8
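
Aside: the V(...) lists above are X-macros; the same list is expanded several
times (enum indices, accessors, visitors) so the entries can never drift out
of sync. A minimal standalone sketch of the pattern, using hypothetical
DEMO_* names rather than the actual V8 definitions:

    #include <cstdio>

    #define DEMO_FIELDS(V)    \
      V(FOO_INDEX, int, foo)  \
      V(BAR_INDEX, double, bar)

    enum DemoField {
    #define DECLARE_INDEX(index, type, name) index,
      DEMO_FIELDS(DECLARE_INDEX)
    #undef DECLARE_INDEX
      DEMO_FIELD_COUNT  // One enumerator per V(...) entry, in declaration order.
    };

    int main() {
      // FOO_INDEX == 0, BAR_INDEX == 1, mirroring the slot-layout idea above.
      std::printf("%d demo fields\n", DEMO_FIELD_COUNT);
    }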
 
diff --git a/src/counters-inl.h b/src/counters-inl.h
index 303e5e3..7219ef7 100644
--- a/src/counters-inl.h
+++ b/src/counters-inl.h
@@ -12,17 +12,20 @@
 
 RuntimeCallTimerScope::RuntimeCallTimerScope(
     Isolate* isolate, RuntimeCallStats::CounterId counter_id) {
-  if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
-                  FLAG_runtime_call_stats)) {
-    Initialize(isolate, counter_id);
+  if (V8_UNLIKELY(FLAG_runtime_stats)) {
+    Initialize(isolate->counters()->runtime_call_stats(), counter_id);
   }
 }
 
 RuntimeCallTimerScope::RuntimeCallTimerScope(
     HeapObject* heap_object, RuntimeCallStats::CounterId counter_id) {
-  if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
-                  FLAG_runtime_call_stats)) {
-    Initialize(heap_object->GetIsolate(), counter_id);
+  RuntimeCallTimerScope(heap_object->GetIsolate(), counter_id);
+}
+
+RuntimeCallTimerScope::RuntimeCallTimerScope(
+    RuntimeCallStats* stats, RuntimeCallStats::CounterId counter_id) {
+  if (V8_UNLIKELY(FLAG_runtime_stats)) {
+    Initialize(stats, counter_id);
   }
 }
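
A note on the refactoring above: RuntimeCallTimerScope is an RAII guard that
enters a counter's timer when constructed and leaves it when destroyed, and
the change reroutes it from an Isolate to a RuntimeCallStats pointer. A
minimal standalone sketch of the Enter/Leave pairing (illustrative Stats and
Timer types, not V8's):

    #include <cstdio>

    struct Timer { const char* name = nullptr; };

    struct Stats {
      static void Enter(Timer* timer, const char* name) {
        timer->name = name;
        std::printf("enter %s\n", name);
      }
      static void Leave(Timer* timer) { std::printf("leave %s\n", timer->name); }
    };

    class TimerScope {
     public:
      TimerScope(Stats* stats, const char* name) : stats_(stats) {
        // Guarded activation, analogous to the V8_UNLIKELY(FLAG_runtime_stats)
        // checks above.
        if (stats_ != nullptr) Stats::Enter(&timer_, name);
      }
      ~TimerScope() {
        if (stats_ != nullptr) Stats::Leave(&timer_);
      }

     private:
      Stats* stats_;
      Timer timer_;
    };

    int main() {
      Stats stats;
      TimerScope scope(&stats, "Parse");  // Leave() runs at scope exit.
    }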
 
diff --git a/src/counters.cc b/src/counters.cc
index c4e8646..5089eb2 100644
--- a/src/counters.cc
+++ b/src/counters.cc
@@ -238,7 +238,7 @@
       return count_ < other.count_;
     }
 
-    void Print(std::ostream& os) {
+    V8_NOINLINE void Print(std::ostream& os) {
       os.precision(2);
       os << std::fixed << std::setprecision(2);
       os << std::setw(50) << name_;
@@ -249,7 +249,8 @@
       os << std::endl;
     }
 
-    void SetTotal(base::TimeDelta total_time, uint64_t total_count) {
+    V8_NOINLINE void SetTotal(base::TimeDelta total_time,
+                              uint64_t total_count) {
       if (total_time.InMicroseconds() == 0) {
         time_percent_ = 0;
       } else {
@@ -276,125 +277,121 @@
   time = base::TimeDelta();
 }
 
-void RuntimeCallCounter::Dump(std::stringstream& out) {
-  out << "\"" << name << "\":[" << count << "," << time.InMicroseconds()
-      << "],";
+void RuntimeCallCounter::Dump(v8::tracing::TracedValue* value) {
+  value->BeginArray(name);
+  value->AppendLongInteger(count);
+  value->AppendLongInteger(time.InMicroseconds());
+  value->EndArray();
 }
 
+void RuntimeCallCounter::Add(RuntimeCallCounter* other) {
+  count += other->count;
+  time += other->time;
+}
+
+// static
+const RuntimeCallStats::CounterId RuntimeCallStats::counters[] = {
+#define CALL_RUNTIME_COUNTER(name) &RuntimeCallStats::name,
+    FOR_EACH_MANUAL_COUNTER(CALL_RUNTIME_COUNTER)  //
+#undef CALL_RUNTIME_COUNTER
+#define CALL_RUNTIME_COUNTER(name, nargs, ressize) \
+  &RuntimeCallStats::Runtime_##name,          //
+    FOR_EACH_INTRINSIC(CALL_RUNTIME_COUNTER)  //
+#undef CALL_RUNTIME_COUNTER
+#define CALL_BUILTIN_COUNTER(name) &RuntimeCallStats::Builtin_##name,
+    BUILTIN_LIST_C(CALL_BUILTIN_COUNTER)  //
+#undef CALL_BUILTIN_COUNTER
+#define CALL_BUILTIN_COUNTER(name) &RuntimeCallStats::API_##name,
+    FOR_EACH_API_COUNTER(CALL_BUILTIN_COUNTER)  //
+#undef CALL_BUILTIN_COUNTER
+#define CALL_BUILTIN_COUNTER(name) &RuntimeCallStats::Handler_##name,
+    FOR_EACH_HANDLER_COUNTER(CALL_BUILTIN_COUNTER)
+#undef CALL_BUILTIN_COUNTER
+};
+
 // static
 void RuntimeCallStats::Enter(RuntimeCallStats* stats, RuntimeCallTimer* timer,
                              CounterId counter_id) {
   RuntimeCallCounter* counter = &(stats->*counter_id);
-  timer->Start(counter, stats->current_timer_);
-  stats->current_timer_ = timer;
+  DCHECK(counter->name != nullptr);
+  timer->Start(counter, stats->current_timer_.Value());
+  stats->current_timer_.SetValue(timer);
 }
 
 // static
 void RuntimeCallStats::Leave(RuntimeCallStats* stats, RuntimeCallTimer* timer) {
-  if (stats->current_timer_ == timer) {
-    stats->current_timer_ = timer->Stop();
+  if (stats->current_timer_.Value() == timer) {
+    stats->current_timer_.SetValue(timer->Stop());
   } else {
     // Must be a Threading cctest. Walk the chain of Timers to find the
     // buried one that's leaving. We don't care about keeping nested timings
     // accurate, just avoid crashing by keeping the chain intact.
-    RuntimeCallTimer* next = stats->current_timer_;
-    while (next->parent_ != timer) next = next->parent_;
-    next->parent_ = timer->Stop();
+    RuntimeCallTimer* next = stats->current_timer_.Value();
+    while (next && next->parent() != timer) next = next->parent();
+    if (next == nullptr) return;
+    next->parent_.SetValue(timer->Stop());
+  }
+}
+
+void RuntimeCallStats::Add(RuntimeCallStats* other) {
+  for (const RuntimeCallStats::CounterId counter_id :
+       RuntimeCallStats::counters) {
+    RuntimeCallCounter* counter = &(this->*counter_id);
+    RuntimeCallCounter* other_counter = &(other->*counter_id);
+    counter->Add(other_counter);
   }
 }
 
 // static
 void RuntimeCallStats::CorrectCurrentCounterId(RuntimeCallStats* stats,
                                                CounterId counter_id) {
-  DCHECK_NOT_NULL(stats->current_timer_);
-  RuntimeCallCounter* counter = &(stats->*counter_id);
-  stats->current_timer_->counter_ = counter;
+  RuntimeCallTimer* timer = stats->current_timer_.Value();
+  // When RCS are enabled dynamically, there might be no current timer set up.
+  if (timer == nullptr) return;
+  timer->counter_ = &(stats->*counter_id);
 }
 
 void RuntimeCallStats::Print(std::ostream& os) {
   RuntimeCallStatEntries entries;
-
-#define PRINT_COUNTER(name) entries.Add(&this->name);
-  FOR_EACH_MANUAL_COUNTER(PRINT_COUNTER)
-#undef PRINT_COUNTER
-
-#define PRINT_COUNTER(name, nargs, ressize) entries.Add(&this->Runtime_##name);
-  FOR_EACH_INTRINSIC(PRINT_COUNTER)
-#undef PRINT_COUNTER
-
-#define PRINT_COUNTER(name) entries.Add(&this->Builtin_##name);
-  BUILTIN_LIST_C(PRINT_COUNTER)
-#undef PRINT_COUNTER
-
-#define PRINT_COUNTER(name) entries.Add(&this->API_##name);
-  FOR_EACH_API_COUNTER(PRINT_COUNTER)
-#undef PRINT_COUNTER
-
-#define PRINT_COUNTER(name) entries.Add(&this->Handler_##name);
-  FOR_EACH_HANDLER_COUNTER(PRINT_COUNTER)
-#undef PRINT_COUNTER
-
+  if (current_timer_.Value() != nullptr) {
+    current_timer_.Value()->Elapsed();
+  }
+  for (const RuntimeCallStats::CounterId counter_id :
+       RuntimeCallStats::counters) {
+    RuntimeCallCounter* counter = &(this->*counter_id);
+    entries.Add(counter);
+  }
   entries.Print(os);
 }
 
 void RuntimeCallStats::Reset() {
-  if (!FLAG_runtime_call_stats &&
-      !TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED())
-    return;
-#define RESET_COUNTER(name) this->name.Reset();
-  FOR_EACH_MANUAL_COUNTER(RESET_COUNTER)
-#undef RESET_COUNTER
+  if (V8_LIKELY(FLAG_runtime_stats == 0)) return;
 
-#define RESET_COUNTER(name, nargs, result_size) this->Runtime_##name.Reset();
-  FOR_EACH_INTRINSIC(RESET_COUNTER)
-#undef RESET_COUNTER
+  // In tracing, we only want to trace the time spent on top-level trace
+  // events. If the runtime counter stack is not empty, we should clear the
+  // whole runtime counter stack and then reset the counters so that we can
+  // dump them into top-level trace events accurately.
+  while (current_timer_.Value()) {
+    current_timer_.SetValue(current_timer_.Value()->Stop());
+  }
 
-#define RESET_COUNTER(name) this->Builtin_##name.Reset();
-  BUILTIN_LIST_C(RESET_COUNTER)
-#undef RESET_COUNTER
-
-#define RESET_COUNTER(name) this->API_##name.Reset();
-  FOR_EACH_API_COUNTER(RESET_COUNTER)
-#undef RESET_COUNTER
-
-#define RESET_COUNTER(name) this->Handler_##name.Reset();
-  FOR_EACH_HANDLER_COUNTER(RESET_COUNTER)
-#undef RESET_COUNTER
+  for (const RuntimeCallStats::CounterId counter_id :
+       RuntimeCallStats::counters) {
+    RuntimeCallCounter* counter = &(this->*counter_id);
+    counter->Reset();
+  }
 
   in_use_ = true;
 }
 
-std::string RuntimeCallStats::Dump() {
-  buffer_.str(std::string());
-  buffer_.clear();
-  buffer_ << "{";
-#define DUMP_COUNTER(name) \
-  if (this->name.count > 0) this->name.Dump(buffer_);
-  FOR_EACH_MANUAL_COUNTER(DUMP_COUNTER)
-#undef DUMP_COUNTER
+void RuntimeCallStats::Dump(v8::tracing::TracedValue* value) {
+  for (const RuntimeCallStats::CounterId counter_id :
+       RuntimeCallStats::counters) {
+    RuntimeCallCounter* counter = &(this->*counter_id);
+    if (counter->count > 0) counter->Dump(value);
+  }
 
-#define DUMP_COUNTER(name, nargs, result_size) \
-  if (this->Runtime_##name.count > 0) this->Runtime_##name.Dump(buffer_);
-  FOR_EACH_INTRINSIC(DUMP_COUNTER)
-#undef DUMP_COUNTER
-
-#define DUMP_COUNTER(name) \
-  if (this->Builtin_##name.count > 0) this->Builtin_##name.Dump(buffer_);
-  BUILTIN_LIST_C(DUMP_COUNTER)
-#undef DUMP_COUNTER
-
-#define DUMP_COUNTER(name) \
-  if (this->API_##name.count > 0) this->API_##name.Dump(buffer_);
-  FOR_EACH_API_COUNTER(DUMP_COUNTER)
-#undef DUMP_COUNTER
-
-#define DUMP_COUNTER(name) \
-  if (this->Handler_##name.count > 0) this->Handler_##name.Dump(buffer_);
-  FOR_EACH_HANDLER_COUNTER(DUMP_COUNTER)
-#undef DUMP_COUNTER
-  buffer_ << "\"END\":[]}";
   in_use_ = false;
-  return buffer_.str();
 }
 
 }  // namespace internal
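
The static counters[] table introduced above leans on C++ pointers to
members: each CounterId has type RuntimeCallCounter RuntimeCallStats::*, so
Add(), Print(), Reset() and Dump() can all share one loop that visits every
counter field through this->*counter_id. A standalone sketch of the idiom,
with illustrative names:

    #include <cstdint>
    #include <cstdio>

    struct Counter { int64_t count = 0; };

    struct Stats {
      Counter parse, compile, gc;
      // A CounterId names a member of Stats, not a concrete object.
      typedef Counter Stats::*CounterId;
      static const CounterId kAll[3];
      void Reset() {
        for (CounterId id : kAll) (this->*id).count = 0;
      }
    };

    const Stats::CounterId Stats::kAll[3] = {&Stats::parse, &Stats::compile,
                                             &Stats::gc};

    int main() {
      Stats stats;
      stats.parse.count = 7;
      stats.Reset();
      std::printf("parse=%lld\n", static_cast<long long>(stats.parse.count));
    }
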
diff --git a/src/counters.h b/src/counters.h
index 707ae9f..4415250 100644
--- a/src/counters.h
+++ b/src/counters.h
@@ -7,6 +7,7 @@
 
 #include "include/v8.h"
 #include "src/allocation.h"
+#include "src/base/atomic-utils.h"
 #include "src/base/platform/elapsed-timer.h"
 #include "src/base/platform/time.h"
 #include "src/builtins/builtins.h"
@@ -15,6 +16,8 @@
 #include "src/objects.h"
 #include "src/runtime/runtime.h"
 #include "src/tracing/trace-event.h"
+#include "src/tracing/traced-value.h"
+#include "src/tracing/tracing-category-observer.h"
 
 namespace v8 {
 namespace internal {
@@ -483,8 +486,9 @@
 
 struct RuntimeCallCounter {
   explicit RuntimeCallCounter(const char* name) : name(name) {}
-  void Reset();
-  V8_NOINLINE void Dump(std::stringstream& out);
+  V8_NOINLINE void Reset();
+  V8_NOINLINE void Dump(v8::tracing::TracedValue* value);
+  void Add(RuntimeCallCounter* other);
 
   const char* name;
   int64_t count = 0;
@@ -495,33 +499,49 @@
 // timers used for properly measuring the own time of a RuntimeCallCounter.
 class RuntimeCallTimer {
  public:
-  RuntimeCallTimer() {}
   RuntimeCallCounter* counter() { return counter_; }
   base::ElapsedTimer timer() { return timer_; }
+  RuntimeCallTimer* parent() const { return parent_.Value(); }
 
  private:
   friend class RuntimeCallStats;
 
   inline void Start(RuntimeCallCounter* counter, RuntimeCallTimer* parent) {
     counter_ = counter;
-    parent_ = parent;
-    timer_.Start();
+    parent_.SetValue(parent);
+    if (FLAG_runtime_stats !=
+        v8::tracing::TracingCategoryObserver::ENABLED_BY_SAMPLING) {
+      timer_.Start();
+    }
   }
 
   inline RuntimeCallTimer* Stop() {
+    if (!timer_.IsStarted()) return parent();
     base::TimeDelta delta = timer_.Elapsed();
     timer_.Stop();
     counter_->count++;
     counter_->time += delta;
-    if (parent_ != NULL) {
+    if (parent()) {
       // Adjust parent timer so that it does not include sub timer's time.
-      parent_->counter_->time -= delta;
+      parent()->counter_->time -= delta;
     }
-    return parent_;
+    return parent();
   }
 
+  inline void Elapsed() {
+    base::TimeDelta delta = timer_.Elapsed();
+    counter_->time += delta;
+    if (parent()) {
+      parent()->counter_->time -= delta;
+      parent()->Elapsed();
+    }
+    timer_.Restart();
+  }
+
+  const char* name() { return counter_->name; }
+
   RuntimeCallCounter* counter_ = nullptr;
-  RuntimeCallTimer* parent_ = nullptr;
+  base::AtomicValue<RuntimeCallTimer*> parent_;
   base::ElapsedTimer timer_;
 };
 
@@ -670,6 +690,11 @@
 #define FOR_EACH_MANUAL_COUNTER(V)                  \
   V(AccessorGetterCallback)                         \
   V(AccessorNameGetterCallback)                     \
+  V(AccessorNameGetterCallback_ArrayLength)         \
+  V(AccessorNameGetterCallback_BoundFunctionLength) \
+  V(AccessorNameGetterCallback_BoundFunctionName)   \
+  V(AccessorNameGetterCallback_FunctionPrototype)   \
+  V(AccessorNameGetterCallback_StringLength)        \
   V(AccessorNameSetterCallback)                     \
   V(Compile)                                        \
   V(CompileCode)                                    \
@@ -678,6 +703,7 @@
   V(CompileEval)                                    \
   V(CompileFullCode)                                \
   V(CompileIgnition)                                \
+  V(CompilerDispatcher)                             \
   V(CompileSerialize)                               \
   V(DeoptimizeCode)                                 \
   V(FunctionCallback)                               \
@@ -701,8 +727,14 @@
   V(Map_TransitionToDataProperty)                   \
   V(Object_DeleteProperty)                          \
   V(OptimizeCode)                                   \
-  V(Parse)                                          \
-  V(ParseLazy)                                      \
+  V(ParseArrowFunctionLiteral)                      \
+  V(ParseEval)                                      \
+  V(ParseFunction)                                  \
+  V(ParseFunctionLiteral)                           \
+  V(ParseProgram)                                   \
+  V(PreParseArrowFunctionLiteral)                   \
+  V(PreParseNoVariableResolution)                   \
+  V(PreParseWithVariableResolution)                 \
   V(PropertyCallback)                               \
   V(PrototypeMap_TransitionToAccessorProperty)      \
   V(PrototypeMap_TransitionToDataProperty)          \
@@ -712,46 +744,75 @@
   /* Dummy counter for the unexpected stub miss. */ \
   V(UnexpectedStubMiss)
 
-#define FOR_EACH_HANDLER_COUNTER(V)             \
-  V(IC_HandlerCacheHit)                         \
-  V(KeyedLoadIC_LoadIndexedStringStub)          \
-  V(KeyedLoadIC_LoadIndexedInterceptorStub)     \
-  V(KeyedLoadIC_KeyedLoadSloppyArgumentsStub)   \
-  V(KeyedLoadIC_LoadFastElementStub)            \
-  V(KeyedLoadIC_LoadDictionaryElementStub)      \
-  V(KeyedLoadIC_SlowStub)                       \
-  V(KeyedStoreIC_KeyedStoreSloppyArgumentsStub) \
-  V(KeyedStoreIC_StoreFastElementStub)          \
-  V(KeyedStoreIC_StoreElementStub)              \
-  V(KeyedStoreIC_Polymorphic)                   \
-  V(LoadIC_FunctionPrototypeStub)               \
-  V(LoadIC_LoadApiGetterStub)                   \
-  V(LoadIC_LoadCallback)                        \
-  V(LoadIC_LoadConstant)                        \
-  V(LoadIC_LoadConstantStub)                    \
-  V(LoadIC_LoadField)                           \
-  V(LoadIC_LoadFieldStub)                       \
-  V(LoadIC_LoadGlobal)                          \
-  V(LoadIC_LoadInterceptor)                     \
-  V(LoadIC_LoadNonexistent)                     \
-  V(LoadIC_LoadNormal)                          \
-  V(LoadIC_LoadScriptContextFieldStub)          \
-  V(LoadIC_LoadViaGetter)                       \
-  V(LoadIC_SlowStub)                            \
-  V(LoadIC_StringLengthStub)                    \
-  V(StoreIC_SlowStub)                           \
-  V(StoreIC_StoreCallback)                      \
-  V(StoreIC_StoreField)                         \
-  V(StoreIC_StoreFieldStub)                     \
-  V(StoreIC_StoreGlobal)                        \
-  V(StoreIC_StoreGlobalTransition)              \
-  V(StoreIC_StoreInterceptorStub)               \
-  V(StoreIC_StoreNormal)                        \
-  V(StoreIC_StoreScriptContextFieldStub)        \
-  V(StoreIC_StoreTransition)                    \
+#define FOR_EACH_HANDLER_COUNTER(V)              \
+  V(IC_HandlerCacheHit)                          \
+  V(KeyedLoadIC_LoadIndexedStringStub)           \
+  V(KeyedLoadIC_LoadIndexedInterceptorStub)      \
+  V(KeyedLoadIC_KeyedLoadSloppyArgumentsStub)    \
+  V(KeyedLoadIC_LoadElementDH)                   \
+  V(KeyedLoadIC_LoadFastElementStub)             \
+  V(KeyedLoadIC_LoadDictionaryElementStub)       \
+  V(KeyedLoadIC_SlowStub)                        \
+  V(KeyedStoreIC_ElementsTransitionAndStoreStub) \
+  V(KeyedStoreIC_KeyedStoreSloppyArgumentsStub)  \
+  V(KeyedStoreIC_SlowStub)                       \
+  V(KeyedStoreIC_StoreFastElementStub)           \
+  V(KeyedStoreIC_StoreElementStub)               \
+  V(LoadIC_FunctionPrototypeStub)                \
+  V(LoadIC_HandlerCacheHit_AccessCheck)          \
+  V(LoadIC_HandlerCacheHit_Exotic)               \
+  V(LoadIC_HandlerCacheHit_Interceptor)          \
+  V(LoadIC_HandlerCacheHit_JSProxy)              \
+  V(LoadIC_HandlerCacheHit_NonExistent)          \
+  V(LoadIC_HandlerCacheHit_Accessor)             \
+  V(LoadIC_HandlerCacheHit_Data)                 \
+  V(LoadIC_HandlerCacheHit_Transition)           \
+  V(LoadIC_LoadApiGetterDH)                      \
+  V(LoadIC_LoadApiGetterFromPrototypeDH)         \
+  V(LoadIC_LoadApiGetterStub)                    \
+  V(LoadIC_LoadCallback)                         \
+  V(LoadIC_LoadConstantDH)                       \
+  V(LoadIC_LoadConstantFromPrototypeDH)          \
+  V(LoadIC_LoadConstant)                         \
+  V(LoadIC_LoadConstantStub)                     \
+  V(LoadIC_LoadFieldDH)                          \
+  V(LoadIC_LoadFieldFromPrototypeDH)             \
+  V(LoadIC_LoadField)                            \
+  V(LoadIC_LoadFieldStub)                        \
+  V(LoadIC_LoadGlobal)                           \
+  V(LoadIC_LoadInterceptor)                      \
+  V(LoadIC_LoadNonexistentDH)                    \
+  V(LoadIC_LoadNonexistent)                      \
+  V(LoadIC_LoadNormal)                           \
+  V(LoadIC_LoadScriptContextFieldStub)           \
+  V(LoadIC_LoadViaGetter)                        \
+  V(LoadIC_Premonomorphic)                       \
+  V(LoadIC_SlowStub)                             \
+  V(LoadIC_StringLengthStub)                     \
+  V(StoreIC_HandlerCacheHit_AccessCheck)         \
+  V(StoreIC_HandlerCacheHit_Exotic)              \
+  V(StoreIC_HandlerCacheHit_Interceptor)         \
+  V(StoreIC_HandlerCacheHit_JSProxy)             \
+  V(StoreIC_HandlerCacheHit_NonExistent)         \
+  V(StoreIC_HandlerCacheHit_Accessor)            \
+  V(StoreIC_HandlerCacheHit_Data)                \
+  V(StoreIC_HandlerCacheHit_Transition)          \
+  V(StoreIC_Premonomorphic)                      \
+  V(StoreIC_SlowStub)                            \
+  V(StoreIC_StoreCallback)                       \
+  V(StoreIC_StoreField)                          \
+  V(StoreIC_StoreFieldDH)                        \
+  V(StoreIC_StoreFieldStub)                      \
+  V(StoreIC_StoreGlobal)                         \
+  V(StoreIC_StoreGlobalTransition)               \
+  V(StoreIC_StoreInterceptorStub)                \
+  V(StoreIC_StoreNormal)                         \
+  V(StoreIC_StoreScriptContextFieldStub)         \
+  V(StoreIC_StoreTransition)                     \
+  V(StoreIC_StoreTransitionDH)                   \
   V(StoreIC_StoreViaSetter)
 
-class RuntimeCallStats {
+class RuntimeCallStats : public ZoneObject {
  public:
   typedef RuntimeCallCounter RuntimeCallStats::*CounterId;
 
@@ -776,6 +837,8 @@
   FOR_EACH_HANDLER_COUNTER(CALL_BUILTIN_COUNTER)
 #undef CALL_BUILTIN_COUNTER
 
+  static const CounterId counters[];
+
   // Start measuring the time for a function. This will establish the
   // connection to the parent counter for properly calculating own times.
   static void Enter(RuntimeCallStats* stats, RuntimeCallTimer* timer,
@@ -792,37 +855,37 @@
                                       CounterId counter_id);
 
   void Reset();
-  V8_NOINLINE void Print(std::ostream& os);
-  V8_NOINLINE std::string Dump();
+  // Add all entries from another stats object.
+  void Add(RuntimeCallStats* other);
+  void Print(std::ostream& os);
+  V8_NOINLINE void Dump(v8::tracing::TracedValue* value);
 
   RuntimeCallStats() {
     Reset();
     in_use_ = false;
   }
 
-  RuntimeCallTimer* current_timer() { return current_timer_; }
+  RuntimeCallTimer* current_timer() { return current_timer_.Value(); }
   bool InUse() { return in_use_; }
 
  private:
-  std::stringstream buffer_;
   // Counter to track recursive time events.
-  RuntimeCallTimer* current_timer_ = NULL;
+  base::AtomicValue<RuntimeCallTimer*> current_timer_;
   // Used to track nested tracing scopes.
   bool in_use_;
 };
 
-#define TRACE_RUNTIME_CALL_STATS(isolate, counter_name)                 \
-  do {                                                                  \
-    if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() || \
-                    FLAG_runtime_call_stats)) {                         \
-      RuntimeCallStats::CorrectCurrentCounterId(                        \
-          isolate->counters()->runtime_call_stats(),                    \
-          &RuntimeCallStats::counter_name);                             \
-    }                                                                   \
+#define CHANGE_CURRENT_RUNTIME_COUNTER(runtime_call_stats, counter_name) \
+  do {                                                                   \
+    if (V8_UNLIKELY(FLAG_runtime_stats)) {                               \
+      RuntimeCallStats::CorrectCurrentCounterId(                         \
+          runtime_call_stats, &RuntimeCallStats::counter_name);          \
+    }                                                                    \
   } while (false)
 
-#define TRACE_HANDLER_STATS(isolate, counter_name) \
-  TRACE_RUNTIME_CALL_STATS(isolate, Handler_##counter_name)
+#define TRACE_HANDLER_STATS(isolate, counter_name)                          \
+  CHANGE_CURRENT_RUNTIME_COUNTER(isolate->counters()->runtime_call_stats(), \
+                                 Handler_##counter_name)
 
 #define HISTOGRAM_RANGE_LIST(HR)                                              \
   /* Generic range histograms */                                              \
@@ -857,10 +920,6 @@
      MILLISECOND)                                                              \
   HT(gc_low_memory_notification, V8.GCLowMemoryNotification, 10000,            \
      MILLISECOND)                                                              \
-  /* Parsing timers. */                                                        \
-  HT(parse, V8.ParseMicroSeconds, 1000000, MICROSECOND)                        \
-  HT(parse_lazy, V8.ParseLazyMicroSeconds, 1000000, MICROSECOND)               \
-  HT(pre_parse, V8.PreParseMicroSeconds, 1000000, MICROSECOND)                 \
   /* Compilation times. */                                                     \
   HT(compile, V8.CompileMicroSeconds, 1000000, MICROSECOND)                    \
   HT(compile_eval, V8.CompileEvalMicroSeconds, 1000000, MICROSECOND)           \
@@ -1249,23 +1308,23 @@
   // stats are disabled and the isolate is not directly available.
   inline RuntimeCallTimerScope(HeapObject* heap_object,
                                RuntimeCallStats::CounterId counter_id);
+  inline RuntimeCallTimerScope(RuntimeCallStats* stats,
+                               RuntimeCallStats::CounterId counter_id);
 
   inline ~RuntimeCallTimerScope() {
-    if (V8_UNLIKELY(isolate_ != nullptr)) {
-      RuntimeCallStats::Leave(isolate_->counters()->runtime_call_stats(),
-                              &timer_);
+    if (V8_UNLIKELY(stats_ != nullptr)) {
+      RuntimeCallStats::Leave(stats_, &timer_);
     }
   }
 
  private:
-  V8_INLINE void Initialize(Isolate* isolate,
+  V8_INLINE void Initialize(RuntimeCallStats* stats,
                             RuntimeCallStats::CounterId counter_id) {
-    isolate_ = isolate;
-    RuntimeCallStats::Enter(isolate_->counters()->runtime_call_stats(), &timer_,
-                            counter_id);
+    stats_ = stats;
+    RuntimeCallStats::Enter(stats_, &timer_, counter_id);
   }
 
-  Isolate* isolate_ = nullptr;
+  RuntimeCallStats* stats_ = nullptr;
   RuntimeCallTimer timer_;
 };
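
The Stop() and Elapsed() logic above implements "own time" accounting: when a
nested timer stops, its elapsed delta is subtracted from the parent's counter,
so each counter reports only the time spent in its own frame. A compact sketch
of just that subtraction, with explicit deltas standing in for the real
ElapsedTimer readings:

    #include <cstdio>

    struct Counter { double time_ms = 0; };

    struct Timer {
      Counter* counter;
      Timer* parent;
      // Stop with a known elapsed delta; the real code reads a monotonic clock.
      Timer* Stop(double delta_ms) {
        counter->time_ms += delta_ms;
        // Remove the child's share so the parent reports only its own time.
        if (parent != nullptr) parent->counter->time_ms -= delta_ms;
        return parent;
      }
    };

    int main() {
      Counter outer_counter, inner_counter;
      Timer outer{&outer_counter, nullptr};
      Timer inner{&inner_counter, &outer};
      inner.Stop(3.0);   // Inner frame ran for 3 ms.
      outer.Stop(10.0);  // Outer ran 10 ms total, 7 ms of it own time.
      std::printf("outer own=%.1f inner=%.1f\n", outer_counter.time_ms,
                  inner_counter.time_ms);
    }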
 
diff --git a/src/crankshaft/arm/lithium-arm.cc b/src/crankshaft/arm/lithium-arm.cc
index 8c4b735..823f5a9 100644
--- a/src/crankshaft/arm/lithium-arm.cc
+++ b/src/crankshaft/arm/lithium-arm.cc
@@ -205,14 +205,6 @@
   stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
 }
 
-
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if has_cached_array_index(");
-  value()->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
 void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if class_of_test(");
   value()->PrintTo(stream);
@@ -875,15 +867,15 @@
   HValue* value = instr->value();
   Representation r = value->representation();
   HType type = value->type();
-  ToBooleanICStub::Types expected = instr->expected_input_types();
-  if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+  ToBooleanHints expected = instr->expected_input_types();
+  if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
 
   bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
       type.IsJSArray() || type.IsHeapNumber() || type.IsString();
   LInstruction* branch = new(zone()) LBranch(UseRegister(value));
-  if (!easy_case &&
-      ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
-       !expected.IsGeneric())) {
+  if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) &&
+                      (expected & ToBooleanHint::kNeedsMap)) ||
+                     expected != ToBooleanHint::kAny)) {
     branch = AssignEnvironment(branch);
   }
   return branch;
@@ -1725,24 +1717,6 @@
   return new(zone()) LHasInstanceTypeAndBranch(value);
 }
 
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
-    HGetCachedArrayIndex* instr)  {
-  DCHECK(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
-
-  return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
-    HHasCachedArrayIndexAndBranch* instr) {
-  DCHECK(instr->value()->representation().IsTagged());
-  return new(zone()) LHasCachedArrayIndexAndBranch(
-      UseRegisterAtStart(instr->value()));
-}
-
-
 LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
     HClassOfTestAndBranch* instr) {
   DCHECK(instr->value()->representation().IsTagged());
@@ -1999,15 +1973,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
-  LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
-  return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
   LInstruction* result =
@@ -2043,18 +2008,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* object =
-      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
-  LInstruction* result =
-      DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), r0);
-  return MarkAsCall(result, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
     HLoadFunctionPrototype* instr) {
   return AssignEnvironment(DefineAsRegister(
@@ -2114,20 +2067,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* object =
-      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
-  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
-  LInstruction* result =
-      DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector),
-                  r0);
-  return MarkAsCall(result, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
   if (!instr->is_fixed_typed_array()) {
     DCHECK(instr->elements()->representation().IsTagged());
@@ -2405,7 +2344,6 @@
   inner->BindContext(instr->closure_context());
   inner->set_entry(instr);
   current_block_->UpdateEnvironment(inner);
-  chunk_->AddInlinedFunction(instr->shared());
   return NULL;
 }
 
diff --git a/src/crankshaft/arm/lithium-arm.h b/src/crankshaft/arm/lithium-arm.h
index abdfbdd..0d066c9 100644
--- a/src/crankshaft/arm/lithium-arm.h
+++ b/src/crankshaft/arm/lithium-arm.h
@@ -71,9 +71,7 @@
   V(FlooringDivI)                            \
   V(ForInCacheArray)                         \
   V(ForInPrepareMap)                         \
-  V(GetCachedArrayIndex)                     \
   V(Goto)                                    \
-  V(HasCachedArrayIndexAndBranch)            \
   V(HasInPrototypeChainAndBranch)            \
   V(HasInstanceTypeAndBranch)                \
   V(InnerAllocatedObject)                    \
@@ -89,11 +87,8 @@
   V(LoadRoot)                                \
   V(LoadFieldByIndex)                        \
   V(LoadFunctionPrototype)                   \
-  V(LoadGlobalGeneric)                       \
   V(LoadKeyed)                               \
-  V(LoadKeyedGeneric)                        \
   V(LoadNamedField)                          \
-  V(LoadNamedGeneric)                        \
   V(MathAbs)                                 \
   V(MathClz32)                               \
   V(MathCos)                                 \
@@ -1071,35 +1066,6 @@
 };
 
 
-class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LGetCachedArrayIndex(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
-  DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
- public:
-  explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
-                               "has-cached-array-index-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
-
-  void PrintDataTo(StringStream* stream) override;
-};
-
-
 class LClassOfTestAndBranch final : public LControlInstruction<1, 1> {
  public:
   LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
@@ -1484,25 +1450,6 @@
 };
 
 
-class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
- public:
-  LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    temps_[0] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
 class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadFunctionPrototype(LOperand* function) {
@@ -1551,43 +1498,6 @@
 };
 
 
-class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
- public:
-  LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
-                    LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    inputs_[2] = key;
-    temps_[0] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* key() { return inputs_[2]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
-};
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
- public:
-  LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
-    inputs_[0] = context;
-    temps_[0] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-  TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
-};
-
-
 class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadContextSlot(LOperand* context) {
@@ -1960,6 +1870,8 @@
 
   DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
   DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToNumber(); }
 };
 
 
diff --git a/src/crankshaft/arm/lithium-codegen-arm.cc b/src/crankshaft/arm/lithium-codegen-arm.cc
index f2cc4b4..e092a9e 100644
--- a/src/crankshaft/arm/lithium-codegen-arm.cc
+++ b/src/crankshaft/arm/lithium-codegen-arm.cc
@@ -253,8 +253,7 @@
 
       HValue* value =
           instructions_->at(code->instruction_index())->hydrogen_value();
-      RecordAndWritePosition(
-          chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+      RecordAndWritePosition(value->position());
 
       Comment(";;; <@%d,#%d> "
               "-------------------- Deferred %s --------------------",
@@ -2058,45 +2057,44 @@
       __ cmp(ip, Operand::Zero());
       EmitBranch(instr, ne);
     } else {
-      ToBooleanICStub::Types expected =
-          instr->hydrogen()->expected_input_types();
+      ToBooleanHints expected = instr->hydrogen()->expected_input_types();
       // Avoid deopts in the case where we've never executed this path before.
-      if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+      if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
 
-      if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
+      if (expected & ToBooleanHint::kUndefined) {
         // undefined -> false.
         __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
         __ b(eq, instr->FalseLabel(chunk_));
       }
-      if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
+      if (expected & ToBooleanHint::kBoolean) {
         // Boolean -> its value.
         __ CompareRoot(reg, Heap::kTrueValueRootIndex);
         __ b(eq, instr->TrueLabel(chunk_));
         __ CompareRoot(reg, Heap::kFalseValueRootIndex);
         __ b(eq, instr->FalseLabel(chunk_));
       }
-      if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
+      if (expected & ToBooleanHint::kNull) {
         // 'null' -> false.
         __ CompareRoot(reg, Heap::kNullValueRootIndex);
         __ b(eq, instr->FalseLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanICStub::SMI)) {
+      if (expected & ToBooleanHint::kSmallInteger) {
         // Smis: 0 -> false, all other -> true.
         __ cmp(reg, Operand::Zero());
         __ b(eq, instr->FalseLabel(chunk_));
         __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
-      } else if (expected.NeedsMap()) {
+      } else if (expected & ToBooleanHint::kNeedsMap) {
         // If we need a map later and have a Smi -> deopt.
         __ SmiTst(reg);
         DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi);
       }
 
       const Register map = scratch0();
-      if (expected.NeedsMap()) {
+      if (expected & ToBooleanHint::kNeedsMap) {
         __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));
 
-        if (expected.CanBeUndetectable()) {
+        if (expected & ToBooleanHint::kCanBeUndetectable) {
           // Undetectable -> false.
           __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
           __ tst(ip, Operand(1 << Map::kIsUndetectable));
@@ -2104,13 +2102,13 @@
         }
       }
 
-      if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
+      if (expected & ToBooleanHint::kReceiver) {
         // spec object -> true.
         __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
         __ b(ge, instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanICStub::STRING)) {
+      if (expected & ToBooleanHint::kString) {
         // String value -> false iff empty.
         Label not_string;
         __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
@@ -2122,19 +2120,19 @@
         __ bind(&not_string);
       }
 
-      if (expected.Contains(ToBooleanICStub::SYMBOL)) {
+      if (expected & ToBooleanHint::kSymbol) {
         // Symbol value -> true.
         __ CompareInstanceType(map, ip, SYMBOL_TYPE);
         __ b(eq, instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
+      if (expected & ToBooleanHint::kSimdValue) {
         // SIMD value -> true.
         __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
         __ b(eq, instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
+      if (expected & ToBooleanHint::kHeapNumber) {
         // heap number -> false iff +0, -0, or NaN.
         DwVfpRegister dbl_scratch = double_scratch0();
         Label not_heap_number;
@@ -2148,7 +2146,7 @@
         __ bind(&not_heap_number);
       }
 
-      if (!expected.IsGeneric()) {
+      if (expected != ToBooleanHint::kAny) {
         // We've seen something for the first time -> deopt.
         // This can only happen if we are not generic already.
         DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject);
@@ -2393,30 +2391,6 @@
   EmitBranch(instr, BranchCondition(instr->hydrogen()));
 }
 
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
-  Register input = ToRegister(instr->value());
-  Register result = ToRegister(instr->result());
-
-  __ AssertString(input);
-
-  __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
-  __ IndexFromHash(result, result);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
-    LHasCachedArrayIndexAndBranch* instr) {
-  Register input = ToRegister(instr->value());
-  Register scratch = scratch0();
-
-  __ ldr(scratch,
-         FieldMemOperand(input, String::kHashFieldOffset));
-  __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
-  EmitBranch(instr, eq);
-}
-
-
 // Branches to a label or falls through with the answer in flags.  Trashes
 // the temp registers, but not the input.
 void LCodeGen::EmitClassOfTest(Label* is_true,
@@ -2585,35 +2559,6 @@
 }
 
 
-template <class T>
-void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
-  Register vector_register = ToRegister(instr->temp_vector());
-  Register slot_register = LoadDescriptor::SlotRegister();
-  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
-  DCHECK(slot_register.is(r0));
-
-  AllowDeferredHandleDereference vector_structure_check;
-  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
-  __ Move(vector_register, vector);
-  // No need to allocate this register.
-  FeedbackVectorSlot slot = instr->hydrogen()->slot();
-  int index = vector->GetIndex(slot);
-  __ mov(slot_register, Operand(Smi::FromInt(index)));
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->result()).is(r0));
-
-  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  Handle<Code> ic =
-      CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
-          .code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
@@ -2696,19 +2641,6 @@
 }
 
 
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->result()).is(r0));
-
-  // Name is always in r2.
-  __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
-  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
-}
-
-
 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
   Register scratch = scratch0();
   Register function = ToRegister(instr->function());
@@ -2938,11 +2870,11 @@
     __ b(ne, &done);
     if (info()->IsStub()) {
       // A stub can safely convert the hole to undefined only if the array
-      // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
+      // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
       // it needs to bail out.
       __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
       __ ldr(result, FieldMemOperand(result, Cell::kValueOffset));
-      __ cmp(result, Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
+      __ cmp(result, Operand(Smi::FromInt(Isolate::kProtectorValid)));
       DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
     }
     __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
@@ -2993,18 +2925,6 @@
 }
 
 
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
-
-  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-
-  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
-}
-
-
 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   Register scratch = scratch0();
   Register result = ToRegister(instr->result());
@@ -4539,8 +4459,7 @@
 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                                 DwVfpRegister result_reg,
                                 NumberUntagDMode mode) {
-  bool can_convert_undefined_to_nan =
-      instr->hydrogen()->can_convert_undefined_to_nan();
+  bool can_convert_undefined_to_nan = instr->truncating();
   bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
 
   Register scratch = scratch0();
@@ -4617,34 +4536,12 @@
   __ cmp(scratch1, Operand(ip));
 
   if (instr->truncating()) {
-    // Performs a truncating conversion of a floating point number as used by
-    // the JS bitwise operations.
-    Label no_heap_number, check_bools, check_false;
-    __ b(ne, &no_heap_number);
+    Label truncate;
+    __ b(eq, &truncate);
+    __ CompareInstanceType(scratch1, scratch1, ODDBALL_TYPE);
+    DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball);
+    __ bind(&truncate);
     __ TruncateHeapNumberToI(input_reg, scratch2);
-    __ b(&done);
-
-    // Check for Oddballs. Undefined/False is converted to zero and True to one
-    // for truncating conversions.
-    __ bind(&no_heap_number);
-    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-    __ cmp(scratch2, Operand(ip));
-    __ b(ne, &check_bools);
-    __ mov(input_reg, Operand::Zero());
-    __ b(&done);
-
-    __ bind(&check_bools);
-    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
-    __ cmp(scratch2, Operand(ip));
-    __ b(ne, &check_false);
-    __ mov(input_reg, Operand(1));
-    __ b(&done);
-
-    __ bind(&check_false);
-    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
-    __ cmp(scratch2, Operand(ip));
-    DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
-    __ mov(input_reg, Operand::Zero());
   } else {
     DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
 
@@ -5052,7 +4949,7 @@
   // TODO(3095996): Get rid of this. For now, we need to make the
   // result register contain a valid pointer because it is already
   // contained in the register pointer map.
-  __ mov(result, Operand(Smi::FromInt(0)));
+  __ mov(result, Operand(Smi::kZero));
 
   PushSafepointRegistersScope scope(this);
   if (instr->size()->IsRegister()) {
@@ -5386,7 +5283,7 @@
   Register result = ToRegister(instr->result());
   Label load_cache, done;
   __ EnumLength(result, map);
-  __ cmp(result, Operand(Smi::FromInt(0)));
+  __ cmp(result, Operand(Smi::kZero));
   __ b(ne, &load_cache);
   __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
   __ jmp(&done);
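
The migration from ToBooleanICStub::Types to ToBooleanHints in this file
replaces a small set abstraction with typed bit flags tested via operator&.
A sketch of that typed-flags pattern follows; the Hint values and operators
here are assumptions for illustration, not V8's actual ToBooleanHint
definition:

    #include <cstdio>

    enum class Hint : unsigned {
      kNone = 0,
      kUndefined = 1u << 0,
      kBoolean = 1u << 1,
      kNeedsMap = 1u << 2,
      kAny = (1u << 3) - 1,  // Union of all individual hints.
    };

    constexpr Hint operator|(Hint a, Hint b) {
      return static_cast<Hint>(static_cast<unsigned>(a) |
                               static_cast<unsigned>(b));
    }
    // Returning bool lets flag tests sit directly inside if-conditions,
    // matching the `expected & ToBooleanHint::kNeedsMap` usage above.
    constexpr bool operator&(Hint a, Hint b) {
      return (static_cast<unsigned>(a) & static_cast<unsigned>(b)) != 0;
    }

    int main() {
      Hint expected = Hint::kUndefined | Hint::kNeedsMap;
      if (expected == Hint::kNone) expected = Hint::kAny;  // The codegen guard.
      std::printf("needs map: %d\n", (expected & Hint::kNeedsMap) ? 1 : 0);
    }
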
diff --git a/src/crankshaft/arm64/lithium-arm64.cc b/src/crankshaft/arm64/lithium-arm64.cc
index 8a9ce42..e5227e3 100644
--- a/src/crankshaft/arm64/lithium-arm64.cc
+++ b/src/crankshaft/arm64/lithium-arm64.cc
@@ -102,14 +102,6 @@
   stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
 }
 
-
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if has_cached_array_index(");
-  value()->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
 bool LGoto::HasInterestingComment(LCodeGen* gen) const {
   return !gen->IsNextEmittedBlock(block_id());
 }
@@ -942,12 +934,13 @@
       return new(zone()) LBranch(UseRegister(value), TempRegister(), NULL);
     }
 
-    ToBooleanICStub::Types expected = instr->expected_input_types();
-    bool needs_temps = expected.NeedsMap() || expected.IsEmpty();
+    ToBooleanHints expected = instr->expected_input_types();
+    bool needs_temps = (expected & ToBooleanHint::kNeedsMap) ||
+                       expected == ToBooleanHint::kNone;
     LOperand* temp1 = needs_temps ? TempRegister() : NULL;
     LOperand* temp2 = needs_temps ? TempRegister() : NULL;
 
-    if (expected.IsGeneric() || expected.IsEmpty()) {
+    if (expected == ToBooleanHint::kAny || expected == ToBooleanHint::kNone) {
       // The generic case cannot deoptimize because it already supports every
       // possible input type.
       DCHECK(needs_temps);
@@ -1409,7 +1402,6 @@
   inner->BindContext(instr->closure_context());
   inner->set_entry(instr);
   current_block_->UpdateEnvironment(inner);
-  chunk_->AddInlinedFunction(instr->shared());
   return NULL;
 }
 
@@ -1428,28 +1420,10 @@
   return NULL;
 }
 
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
-    HGetCachedArrayIndex* instr) {
-  DCHECK(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
-}
-
-
 LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
   return new(zone()) LGoto(instr->FirstSuccessor());
 }
 
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
-    HHasCachedArrayIndexAndBranch* instr) {
-  DCHECK(instr->value()->representation().IsTagged());
-  return new(zone()) LHasCachedArrayIndexAndBranch(
-      UseRegisterAtStart(instr->value()), TempRegister());
-}
-
-
 LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
     HHasInstanceTypeAndBranch* instr) {
   DCHECK(instr->value()->representation().IsTagged());
@@ -1551,15 +1525,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
-  LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
-  return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
   DCHECK(instr->key()->representation().IsSmiOrInteger32());
   ElementsKind elements_kind = instr->elements_kind();
@@ -1610,38 +1575,12 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* object =
-      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
-  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
-  LInstruction* result =
-      DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector),
-                  x0);
-  return MarkAsCall(result, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
   LOperand* object = UseRegisterAtStart(instr->object());
   return DefineAsRegister(new(zone()) LLoadNamedField(object));
 }
 
 
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* object =
-      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
-  LInstruction* result =
-      DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), x0);
-  return MarkAsCall(result, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
   return DefineAsRegister(new(zone()) LLoadRoot);
 }
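
Several hunks in this file replace the old ToBooleanICStub::Types bitset (Contains(), NeedsMap(), IsEmpty(), IsGeneric()) with the ToBooleanHints flag set, queried with bitwise operators. A minimal compilable sketch of that style of typed flag enum, using stand-in names modeled on the new API rather than V8's actual definitions:

// Sketch of a ToBooleanHint-style flag set. kNone plays the role of the
// old IsEmpty() and kAny the role of IsGeneric(); individual hints are
// tested with operator& instead of Contains().
#include <cassert>
#include <cstdint>

enum class Hint : uint16_t {
  kNone = 0,
  kUndefined = 1 << 0,
  kBoolean = 1 << 1,
  kNull = 1 << 2,
  kSmallInteger = 1 << 3,
  kNeedsMap = 1 << 4,
  kAny = (1 << 5) - 1,  // Union of every hint above.
};

constexpr Hint operator|(Hint a, Hint b) {
  return static_cast<Hint>(static_cast<uint16_t>(a) |
                           static_cast<uint16_t>(b));
}
constexpr bool operator&(Hint a, Hint b) {
  return (static_cast<uint16_t>(a) & static_cast<uint16_t>(b)) != 0;
}

int main() {
  Hint expected = Hint::kSmallInteger | Hint::kNeedsMap;
  // Mirrors the rewritten DoBranch test above.
  bool needs_temps = (expected & Hint::kNeedsMap) || expected == Hint::kNone;
  assert(needs_temps);
  assert(expected != Hint::kAny);  // Not yet generic: deopt path stays live.
  return 0;
}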
diff --git a/src/crankshaft/arm64/lithium-arm64.h b/src/crankshaft/arm64/lithium-arm64.h
index 9891f9e..a9d85e5 100644
--- a/src/crankshaft/arm64/lithium-arm64.h
+++ b/src/crankshaft/arm64/lithium-arm64.h
@@ -74,9 +74,7 @@
   V(FlooringDivI)                            \
   V(ForInCacheArray)                         \
   V(ForInPrepareMap)                         \
-  V(GetCachedArrayIndex)                     \
   V(Goto)                                    \
-  V(HasCachedArrayIndexAndBranch)            \
   V(HasInPrototypeChainAndBranch)            \
   V(HasInstanceTypeAndBranch)                \
   V(InnerAllocatedObject)                    \
@@ -91,13 +89,10 @@
   V(LoadContextSlot)                         \
   V(LoadFieldByIndex)                        \
   V(LoadFunctionPrototype)                   \
-  V(LoadGlobalGeneric)                       \
   V(LoadKeyedExternal)                       \
   V(LoadKeyedFixed)                          \
   V(LoadKeyedFixedDouble)                    \
-  V(LoadKeyedGeneric)                        \
   V(LoadNamedField)                          \
-  V(LoadNamedGeneric)                        \
   V(LoadRoot)                                \
   V(MathAbs)                                 \
   V(MathAbsTagged)                           \
@@ -1282,38 +1277,6 @@
   DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
 };
 
-
-class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LGetCachedArrayIndex(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
-  DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 1> {
- public:
-  LHasCachedArrayIndexAndBranch(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
-                               "has-cached-array-index-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
-
-  void PrintDataTo(StringStream* stream) override;
-};
-
-
 class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 1> {
  public:
   LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
@@ -1537,24 +1500,6 @@
   DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
 };
 
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
- public:
-  LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
-    inputs_[0] = context;
-    temps_[0] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-  TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
-};
-
-
 template <int T>
 class LLoadKeyed : public LTemplateInstruction<1, 3, T> {
  public:
@@ -1637,45 +1582,6 @@
 };
 
 
-class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
- public:
-  LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
-                    LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    inputs_[2] = key;
-    temps_[0] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* key() { return inputs_[2]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
-};
-
-
-class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
- public:
-  LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    temps_[0] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
 class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
@@ -2046,6 +1952,8 @@
 
   DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
   DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToNumber(); }
 };
 
 
diff --git a/src/crankshaft/arm64/lithium-codegen-arm64.cc b/src/crankshaft/arm64/lithium-codegen-arm64.cc
index a4aa275..4d8e661 100644
--- a/src/crankshaft/arm64/lithium-codegen-arm64.cc
+++ b/src/crankshaft/arm64/lithium-codegen-arm64.cc
@@ -39,6 +39,29 @@
   Safepoint::DeoptMode deopt_mode_;
 };
 
+LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope(
+    LCodeGen* codegen)
+    : codegen_(codegen) {
+  DCHECK(codegen_->info()->is_calling());
+  DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+  codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+
+  UseScratchRegisterScope temps(codegen_->masm_);
+  // Preserve the value of lr which must be saved on the stack (the call to
+  // the stub will clobber it).
+  Register to_be_pushed_lr =
+      temps.UnsafeAcquire(StoreRegistersStateStub::to_be_pushed_lr());
+  codegen_->masm_->Mov(to_be_pushed_lr, lr);
+  StoreRegistersStateStub stub(codegen_->isolate());
+  codegen_->masm_->CallStub(&stub);
+}
+
+LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() {
+  DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+  RestoreRegistersStateStub stub(codegen_->isolate());
+  codegen_->masm_->CallStub(&stub);
+  codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+}
 
 #define __ masm()->
 
@@ -681,8 +704,7 @@
 
       HValue* value =
           instructions_->at(code->instruction_index())->hydrogen_value();
-      RecordAndWritePosition(
-          chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+      RecordAndWritePosition(value->position());
 
       Comment(";;; <@%d,#%d> "
               "-------------------- Deferred %s --------------------",
@@ -1438,7 +1460,7 @@
   // TODO(3095996): Get rid of this. For now, we need to make the
   // result register contain a valid pointer because it is already
   // contained in the register pointer map.
-  __ Mov(ToRegister(instr->result()), Smi::FromInt(0));
+  __ Mov(ToRegister(instr->result()), Smi::kZero);
 
   PushSafepointRegistersScope scope(this);
   LoadContextFromDeferred(instr->context());
@@ -1748,7 +1770,7 @@
       EmitBranch(instr, eq);
     } else if (type.IsSmi()) {
       DCHECK(!info()->IsStub());
-      EmitCompareAndBranch(instr, ne, value, Smi::FromInt(0));
+      EmitCompareAndBranch(instr, ne, value, Smi::kZero);
     } else if (type.IsJSArray()) {
       DCHECK(!info()->IsStub());
       EmitGoto(instr->TrueDestination(chunk()));
@@ -1764,18 +1786,17 @@
       __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
       EmitCompareAndBranch(instr, ne, temp, 0);
     } else {
-      ToBooleanICStub::Types expected =
-          instr->hydrogen()->expected_input_types();
+      ToBooleanHints expected = instr->hydrogen()->expected_input_types();
       // Avoid deopts in the case where we've never executed this path before.
-      if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+      if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
 
-      if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
+      if (expected & ToBooleanHint::kUndefined) {
         // undefined -> false.
         __ JumpIfRoot(
             value, Heap::kUndefinedValueRootIndex, false_label);
       }
 
-      if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
+      if (expected & ToBooleanHint::kBoolean) {
         // Boolean -> its value.
         __ JumpIfRoot(
             value, Heap::kTrueValueRootIndex, true_label);
@@ -1783,18 +1804,18 @@
             value, Heap::kFalseValueRootIndex, false_label);
       }
 
-      if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
+      if (expected & ToBooleanHint::kNull) {
         // 'null' -> false.
         __ JumpIfRoot(
             value, Heap::kNullValueRootIndex, false_label);
       }
 
-      if (expected.Contains(ToBooleanICStub::SMI)) {
+      if (expected & ToBooleanHint::kSmallInteger) {
         // Smis: 0 -> false, all other -> true.
-        DCHECK(Smi::FromInt(0) == 0);
+        DCHECK(Smi::kZero == 0);
         __ Cbz(value, false_label);
         __ JumpIfSmi(value, true_label);
-      } else if (expected.NeedsMap()) {
+      } else if (expected & ToBooleanHint::kNeedsMap) {
         // If we need a map later and have a smi, deopt.
         DeoptimizeIfSmi(value, instr, DeoptimizeReason::kSmi);
       }
@@ -1802,14 +1823,14 @@
       Register map = NoReg;
       Register scratch = NoReg;
 
-      if (expected.NeedsMap()) {
+      if (expected & ToBooleanHint::kNeedsMap) {
         DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
         map = ToRegister(instr->temp1());
         scratch = ToRegister(instr->temp2());
 
         __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
 
-        if (expected.CanBeUndetectable()) {
+        if (expected & ToBooleanHint::kCanBeUndetectable) {
           // Undetectable -> false.
           __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
           __ TestAndBranchIfAnySet(
@@ -1817,13 +1838,13 @@
         }
       }
 
-      if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
+      if (expected & ToBooleanHint::kReceiver) {
         // spec object -> true.
         __ CompareInstanceType(map, scratch, FIRST_JS_RECEIVER_TYPE);
         __ B(ge, true_label);
       }
 
-      if (expected.Contains(ToBooleanICStub::STRING)) {
+      if (expected & ToBooleanHint::kString) {
         // String value -> false iff empty.
         Label not_string;
         __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
@@ -1834,19 +1855,19 @@
         __ Bind(&not_string);
       }
 
-      if (expected.Contains(ToBooleanICStub::SYMBOL)) {
+      if (expected & ToBooleanHint::kSymbol) {
         // Symbol value -> true.
         __ CompareInstanceType(map, scratch, SYMBOL_TYPE);
         __ B(eq, true_label);
       }
 
-      if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
+      if (expected & ToBooleanHint::kSimdValue) {
         // SIMD value -> true.
         __ CompareInstanceType(map, scratch, SIMD128_VALUE_TYPE);
         __ B(eq, true_label);
       }
 
-      if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
+      if (expected & ToBooleanHint::kHeapNumber) {
         Label not_heap_number;
         __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
 
@@ -1860,7 +1881,7 @@
         __ Bind(&not_heap_number);
       }
 
-      if (!expected.IsGeneric()) {
+      if (expected != ToBooleanHint::kAny) {
         // We've seen something for the first time -> deopt.
         // This can only happen if we are not generic already.
         Deoptimize(instr, DeoptimizeReason::kUnexpectedObject);
@@ -2664,20 +2685,6 @@
   __ Bind(&use_cache);
 }
 
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
-  Register input = ToRegister(instr->value());
-  Register result = ToRegister(instr->result());
-
-  __ AssertString(input);
-
-  // Assert that we can use a W register load to get the hash.
-  DCHECK((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits);
-  __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset));
-  __ IndexFromHash(result, result);
-}
-
-
 void LCodeGen::EmitGoto(int block) {
   // Do not emit jump if we are emitting a goto to the next block.
   if (!IsNextEmittedBlock(block)) {
@@ -2685,25 +2692,10 @@
   }
 }
 
-
 void LCodeGen::DoGoto(LGoto* instr) {
   EmitGoto(instr->block_id());
 }
 
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
-    LHasCachedArrayIndexAndBranch* instr) {
-  Register input = ToRegister(instr->value());
-  Register temp = ToRegister32(instr->temp());
-
-  // Assert that the cache status bits fit in a W register.
-  DCHECK(is_uint32(String::kContainsCachedArrayIndexMask));
-  __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset));
-  __ Tst(temp, String::kContainsCachedArrayIndexMask);
-  EmitBranch(instr, eq);
-}
-
-
 // HHasInstanceTypeAndBranch instruction is built with an interval of type
 // to test but is only used in very restricted ways. The only possible kinds
 // of intervals are:
@@ -3013,35 +3005,6 @@
 }
 
 
-template <class T>
-void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
-  Register vector_register = ToRegister(instr->temp_vector());
-  Register slot_register = LoadWithVectorDescriptor::SlotRegister();
-  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
-  DCHECK(slot_register.is(x0));
-
-  AllowDeferredHandleDereference vector_structure_check;
-  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
-  __ Mov(vector_register, vector);
-  // No need to allocate this register.
-  FeedbackVectorSlot slot = instr->hydrogen()->slot();
-  int index = vector->GetIndex(slot);
-  __ Mov(slot_register, Smi::FromInt(index));
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->result()).Is(x0));
-
-  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  Handle<Code> ic =
-      CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
-          .code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
     Register key,
     Register base,
@@ -3277,11 +3240,11 @@
     __ B(ne, &done);
     if (info()->IsStub()) {
       // A stub can safely convert the hole to undefined only if the array
-      // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
+      // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
       // it needs to bail out.
       __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
       __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
-      __ Cmp(result, Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
+      __ Cmp(result, Operand(Smi::FromInt(Isolate::kProtectorValid)));
       DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
     }
     __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
@@ -3290,20 +3253,6 @@
 }
 
 
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
-
-  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-
-  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
-  DCHECK(ToRegister(instr->result()).Is(x0));
-}
-
-
 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
   HObjectAccess access = instr->hydrogen()->access();
   int offset = access.offset();
@@ -3345,19 +3294,6 @@
 }
 
 
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  // LoadIC expects name and receiver in registers.
-  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
-  __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
-  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
-  DCHECK(ToRegister(instr->result()).is(x0));
-}
-
-
 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
   Register result = ToRegister(instr->result());
   __ LoadRoot(result, instr->index());
@@ -4289,8 +4225,7 @@
   Register input = ToRegister(instr->value());
   Register scratch = ToRegister(instr->temp());
   DoubleRegister result = ToDoubleRegister(instr->result());
-  bool can_convert_undefined_to_nan =
-      instr->hydrogen()->can_convert_undefined_to_nan();
+  bool can_convert_undefined_to_nan = instr->truncating();
 
   Label done, load_smi;
 
@@ -5267,30 +5202,18 @@
   Label done;
 
   if (instr->truncating()) {
+    UseScratchRegisterScope temps(masm());
     Register output = ToRegister(instr->result());
-    Label check_bools;
-
-    // If it's not a heap number, jump to undefined check.
-    __ JumpIfNotHeapNumber(input, &check_bools);
-
-    // A heap number: load value and convert to int32 using truncating function.
+    Register input_map = temps.AcquireX();
+    Register input_instance_type = input_map;
+    Label truncate;
+    __ CompareObjectType(input, input_map, input_instance_type,
+                         HEAP_NUMBER_TYPE);
+    __ B(eq, &truncate);
+    __ Cmp(input_instance_type, ODDBALL_TYPE);
+    DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball);
+    __ Bind(&truncate);
     __ TruncateHeapNumberToI(output, input);
-    __ B(&done);
-
-    __ Bind(&check_bools);
-
-    Register true_root = output;
-    Register false_root = scratch1;
-    __ LoadTrueFalseRoots(true_root, false_root);
-    __ Cmp(input, true_root);
-    __ Cset(output, eq);
-    __ Ccmp(input, false_root, ZFlag, ne);
-    __ B(eq, &done);
-
-    // Output contains zero, undefined is converted to zero for truncating
-    // conversions.
-    DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
-                        DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
   } else {
     Register output = ToRegister32(instr->result());
     DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
@@ -5650,7 +5573,7 @@
       index, reinterpret_cast<uint64_t>(Smi::FromInt(1)), deferred->entry());
   __ Mov(index, Operand(index, ASR, 1));
 
-  __ Cmp(index, Smi::FromInt(0));
+  __ Cmp(index, Smi::kZero);
   __ B(lt, &out_of_object);
 
   STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
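
The truncating tagged-to-int32 hunk above swaps the old heap-number/boolean/undefined probes for a single instance-type check: anything that is a heap number or an oddball truncates, everything else deoptimizes with kNotANumberOrOddball. A standalone restatement of that policy with stand-in types; the clamping below is illustrative only, since the real TruncateHeapNumberToI uses mod-2^32 semantics:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <stdexcept>

enum class InstanceType { kHeapNumber, kOddball, kOther };

struct Tagged {
  InstanceType type;
  double number;  // Numeric payload; oddballs cache one (e.g. true -> 1).
};

int32_t TruncateTaggedToI(const Tagged& input) {
  // One check now covers undefined, null, true and false (all oddballs).
  if (input.type != InstanceType::kHeapNumber &&
      input.type != InstanceType::kOddball) {
    throw std::runtime_error("deopt: NotANumberOrOddball");
  }
  double d = input.number;
  if (std::isnan(d)) return 0;
  d = std::min(std::max(d, -2147483648.0), 2147483647.0);  // Keep cast defined.
  return static_cast<int32_t>(d);
}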
diff --git a/src/crankshaft/arm64/lithium-codegen-arm64.h b/src/crankshaft/arm64/lithium-codegen-arm64.h
index ca04fa2..7f44473 100644
--- a/src/crankshaft/arm64/lithium-codegen-arm64.h
+++ b/src/crankshaft/arm64/lithium-codegen-arm64.h
@@ -368,28 +368,9 @@
 
   class PushSafepointRegistersScope BASE_EMBEDDED {
    public:
-    explicit PushSafepointRegistersScope(LCodeGen* codegen)
-        : codegen_(codegen) {
-      DCHECK(codegen_->info()->is_calling());
-      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
-      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+    explicit PushSafepointRegistersScope(LCodeGen* codegen);
 
-      UseScratchRegisterScope temps(codegen_->masm_);
-      // Preserve the value of lr which must be saved on the stack (the call to
-      // the stub will clobber it).
-      Register to_be_pushed_lr =
-          temps.UnsafeAcquire(StoreRegistersStateStub::to_be_pushed_lr());
-      codegen_->masm_->Mov(to_be_pushed_lr, lr);
-      StoreRegistersStateStub stub(codegen_->isolate());
-      codegen_->masm_->CallStub(&stub);
-    }
-
-    ~PushSafepointRegistersScope() {
-      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
-      RestoreRegistersStateStub stub(codegen_->isolate());
-      codegen_->masm_->CallStub(&stub);
-      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
-    }
+    ~PushSafepointRegistersScope();
 
    private:
     LCodeGen* codegen_;
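
This header change is the declaration side of the PushSafepointRegistersScope bodies moved into lithium-codegen-arm64.cc above; keeping only declarations here means the header no longer needs the stub and scratch-register machinery. A self-contained sketch of the same out-of-line RAII split, with stand-in names:

#include <cassert>

struct CodeGen {
  enum SafepointKind { kSimple, kWithRegisters };
  SafepointKind expected_safepoint_kind = kSimple;
};

class PushSafepointRegistersScope {
 public:  // Header part: declarations only; CodeGen could stay forward-declared.
  explicit PushSafepointRegistersScope(CodeGen* codegen);
  ~PushSafepointRegistersScope();

 private:
  CodeGen* codegen_;
};

// .cc part: bodies live beside the codegen they manipulate. The real
// constructor also preserves lr and calls StoreRegistersStateStub.
PushSafepointRegistersScope::PushSafepointRegistersScope(CodeGen* codegen)
    : codegen_(codegen) {
  assert(codegen_->expected_safepoint_kind == CodeGen::kSimple);
  codegen_->expected_safepoint_kind = CodeGen::kWithRegisters;
}

PushSafepointRegistersScope::~PushSafepointRegistersScope() {
  assert(codegen_->expected_safepoint_kind == CodeGen::kWithRegisters);
  codegen_->expected_safepoint_kind = CodeGen::kSimple;  // Restore stub here.
}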
diff --git a/src/crankshaft/compilation-phase.cc b/src/crankshaft/compilation-phase.cc
index 9b40cca..4be0b1a 100644
--- a/src/crankshaft/compilation-phase.cc
+++ b/src/crankshaft/compilation-phase.cc
@@ -11,7 +11,7 @@
 namespace internal {
 
 CompilationPhase::CompilationPhase(const char* name, CompilationInfo* info)
-    : name_(name), info_(info), zone_(info->isolate()->allocator()) {
+    : name_(name), info_(info), zone_(info->isolate()->allocator(), ZONE_NAME) {
   if (FLAG_hydrogen_stats) {
     info_zone_start_allocation_size_ = info->zone()->allocation_size();
     timer_.Start();
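
The constructor change above threads a name into the phase's Zone. Assuming ZONE_NAME expands to a short identifying string such as the creating function's name, the point is to make per-zone allocation statistics attributable. A toy arena showing the idea (names and the macro definition are assumptions, not V8's code):

#include <cstdio>

// Toy arena carrying a debug name, as the named Zone above does. Real
// zones allocate from pooled segments and release them in bulk; here we
// only track the byte count that the name would be reported with.
class Zone {
 public:
  explicit Zone(const char* name) : name_(name), allocated_(0) {}
  void Allocate(unsigned long size) { allocated_ += size; }
  void Report() const { std::printf("%s: %lu bytes\n", name_, allocated_); }

 private:
  const char* name_;
  unsigned long allocated_;
};

#define ZONE_NAME __func__  // Plausible definition: tag zones by creator.

void CompilationPhase() {
  Zone zone(ZONE_NAME);  // Reports show up as "CompilationPhase: N bytes".
  zone.Allocate(128);
  zone.Report();
}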
diff --git a/src/crankshaft/hydrogen-instructions.cc b/src/crankshaft/hydrogen-instructions.cc
index 3a0aaa7..be1ac9a 100644
--- a/src/crankshaft/hydrogen-instructions.cc
+++ b/src/crankshaft/hydrogen-instructions.cc
@@ -7,6 +7,7 @@
 #include "src/base/bits.h"
 #include "src/base/ieee754.h"
 #include "src/base/safe_math.h"
+#include "src/codegen.h"
 #include "src/crankshaft/hydrogen-infer-representation.h"
 #include "src/double.h"
 #include "src/elements.h"
@@ -44,6 +45,21 @@
 HYDROGEN_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
 #undef DEFINE_COMPILE
 
+Representation RepresentationFromMachineType(MachineType type) {
+  if (type == MachineType::Int32()) {
+    return Representation::Integer32();
+  }
+
+  if (type == MachineType::TaggedSigned()) {
+    return Representation::Smi();
+  }
+
+  if (type == MachineType::Pointer()) {
+    return Representation::External();
+  }
+
+  return Representation::Tagged();
+}
 
 Isolate* HValue::isolate() const {
   DCHECK(block() != NULL);
@@ -808,9 +824,7 @@
     case HValue::kEnterInlined:
     case HValue::kEnvironmentMarker:
     case HValue::kForceRepresentation:
-    case HValue::kGetCachedArrayIndex:
     case HValue::kGoto:
-    case HValue::kHasCachedArrayIndexAndBranch:
     case HValue::kHasInstanceTypeAndBranch:
     case HValue::kInnerAllocatedObject:
     case HValue::kIsSmiAndBranch:
@@ -818,9 +832,7 @@
     case HValue::kIsUndetectableAndBranch:
     case HValue::kLeaveInlined:
     case HValue::kLoadFieldByIndex:
-    case HValue::kLoadGlobalGeneric:
     case HValue::kLoadNamedField:
-    case HValue::kLoadNamedGeneric:
     case HValue::kLoadRoot:
     case HValue::kMathMinMax:
     case HValue::kParameter:
@@ -864,7 +876,6 @@
     case HValue::kLoadContextSlot:
     case HValue::kLoadFunctionPrototype:
     case HValue::kLoadKeyed:
-    case HValue::kLoadKeyedGeneric:
     case HValue::kMathFloorOfDiv:
     case HValue::kMaybeGrowElements:
     case HValue::kMod:
@@ -1061,23 +1072,21 @@
 
 
 Representation HBranch::observed_input_representation(int index) {
-  if (expected_input_types_.Contains(ToBooleanICStub::NULL_TYPE) ||
-      expected_input_types_.Contains(ToBooleanICStub::SPEC_OBJECT) ||
-      expected_input_types_.Contains(ToBooleanICStub::STRING) ||
-      expected_input_types_.Contains(ToBooleanICStub::SYMBOL) ||
-      expected_input_types_.Contains(ToBooleanICStub::SIMD_VALUE)) {
+  if (expected_input_types_ & (ToBooleanHint::kNull | ToBooleanHint::kReceiver |
+                               ToBooleanHint::kString | ToBooleanHint::kSymbol |
+                               ToBooleanHint::kSimdValue)) {
     return Representation::Tagged();
   }
-  if (expected_input_types_.Contains(ToBooleanICStub::UNDEFINED)) {
-    if (expected_input_types_.Contains(ToBooleanICStub::HEAP_NUMBER)) {
+  if (expected_input_types_ & ToBooleanHint::kUndefined) {
+    if (expected_input_types_ & ToBooleanHint::kHeapNumber) {
       return Representation::Double();
     }
     return Representation::Tagged();
   }
-  if (expected_input_types_.Contains(ToBooleanICStub::HEAP_NUMBER)) {
+  if (expected_input_types_ & ToBooleanHint::kHeapNumber) {
     return Representation::Double();
   }
-  if (expected_input_types_.Contains(ToBooleanICStub::SMI)) {
+  if (expected_input_types_ & ToBooleanHint::kSmallInteger) {
     return Representation::Smi();
   }
   return Representation::None();
@@ -1483,8 +1492,8 @@
 
   if (CanTruncateToSmi()) os << " truncating-smi";
   if (CanTruncateToInt32()) os << " truncating-int32";
+  if (CanTruncateToNumber()) os << " truncating-number";
   if (CheckFlag(kBailoutOnMinusZero)) os << " -0?";
-  if (CheckFlag(kAllowUndefinedAsNaN)) os << " allow-undefined-as-nan";
   return os;
 }
 
@@ -1495,8 +1504,8 @@
     if (val->IsChange()) val = HChange::cast(val)->value();
     if (val->representation().IsSmiOrInteger32()) {
       if (val->representation().Equals(representation())) return val;
-      return Prepend(new(block()->zone()) HChange(
-          val, representation(), false, false));
+      return Prepend(new (block()->zone())
+                         HChange(val, representation(), false, false, true));
     }
   }
   if (op() == kMathFloor && representation().IsSmiOrInteger32() &&
@@ -1511,8 +1520,8 @@
       // A change from an integer32 can be replaced by the integer32 value.
       left = HChange::cast(left)->value();
     } else if (hdiv->observed_input_representation(1).IsSmiOrInteger32()) {
-      left = Prepend(new(block()->zone()) HChange(
-          left, Representation::Integer32(), false, false));
+      left = Prepend(new (block()->zone()) HChange(
+          left, Representation::Integer32(), false, false, true));
     } else {
       return this;
     }
@@ -1530,8 +1539,8 @@
       // A change from an integer32 can be replaced by the integer32 value.
       right = HChange::cast(right)->value();
     } else if (hdiv->observed_input_representation(2).IsSmiOrInteger32()) {
-      right = Prepend(new(block()->zone()) HChange(
-          right, Representation::Integer32(), false, false));
+      right = Prepend(new (block()->zone()) HChange(
+          right, Representation::Integer32(), false, false, true));
     } else {
       return this;
     }
@@ -2871,7 +2880,7 @@
     // comparisons must cause a deopt when one of their arguments is undefined.
     // See also v8:1434
     if (Token::IsOrderedRelationalCompareOp(token_)) {
-      SetFlag(kAllowUndefinedAsNaN);
+      SetFlag(kTruncatingToNumber);
     }
   }
   ChangeRepresentation(rep);
@@ -2899,13 +2908,6 @@
 }
 
 
-std::ostream& HLoadNamedGeneric::PrintDataTo(
-    std::ostream& os) const {  // NOLINT
-  Handle<String> n = Handle<String>::cast(name());
-  return os << NameOf(object()) << "." << n->ToCString().get();
-}
-
-
 std::ostream& HLoadKeyed::PrintDataTo(std::ostream& os) const {  // NOLINT
   if (!is_fixed_typed_array()) {
     os << NameOf(elements());
@@ -2977,7 +2979,7 @@
 
 bool HLoadKeyed::AllUsesCanTreatHoleAsNaN() const {
   return IsFastDoubleElementsKind(elements_kind()) &&
-      CheckUsesForFlag(HValue::kAllowUndefinedAsNaN);
+         CheckUsesForFlag(HValue::kTruncatingToNumber);
 }
 
 
@@ -2997,46 +2999,40 @@
   return !UsesMustHandleHole();
 }
 
+HValue* HCallWithDescriptor::Canonicalize() {
+  if (kind() != Code::KEYED_LOAD_IC) return this;
 
-std::ostream& HLoadKeyedGeneric::PrintDataTo(
-    std::ostream& os) const {  // NOLINT
-  return os << NameOf(object()) << "[" << NameOf(key()) << "]";
-}
-
-
-HValue* HLoadKeyedGeneric::Canonicalize() {
   // Recognize generic keyed loads that use property name generated
   // by for-in statement as a key and rewrite them into fast property load
   // by index.
-  if (key()->IsLoadKeyed()) {
-    HLoadKeyed* key_load = HLoadKeyed::cast(key());
+  typedef LoadWithVectorDescriptor Descriptor;
+  HValue* key = parameter(Descriptor::kName);
+  if (key->IsLoadKeyed()) {
+    HLoadKeyed* key_load = HLoadKeyed::cast(key);
     if (key_load->elements()->IsForInCacheArray()) {
       HForInCacheArray* names_cache =
           HForInCacheArray::cast(key_load->elements());
 
-      if (names_cache->enumerable() == object()) {
+      HValue* object = parameter(Descriptor::kReceiver);
+      if (names_cache->enumerable() == object) {
         HForInCacheArray* index_cache =
             names_cache->index_cache();
         HCheckMapValue* map_check = HCheckMapValue::New(
             block()->graph()->isolate(), block()->graph()->zone(),
-            block()->graph()->GetInvalidContext(), object(),
-            names_cache->map());
+            block()->graph()->GetInvalidContext(), object, names_cache->map());
         HInstruction* index = HLoadKeyed::New(
             block()->graph()->isolate(), block()->graph()->zone(),
             block()->graph()->GetInvalidContext(), index_cache, key_load->key(),
             key_load->key(), nullptr, key_load->elements_kind());
         map_check->InsertBefore(this);
         index->InsertBefore(this);
-        return Prepend(new(block()->zone()) HLoadFieldByIndex(
-            object(), index));
+        return Prepend(new (block()->zone()) HLoadFieldByIndex(object, index));
       }
     }
   }
-
   return this;
 }
 
-
 std::ostream& HStoreNamedField::PrintDataTo(std::ostream& os) const {  // NOLINT
   os << NameOf(object()) << access_ << " = " << NameOf(value());
   if (NeedsWriteBarrier()) os << " (write-barrier)";
@@ -3074,12 +3070,6 @@
 }
 
 
-std::ostream& HLoadGlobalGeneric::PrintDataTo(
-    std::ostream& os) const {  // NOLINT
-  return os << name()->ToCString().get() << " ";
-}
-
-
 std::ostream& HInnerAllocatedObject::PrintDataTo(
     std::ostream& os) const {  // NOLINT
   os << NameOf(base_object()) << " offset ";
@@ -3596,16 +3586,21 @@
     HConstant* c_left = HConstant::cast(left);
     HConstant* c_right = HConstant::cast(right);
     if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
-      if (c_right->DoubleValue() != 0) {
+      if (std::isnan(c_left->DoubleValue()) ||
+          std::isnan(c_right->DoubleValue())) {
+        return H_CONSTANT_DOUBLE(std::numeric_limits<double>::quiet_NaN());
+      } else if (c_right->DoubleValue() != 0) {
         double double_res = c_left->DoubleValue() / c_right->DoubleValue();
         if (IsInt32Double(double_res)) {
           return H_CONSTANT_INT(double_res);
         }
         return H_CONSTANT_DOUBLE(double_res);
-      } else {
+      } else if (c_left->DoubleValue() != 0) {
         int sign = Double(c_left->DoubleValue()).Sign() *
                    Double(c_right->DoubleValue()).Sign();  // Right could be -0.
         return H_CONSTANT_DOUBLE(sign * V8_INFINITY);
+      } else {
+        return H_CONSTANT_DOUBLE(std::numeric_limits<double>::quiet_NaN());
       }
     }
   }
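
The folding hunk above adds explicit NaN cases so constant division no longer misfolds NaN operands or 0/0 into the signed-infinity branch. The new decision tree, restated as a standalone function; after the change it agrees with IEEE-754 division for every constant pair:

#include <cmath>
#include <limits>

double FoldConstantDivide(double left, double right) {
  if (std::isnan(left) || std::isnan(right)) {
    return std::numeric_limits<double>::quiet_NaN();
  } else if (right != 0) {
    return left / right;  // Folded as int32 when the quotient fits.
  } else if (left != 0) {
    // Right could be -0, so derive the sign from both operands.
    double sign = std::copysign(1.0, left) * std::copysign(1.0, right);
    return sign * std::numeric_limits<double>::infinity();
  } else {
    return std::numeric_limits<double>::quiet_NaN();  // 0/0, the new case.
  }
}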
diff --git a/src/crankshaft/hydrogen-instructions.h b/src/crankshaft/hydrogen-instructions.h
index cfede98..9b9e674 100644
--- a/src/crankshaft/hydrogen-instructions.h
+++ b/src/crankshaft/hydrogen-instructions.h
@@ -12,12 +12,12 @@
 #include "src/ast/ast.h"
 #include "src/base/bits.h"
 #include "src/bit-vector.h"
-#include "src/code-stubs.h"
 #include "src/conversions.h"
 #include "src/crankshaft/hydrogen-types.h"
 #include "src/crankshaft/unique.h"
 #include "src/deoptimizer.h"
 #include "src/globals.h"
+#include "src/interface-descriptors.h"
 #include "src/small-pointer-list.h"
 #include "src/utils.h"
 #include "src/zone/zone.h"
@@ -91,9 +91,7 @@
   V(ForceRepresentation)                      \
   V(ForInCacheArray)                          \
   V(ForInPrepareMap)                          \
-  V(GetCachedArrayIndex)                      \
   V(Goto)                                     \
-  V(HasCachedArrayIndexAndBranch)             \
   V(HasInstanceTypeAndBranch)                 \
   V(InnerAllocatedObject)                     \
   V(InvokeFunction)                           \
@@ -105,11 +103,8 @@
   V(LoadContextSlot)                          \
   V(LoadFieldByIndex)                         \
   V(LoadFunctionPrototype)                    \
-  V(LoadGlobalGeneric)                        \
   V(LoadKeyed)                                \
-  V(LoadKeyedGeneric)                         \
   V(LoadNamedField)                           \
-  V(LoadNamedGeneric)                         \
   V(LoadRoot)                                 \
   V(MathFloorOfDiv)                           \
   V(MathMinMax)                               \
@@ -191,6 +186,7 @@
 
 enum PropertyAccessType { LOAD, STORE };
 
+Representation RepresentationFromMachineType(MachineType type);
 
 class Range final : public ZoneObject {
  public:
@@ -416,7 +412,7 @@
     kLeftCanBeMinInt,
     kLeftCanBeNegative,
     kLeftCanBePositive,
-    kAllowUndefinedAsNaN,
+    kTruncatingToNumber,
     kIsArguments,
     kTruncatingToInt32,
     kAllUsesTruncatingToInt32,
@@ -490,9 +486,6 @@
   virtual ~HValue() {}
 
   virtual SourcePosition position() const { return SourcePosition::Unknown(); }
-  virtual SourcePosition operand_position(int index) const {
-    return position();
-  }
 
   HBasicBlock* block() const { return block_; }
   void SetBlock(HBasicBlock* block);
@@ -952,99 +945,6 @@
     return new (zone) I(context, p1, p2, p3, p4, p5, p6);                      \
   }
 
-
-// A helper class to represent per-operand position information attached to
-// the HInstruction in the compact form. Uses tagging to distinguish between
-// case when only instruction's position is available and case when operands'
-// positions are also available.
-// In the first case it contains instruction's position as a tagged value.
-// In the second case it points to an array which contains instruction's
-// position and operands' positions.
-class HPositionInfo {
- public:
-  explicit HPositionInfo(int pos) : data_(TagPosition(pos)) { }
-
-  SourcePosition position() const {
-    if (has_operand_positions()) {
-      return operand_positions()[kInstructionPosIndex];
-    }
-    return SourcePosition::FromRaw(static_cast<int>(UntagPosition(data_)));
-  }
-
-  void set_position(SourcePosition pos) {
-    if (has_operand_positions()) {
-      operand_positions()[kInstructionPosIndex] = pos;
-    } else {
-      data_ = TagPosition(pos.raw());
-    }
-  }
-
-  void ensure_storage_for_operand_positions(Zone* zone, int operand_count) {
-    if (has_operand_positions()) {
-      return;
-    }
-
-    const int length = kFirstOperandPosIndex + operand_count;
-    SourcePosition* positions = zone->NewArray<SourcePosition>(length);
-    for (int i = 0; i < length; i++) {
-      positions[i] = SourcePosition::Unknown();
-    }
-
-    const SourcePosition pos = position();
-    data_ = reinterpret_cast<intptr_t>(positions);
-    set_position(pos);
-
-    DCHECK(has_operand_positions());
-  }
-
-  SourcePosition operand_position(int idx) const {
-    if (!has_operand_positions()) {
-      return position();
-    }
-    return *operand_position_slot(idx);
-  }
-
-  void set_operand_position(int idx, SourcePosition pos) {
-    *operand_position_slot(idx) = pos;
-  }
-
- private:
-  static const intptr_t kInstructionPosIndex = 0;
-  static const intptr_t kFirstOperandPosIndex = 1;
-
-  SourcePosition* operand_position_slot(int idx) const {
-    DCHECK(has_operand_positions());
-    return &(operand_positions()[kFirstOperandPosIndex + idx]);
-  }
-
-  bool has_operand_positions() const {
-    return !IsTaggedPosition(data_);
-  }
-
-  SourcePosition* operand_positions() const {
-    DCHECK(has_operand_positions());
-    return reinterpret_cast<SourcePosition*>(data_);
-  }
-
-  static const intptr_t kPositionTag = 1;
-  static const intptr_t kPositionShift = 1;
-  static bool IsTaggedPosition(intptr_t val) {
-    return (val & kPositionTag) != 0;
-  }
-  static intptr_t UntagPosition(intptr_t val) {
-    DCHECK(IsTaggedPosition(val));
-    return val >> kPositionShift;
-  }
-  static intptr_t TagPosition(intptr_t val) {
-    const intptr_t result = (val << kPositionShift) | kPositionTag;
-    DCHECK(UntagPosition(result) == val);
-    return result;
-  }
-
-  intptr_t data_;
-};
-
-
 class HInstruction : public HValue {
  public:
   HInstruction* next() const { return next_; }
@@ -1071,31 +971,17 @@
   }
 
   // The position is a write-once variable.
-  SourcePosition position() const override {
-    return SourcePosition(position_.position());
-  }
-  bool has_position() const {
-    return !position().IsUnknown();
-  }
+  SourcePosition position() const override { return position_; }
+  bool has_position() const { return position_.IsKnown(); }
   void set_position(SourcePosition position) {
-    DCHECK(!has_position());
-    DCHECK(!position.IsUnknown());
-    position_.set_position(position);
-  }
-
-  SourcePosition operand_position(int index) const override {
-    const SourcePosition pos = position_.operand_position(index);
-    return pos.IsUnknown() ? position() : pos;
-  }
-  void set_operand_position(Zone* zone, int index, SourcePosition pos) {
-    DCHECK(0 <= index && index < OperandCount());
-    position_.ensure_storage_for_operand_positions(zone, OperandCount());
-    position_.set_operand_position(index, pos);
+    DCHECK(position.IsKnown());
+    position_ = position;
   }
 
   bool Dominates(HInstruction* other);
   bool CanTruncateToSmi() const { return CheckFlag(kTruncatingToSmi); }
   bool CanTruncateToInt32() const { return CheckFlag(kTruncatingToInt32); }
+  bool CanTruncateToNumber() const { return CheckFlag(kTruncatingToNumber); }
 
   virtual LInstruction* CompileToLithium(LChunkBuilder* builder) = 0;
 
@@ -1114,7 +1000,7 @@
       : HValue(type),
         next_(NULL),
         previous_(NULL),
-        position_(kNoSourcePosition) {
+        position_(SourcePosition::Unknown()) {
     SetDependsOnFlag(kOsrEntries);
   }
 
@@ -1128,7 +1014,7 @@
 
   HInstruction* next_;
   HInstruction* previous_;
-  HPositionInfo position_;
+  SourcePosition position_;
 
   friend class HBasicBlock;
 };
@@ -1353,9 +1239,9 @@
 class HBranch final : public HUnaryControlInstruction {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HBranch, HValue*);
-  DECLARE_INSTRUCTION_FACTORY_P2(HBranch, HValue*, ToBooleanICStub::Types);
-  DECLARE_INSTRUCTION_FACTORY_P4(HBranch, HValue*, ToBooleanICStub::Types,
-                                 HBasicBlock*, HBasicBlock*);
+  DECLARE_INSTRUCTION_FACTORY_P2(HBranch, HValue*, ToBooleanHints);
+  DECLARE_INSTRUCTION_FACTORY_P4(HBranch, HValue*, ToBooleanHints, HBasicBlock*,
+                                 HBasicBlock*);
 
   Representation RequiredInputRepresentation(int index) override {
     return Representation::None();
@@ -1366,22 +1252,18 @@
 
   std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
 
-  ToBooleanICStub::Types expected_input_types() const {
-    return expected_input_types_;
-  }
+  ToBooleanHints expected_input_types() const { return expected_input_types_; }
 
   DECLARE_CONCRETE_INSTRUCTION(Branch)
 
  private:
-  HBranch(HValue* value, ToBooleanICStub::Types expected_input_types =
-                             ToBooleanICStub::Types(),
+  HBranch(HValue* value,
+          ToBooleanHints expected_input_types = ToBooleanHint::kNone,
           HBasicBlock* true_target = NULL, HBasicBlock* false_target = NULL)
       : HUnaryControlInstruction(value, true_target, false_target),
-        expected_input_types_(expected_input_types) {
-    SetFlag(kAllowUndefinedAsNaN);
-  }
+        expected_input_types_(expected_input_types) {}
 
-  ToBooleanICStub::Types expected_input_types_;
+  ToBooleanHints expected_input_types_;
 };
 
 
@@ -1575,13 +1457,10 @@
   }
 };
 
-
 class HChange final : public HUnaryOperation {
  public:
-  HChange(HValue* value,
-          Representation to,
-          bool is_truncating_to_smi,
-          bool is_truncating_to_int32)
+  HChange(HValue* value, Representation to, bool is_truncating_to_smi,
+          bool is_truncating_to_int32, bool is_truncating_to_number)
       : HUnaryOperation(value) {
     DCHECK(!value->representation().IsNone());
     DCHECK(!to.IsNone());
@@ -1592,8 +1471,13 @@
     if (is_truncating_to_smi && to.IsSmi()) {
       SetFlag(kTruncatingToSmi);
       SetFlag(kTruncatingToInt32);
+      SetFlag(kTruncatingToNumber);
+    } else if (is_truncating_to_int32) {
+      SetFlag(kTruncatingToInt32);
+      SetFlag(kTruncatingToNumber);
+    } else if (is_truncating_to_number) {
+      SetFlag(kTruncatingToNumber);
     }
-    if (is_truncating_to_int32) SetFlag(kTruncatingToInt32);
     if (value->representation().IsSmi() || value->type().IsSmi()) {
       set_type(HType::Smi());
     } else {
@@ -1602,10 +1486,6 @@
     }
   }
 
-  bool can_convert_undefined_to_nan() {
-    return CheckUsesForFlag(kAllowUndefinedAsNaN);
-  }
-
   HType CalculateInferredType() override;
   HValue* Canonicalize() override;
 
@@ -1651,7 +1531,7 @@
   explicit HClampToUint8(HValue* value)
       : HUnaryOperation(value) {
     set_representation(Representation::Integer32());
-    SetFlag(kAllowUndefinedAsNaN);
+    SetFlag(kTruncatingToNumber);
     SetFlag(kUseGVN);
   }
 
@@ -1929,7 +1809,7 @@
         function_(function),
         inlining_kind_(inlining_kind),
         syntactic_tail_call_mode_(syntactic_tail_call_mode),
-        inlining_id_(0),
+        inlining_id_(-1),
         arguments_var_(arguments_var),
         arguments_object_(arguments_object),
         return_targets_(2, zone) {}
@@ -2160,9 +2040,21 @@
       const Vector<HValue*>& operands,
       TailCallMode syntactic_tail_call_mode = TailCallMode::kDisallow,
       TailCallMode tail_call_mode = TailCallMode::kDisallow) {
-    HCallWithDescriptor* res = new (zone)
-        HCallWithDescriptor(target, argument_count, descriptor, operands,
-                            syntactic_tail_call_mode, tail_call_mode, zone);
+    HCallWithDescriptor* res = new (zone) HCallWithDescriptor(
+        Code::STUB, context, target, argument_count, descriptor, operands,
+        syntactic_tail_call_mode, tail_call_mode, zone);
+    return res;
+  }
+
+  static HCallWithDescriptor* New(
+      Isolate* isolate, Zone* zone, HValue* context, Code::Kind kind,
+      HValue* target, int argument_count, CallInterfaceDescriptor descriptor,
+      const Vector<HValue*>& operands,
+      TailCallMode syntactic_tail_call_mode = TailCallMode::kDisallow,
+      TailCallMode tail_call_mode = TailCallMode::kDisallow) {
+    HCallWithDescriptor* res = new (zone) HCallWithDescriptor(
+        kind, context, target, argument_count, descriptor, operands,
+        syntactic_tail_call_mode, tail_call_mode, zone);
     return res;
   }
 
@@ -2194,6 +2086,8 @@
   }
   bool IsTailCall() const { return tail_call_mode() == TailCallMode::kAllow; }
 
+  Code::Kind kind() const { return KindField::decode(bit_field_); }
+
   virtual int argument_count() const {
     return argument_count_;
   }
@@ -2202,29 +2096,36 @@
 
   CallInterfaceDescriptor descriptor() const { return descriptor_; }
 
-  HValue* target() {
-    return OperandAt(0);
+  HValue* target() { return OperandAt(0); }
+  HValue* context() { return OperandAt(1); }
+  HValue* parameter(int index) {
+    DCHECK_LT(index, GetParameterCount());
+    return OperandAt(index + 2);
   }
 
+  HValue* Canonicalize() override;
+
   std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
 
  private:
   // The argument count includes the receiver.
-  HCallWithDescriptor(HValue* target, int argument_count,
-                      CallInterfaceDescriptor descriptor,
+  HCallWithDescriptor(Code::Kind kind, HValue* context, HValue* target,
+                      int argument_count, CallInterfaceDescriptor descriptor,
                       const Vector<HValue*>& operands,
                       TailCallMode syntactic_tail_call_mode,
                       TailCallMode tail_call_mode, Zone* zone)
       : descriptor_(descriptor),
-        values_(GetParameterCount() + 1, zone),  // +1 here is for target.
+        values_(GetParameterCount() + 2, zone),  // +2 for context and target.
         argument_count_(argument_count),
         bit_field_(
             TailCallModeField::encode(tail_call_mode) |
-            SyntacticTailCallModeField::encode(syntactic_tail_call_mode)) {
+            SyntacticTailCallModeField::encode(syntactic_tail_call_mode) |
+            KindField::encode(kind)) {
     DCHECK_EQ(operands.length(), GetParameterCount());
     // We can only tail call without any stack arguments.
     DCHECK(tail_call_mode != TailCallMode::kAllow || argument_count == 0);
     AddOperand(target, zone);
+    AddOperand(context, zone);
     for (int i = 0; i < operands.length(); i++) {
       AddOperand(operands[i], zone);
     }
@@ -2237,9 +2138,7 @@
     SetOperandAt(values_.length() - 1, v);
   }
 
-  int GetParameterCount() const {
-    return descriptor_.GetParameterCount() + 1;  // +1 here is for context.
-  }
+  int GetParameterCount() const { return descriptor_.GetParameterCount(); }
 
   void InternalSetOperandAt(int index, HValue* value) final {
     values_[index] = value;
@@ -2251,6 +2150,8 @@
   class TailCallModeField : public BitField<TailCallMode, 0, 1> {};
   class SyntacticTailCallModeField
       : public BitField<TailCallMode, TailCallModeField::kNext, 1> {};
+  class KindField
+      : public BitField<Code::Kind, SyntacticTailCallModeField::kNext, 5> {};
   uint32_t bit_field_;
 };
 
@@ -2484,7 +2385,7 @@
         UNREACHABLE();
     }
     SetFlag(kUseGVN);
-    SetFlag(kAllowUndefinedAsNaN);
+    SetFlag(kTruncatingToNumber);
   }
 
   bool IsDeletable() const override {
@@ -2898,7 +2799,6 @@
       : inputs_(2, zone), merged_index_(merged_index) {
     DCHECK(merged_index >= 0 || merged_index == kInvalidMergedIndex);
     SetFlag(kFlexibleRepresentation);
-    SetFlag(kAllowUndefinedAsNaN);
   }
 
   Representation RepresentationFromInputs() override;
@@ -3463,12 +3363,6 @@
     return representation();
   }
 
-  void SetOperandPositions(Zone* zone, SourcePosition left_pos,
-                           SourcePosition right_pos) {
-    set_operand_position(zone, 1, left_pos);
-    set_operand_position(zone, 2, right_pos);
-  }
-
   bool RightIsPowerOf2() {
     if (!right()->IsInteger32Constant()) return false;
     int32_t value = right()->GetInteger32Constant();
@@ -3714,7 +3608,7 @@
       : HBinaryOperation(context, left, right, type) {
     SetFlag(kFlexibleRepresentation);
     SetFlag(kTruncatingToInt32);
-    SetFlag(kAllowUndefinedAsNaN);
+    SetFlag(kTruncatingToNumber);
     SetAllSideEffects();
   }
 
@@ -3777,7 +3671,7 @@
     SetFlag(kLeftCanBeMinInt);
     SetFlag(kLeftCanBeNegative);
     SetFlag(kLeftCanBePositive);
-    SetFlag(kAllowUndefinedAsNaN);
+    SetFlag(kTruncatingToNumber);
   }
 
   Range* InferRange(Zone* zone) override;
@@ -3788,11 +3682,12 @@
 
 class HArithmeticBinaryOperation : public HBinaryOperation {
  public:
-  HArithmeticBinaryOperation(HValue* context, HValue* left, HValue* right)
-      : HBinaryOperation(context, left, right, HType::TaggedNumber()) {
+  HArithmeticBinaryOperation(HValue* context, HValue* left, HValue* right,
+                             HType type = HType::TaggedNumber())
+      : HBinaryOperation(context, left, right, type) {
     SetAllSideEffects();
     SetFlag(kFlexibleRepresentation);
-    SetFlag(kAllowUndefinedAsNaN);
+    SetFlag(kTruncatingToNumber);
   }
 
   void RepresentationChanged(Representation to) override {
@@ -3880,12 +3775,6 @@
 
   std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
 
-  void SetOperandPositions(Zone* zone, SourcePosition left_pos,
-                           SourcePosition right_pos) {
-    set_operand_position(zone, 0, left_pos);
-    set_operand_position(zone, 1, right_pos);
-  }
-
   DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch)
 
  private:
@@ -3925,7 +3814,6 @@
                         HBasicBlock* false_target = NULL)
       : HUnaryControlInstruction(value, true_target, false_target) {
     SetFlag(kFlexibleRepresentation);
-    SetFlag(kAllowUndefinedAsNaN);
   }
 };
 
@@ -4128,45 +4016,6 @@
   InstanceType to_;  // Inclusive range, not all combinations work.
 };
 
-
-class HHasCachedArrayIndexAndBranch final : public HUnaryControlInstruction {
- public:
-  DECLARE_INSTRUCTION_FACTORY_P1(HHasCachedArrayIndexAndBranch, HValue*);
-
-  Representation RequiredInputRepresentation(int index) override {
-    return Representation::Tagged();
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch)
- private:
-  explicit HHasCachedArrayIndexAndBranch(HValue* value)
-      : HUnaryControlInstruction(value, NULL, NULL) { }
-};
-
-
-class HGetCachedArrayIndex final : public HUnaryOperation {
- public:
-  DECLARE_INSTRUCTION_FACTORY_P1(HGetCachedArrayIndex, HValue*);
-
-  Representation RequiredInputRepresentation(int index) override {
-    return Representation::Tagged();
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex)
-
- protected:
-  bool DataEquals(HValue* other) override { return true; }
-
- private:
-  explicit HGetCachedArrayIndex(HValue* value) : HUnaryOperation(value) {
-    set_representation(Representation::Tagged());
-    SetFlag(kUseGVN);
-  }
-
-  bool IsDeletable() const override { return true; }
-};
-
-
 class HClassOfTestAndBranch final : public HUnaryControlInstruction {
  public:
   DECLARE_INSTRUCTION_FACTORY_P2(HClassOfTestAndBranch, HValue*,
@@ -4321,12 +4170,12 @@
     }
     if (to.IsTagged()) {
       SetChangesFlag(kNewSpacePromotion);
-      ClearFlag(kAllowUndefinedAsNaN);
+      ClearFlag(kTruncatingToNumber);
     }
     if (!right()->type().IsTaggedNumber() &&
         !right()->representation().IsDouble() &&
         !right()->representation().IsSmiOrInteger32()) {
-      ClearFlag(kAllowUndefinedAsNaN);
+      ClearFlag(kTruncatingToNumber);
     }
   }
 
@@ -4354,7 +4203,7 @@
  private:
   HAdd(HValue* context, HValue* left, HValue* right,
        ExternalAddType external_add_type = NoExternalAdd)
-      : HArithmeticBinaryOperation(context, left, right),
+      : HArithmeticBinaryOperation(context, left, right, HType::Tagged()),
         external_add_type_(external_add_type) {
     SetFlag(kCanOverflow);
     switch (external_add_type_) {
@@ -4826,48 +4675,6 @@
   HPhi* incoming_value_;
 };
 
-class HLoadGlobalGeneric final : public HTemplateInstruction<1> {
- public:
-  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HLoadGlobalGeneric,
-                                              Handle<String>, TypeofMode,
-                                              Handle<TypeFeedbackVector>,
-                                              FeedbackVectorSlot);
-
-  HValue* context() { return OperandAt(0); }
-  Handle<String> name() const { return name_; }
-  TypeofMode typeof_mode() const { return typeof_mode_; }
-  FeedbackVectorSlot slot() const { return slot_; }
-  Handle<TypeFeedbackVector> feedback_vector() const {
-    return feedback_vector_;
-  }
-
-  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
-
-  Representation RequiredInputRepresentation(int index) override {
-    return Representation::Tagged();
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric)
-
- private:
-  HLoadGlobalGeneric(HValue* context, Handle<String> name,
-                     TypeofMode typeof_mode, Handle<TypeFeedbackVector> vector,
-                     FeedbackVectorSlot slot)
-      : name_(name),
-        typeof_mode_(typeof_mode),
-        feedback_vector_(vector),
-        slot_(slot) {
-    SetOperandAt(0, context);
-    set_representation(Representation::Tagged());
-    SetAllSideEffects();
-  }
-
-  Handle<String> name_;
-  TypeofMode typeof_mode_;
-  Handle<TypeFeedbackVector> feedback_vector_;
-  FeedbackVectorSlot slot_;
-};
-
 class HAllocate final : public HTemplateInstruction<3> {
  public:
   static bool CompatibleInstanceTypes(InstanceType type1,
@@ -5408,11 +5215,6 @@
                          SharedFunctionInfo::kOptimizedCodeMapOffset);
   }
 
-  static HObjectAccess ForOptimizedCodeMapSharedCode() {
-    return HObjectAccess(kInobject, FixedArray::OffsetOfElementAt(
-                                        SharedFunctionInfo::kSharedCodeIndex));
-  }
-
   static HObjectAccess ForFunctionContextPointer() {
     return HObjectAccess(kInobject, JSFunction::kContextOffset);
   }
@@ -5852,46 +5654,6 @@
 };
 
 
-class HLoadNamedGeneric final : public HTemplateInstruction<2> {
- public:
-  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HLoadNamedGeneric, HValue*,
-                                              Handle<Name>,
-                                              Handle<TypeFeedbackVector>,
-                                              FeedbackVectorSlot);
-
-  HValue* context() const { return OperandAt(0); }
-  HValue* object() const { return OperandAt(1); }
-  Handle<Name> name() const { return name_; }
-
-  FeedbackVectorSlot slot() const { return slot_; }
-  Handle<TypeFeedbackVector> feedback_vector() const {
-    return feedback_vector_;
-  }
-
-  Representation RequiredInputRepresentation(int index) override {
-    return Representation::Tagged();
-  }
-
-  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric)
-
- private:
-  HLoadNamedGeneric(HValue* context, HValue* object, Handle<Name> name,
-                    Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
-      : name_(name), feedback_vector_(vector), slot_(slot) {
-    SetOperandAt(0, context);
-    SetOperandAt(1, object);
-    set_representation(Representation::Tagged());
-    SetAllSideEffects();
-  }
-
-  Handle<Name> name_;
-  Handle<TypeFeedbackVector> feedback_vector_;
-  FeedbackVectorSlot slot_;
-};
-
-
 class HLoadFunctionPrototype final : public HUnaryOperation {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HLoadFunctionPrototype, HValue*);
@@ -6128,47 +5890,6 @@
 };
 
 
-class HLoadKeyedGeneric final : public HTemplateInstruction<3> {
- public:
-  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HLoadKeyedGeneric, HValue*,
-                                              HValue*,
-                                              Handle<TypeFeedbackVector>,
-                                              FeedbackVectorSlot);
-  HValue* object() const { return OperandAt(0); }
-  HValue* key() const { return OperandAt(1); }
-  HValue* context() const { return OperandAt(2); }
-  FeedbackVectorSlot slot() const { return slot_; }
-  Handle<TypeFeedbackVector> feedback_vector() const {
-    return feedback_vector_;
-  }
-
-  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
-
-  Representation RequiredInputRepresentation(int index) override {
-    // tagged[tagged]
-    return Representation::Tagged();
-  }
-
-  HValue* Canonicalize() override;
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric)
-
- private:
-  HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key,
-                    Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
-      : feedback_vector_(vector), slot_(slot) {
-    set_representation(Representation::Tagged());
-    SetOperandAt(0, obj);
-    SetOperandAt(1, key);
-    SetOperandAt(2, context);
-    SetAllSideEffects();
-  }
-
-  Handle<TypeFeedbackVector> feedback_vector_;
-  FeedbackVectorSlot slot_;
-};
-
-
 // Indicates whether the store is a store to an entry that was previously
 // initialized or not.
 enum StoreFieldOrKeyedMode {
@@ -6488,7 +6209,7 @@
     } else if (is_fixed_typed_array()) {
       SetChangesFlag(kTypedArrayElements);
       SetChangesFlag(kExternalMemory);
-      SetFlag(kAllowUndefinedAsNaN);
+      SetFlag(kTruncatingToNumber);
     } else {
       SetChangesFlag(kArrayElements);
     }
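
KindField above is squeezed into HCallWithDescriptor's existing bit_field_ word next to the two tail-call fields. A minimal sketch of that BitField packing pattern, simplified from V8's template but with the field layout the diff establishes:

#include <cassert>
#include <cstdint>

template <class T, int shift, int size>
struct BitField {
  static constexpr uint32_t kMask = ((1u << size) - 1u) << shift;
  static constexpr int kNext = shift + size;  // Where the next field starts.
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> shift);
  }
};

enum TailCallMode : uint32_t { kDisallow, kAllow };
enum Kind : uint32_t { STUB, KEYED_LOAD_IC };

using TailCallModeField = BitField<TailCallMode, 0, 1>;
using SyntacticTailCallModeField =
    BitField<TailCallMode, TailCallModeField::kNext, 1>;
using KindField = BitField<Kind, SyntacticTailCallModeField::kNext, 5>;

int main() {
  uint32_t bits = TailCallModeField::encode(kDisallow) |
                  SyntacticTailCallModeField::encode(kAllow) |
                  KindField::encode(KEYED_LOAD_IC);
  assert(KindField::decode(bits) == KEYED_LOAD_IC);
  assert(SyntacticTailCallModeField::decode(bits) == kAllow);
  return 0;
}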
diff --git a/src/crankshaft/hydrogen-mark-deoptimize.cc b/src/crankshaft/hydrogen-mark-deoptimize.cc
deleted file mode 100644
index a706d91..0000000
--- a/src/crankshaft/hydrogen-mark-deoptimize.cc
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/hydrogen-mark-deoptimize.h"
-
-namespace v8 {
-namespace internal {
-
-void HMarkDeoptimizeOnUndefinedPhase::Run() {
-  const ZoneList<HPhi*>* phi_list = graph()->phi_list();
-  for (int i = 0; i < phi_list->length(); i++) {
-    HPhi* phi = phi_list->at(i);
-    if (phi->CheckFlag(HValue::kAllowUndefinedAsNaN) &&
-        !phi->CheckUsesForFlag(HValue::kAllowUndefinedAsNaN)) {
-      ProcessPhi(phi);
-    }
-  }
-}
-
-
-void HMarkDeoptimizeOnUndefinedPhase::ProcessPhi(HPhi* phi) {
-  DCHECK(phi->CheckFlag(HValue::kAllowUndefinedAsNaN));
-  DCHECK(worklist_.is_empty());
-
-  // Push the phi onto the worklist
-  phi->ClearFlag(HValue::kAllowUndefinedAsNaN);
-  worklist_.Add(phi, zone());
-
-  // Process all phis that can reach this phi
-  while (!worklist_.is_empty()) {
-    phi = worklist_.RemoveLast();
-    for (int i = phi->OperandCount() - 1; i >= 0; --i) {
-      HValue* input = phi->OperandAt(i);
-      if (input->IsPhi() && input->CheckFlag(HValue::kAllowUndefinedAsNaN)) {
-        input->ClearFlag(HValue::kAllowUndefinedAsNaN);
-        worklist_.Add(HPhi::cast(input), zone());
-      }
-    }
-  }
-}
-
-
-void HComputeChangeUndefinedToNaN::Run() {
-  const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
-  for (int i = 0; i < blocks->length(); ++i) {
-    const HBasicBlock* block(blocks->at(i));
-    for (HInstruction* current = block->first(); current != NULL; ) {
-      HInstruction* next = current->next();
-      if (current->IsChange()) {
-        if (HChange::cast(current)->can_convert_undefined_to_nan()) {
-          current->SetFlag(HValue::kAllowUndefinedAsNaN);
-        }
-      }
-      current = next;
-    }
-  }
-}
-
-
-}  // namespace internal
-}  // namespace v8
diff --git a/src/crankshaft/hydrogen-mark-deoptimize.h b/src/crankshaft/hydrogen-mark-deoptimize.h
deleted file mode 100644
index 45d40ac..0000000
--- a/src/crankshaft/hydrogen-mark-deoptimize.h
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_MARK_DEOPTIMIZE_H_
-#define V8_CRANKSHAFT_HYDROGEN_MARK_DEOPTIMIZE_H_
-
-#include "src/crankshaft/hydrogen.h"
-
-namespace v8 {
-namespace internal {
-
-
-// Compute DeoptimizeOnUndefined flag for phis.  Any phi that can reach a use
-// with DeoptimizeOnUndefined set must have DeoptimizeOnUndefined set.
-// Currently only HCompareNumericAndBranch, with double input representation,
-// has this flag set.  The flag is used by HChange tagged->double, which must
-// deoptimize if one of its uses has this flag set.
-class HMarkDeoptimizeOnUndefinedPhase : public HPhase {
- public:
-  explicit HMarkDeoptimizeOnUndefinedPhase(HGraph* graph)
-      : HPhase("H_Mark deoptimize on undefined", graph),
-        worklist_(16, zone()) {}
-
-  void Run();
-
- private:
-  void ProcessPhi(HPhi* phi);
-
-  // Preallocated worklist used as an optimization so we don't have
-  // to allocate a new ZoneList for every ProcessPhi() invocation.
-  ZoneList<HPhi*> worklist_;
-
-  DISALLOW_COPY_AND_ASSIGN(HMarkDeoptimizeOnUndefinedPhase);
-};
-
-
-class HComputeChangeUndefinedToNaN : public HPhase {
- public:
-  explicit HComputeChangeUndefinedToNaN(HGraph* graph)
-      : HPhase("H_Compute change undefined to nan", graph) {}
-
-  void Run();
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(HComputeChangeUndefinedToNaN);
-};
-
-
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_CRANKSHAFT_HYDROGEN_MARK_DEOPTIMIZE_H_
diff --git a/src/crankshaft/hydrogen-osr.cc b/src/crankshaft/hydrogen-osr.cc
index 8de3ac0..607bfbd 100644
--- a/src/crankshaft/hydrogen-osr.cc
+++ b/src/crankshaft/hydrogen-osr.cc
@@ -30,7 +30,7 @@
   HBasicBlock* non_osr_entry = graph->CreateBasicBlock();
   osr_entry_ = graph->CreateBasicBlock();
   HValue* true_value = graph->GetConstantTrue();
-  HBranch* test = builder_->New<HBranch>(true_value, ToBooleanICStub::Types(),
+  HBranch* test = builder_->New<HBranch>(true_value, ToBooleanHint::kNone,
                                          non_osr_entry, osr_entry_);
   builder_->FinishCurrentBlock(test);
 
diff --git a/src/crankshaft/hydrogen-representation-changes.cc b/src/crankshaft/hydrogen-representation-changes.cc
index 32b614c..4d74df4 100644
--- a/src/crankshaft/hydrogen-representation-changes.cc
+++ b/src/crankshaft/hydrogen-representation-changes.cc
@@ -24,6 +24,8 @@
   HInstruction* new_value = NULL;
   bool is_truncating_to_smi = use_value->CheckFlag(HValue::kTruncatingToSmi);
   bool is_truncating_to_int = use_value->CheckFlag(HValue::kTruncatingToInt32);
+  bool is_truncating_to_number =
+      use_value->CheckFlag(HValue::kTruncatingToNumber);
   if (value->IsConstant()) {
     HConstant* constant = HConstant::cast(value);
     // Try to create a new copy of the constant with the new representation.
@@ -36,14 +38,9 @@
   }
 
   if (new_value == NULL) {
-    new_value = new(graph()->zone()) HChange(
-        value, to, is_truncating_to_smi, is_truncating_to_int);
-    if (!use_value->operand_position(use_index).IsUnknown()) {
-      new_value->set_position(use_value->operand_position(use_index));
-    } else {
-      DCHECK(!FLAG_hydrogen_track_positions ||
-             !graph()->info()->IsOptimizing());
-    }
+    new_value = new (graph()->zone())
+        HChange(value, to, is_truncating_to_smi, is_truncating_to_int,
+                is_truncating_to_number);
   }
 
   new_value->InsertBefore(next);
@@ -116,10 +113,15 @@
 
 
 void HRepresentationChangesPhase::Run() {
-  // Compute truncation flag for phis: Initially assume that all
-  // int32-phis allow truncation and iteratively remove the ones that
-  // are used in an operation that does not allow a truncating
-  // conversion.
+  // Compute truncation flag for phis:
+  //
+  // - Initially assume that all phis allow truncation to number and iteratively
+  //   remove the ones that are used in an operation that does not do an
+  //   implicit ToNumber conversion.
+  // - Also assume that all Integer32 phis allow ToInt32 truncation and all
+  //   Smi phis allow truncation to Smi.
+  //
+  ZoneList<HPhi*> number_worklist(8, zone());
   ZoneList<HPhi*> int_worklist(8, zone());
   ZoneList<HPhi*> smi_worklist(8, zone());
 
@@ -132,23 +134,34 @@
       phi->SetFlag(HValue::kTruncatingToSmi);
       phi->SetFlag(HValue::kTruncatingToInt32);
     }
+    phi->SetFlag(HValue::kTruncatingToNumber);
   }
 
   for (int i = 0; i < phi_list->length(); i++) {
     HPhi* phi = phi_list->at(i);
     HValue* value = NULL;
-    if (phi->representation().IsSmiOrInteger32() &&
-        !phi->CheckUsesForFlag(HValue::kTruncatingToInt32, &value)) {
+
+    if (phi->CheckFlag(HValue::kTruncatingToNumber) &&
+        !phi->CheckUsesForFlag(HValue::kTruncatingToNumber, &value)) {
+      number_worklist.Add(phi, zone());
+      phi->ClearFlag(HValue::kTruncatingToNumber);
+      phi->ClearFlag(HValue::kTruncatingToInt32);
+      phi->ClearFlag(HValue::kTruncatingToSmi);
+      if (FLAG_trace_representation) {
+        PrintF("#%d Phi is not truncating Number because of #%d %s\n",
+               phi->id(), value->id(), value->Mnemonic());
+      }
+    } else if (phi->representation().IsSmiOrInteger32() &&
+               !phi->CheckUsesForFlag(HValue::kTruncatingToInt32, &value)) {
       int_worklist.Add(phi, zone());
       phi->ClearFlag(HValue::kTruncatingToInt32);
+      phi->ClearFlag(HValue::kTruncatingToSmi);
       if (FLAG_trace_representation) {
         PrintF("#%d Phi is not truncating Int32 because of #%d %s\n",
                phi->id(), value->id(), value->Mnemonic());
       }
-    }
-
-    if (phi->representation().IsSmi() &&
-        !phi->CheckUsesForFlag(HValue::kTruncatingToSmi, &value)) {
+    } else if (phi->representation().IsSmi() &&
+               !phi->CheckUsesForFlag(HValue::kTruncatingToSmi, &value)) {
       smi_worklist.Add(phi, zone());
       phi->ClearFlag(HValue::kTruncatingToSmi);
       if (FLAG_trace_representation) {
@@ -158,6 +171,23 @@
     }
   }
 
+  while (!number_worklist.is_empty()) {
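+    // Each phi on the worklist has just lost kTruncatingToNumber. Its phi
+    // inputs must lose the flag as well (plus the stronger Int32/Smi
+    // truncation flags), which may enqueue further phis.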
+    HPhi* current = number_worklist.RemoveLast();
+    for (int i = current->OperandCount() - 1; i >= 0; --i) {
+      HValue* input = current->OperandAt(i);
+      if (input->IsPhi() && input->CheckFlag(HValue::kTruncatingToNumber)) {
+        if (FLAG_trace_representation) {
+          PrintF("#%d Phi is not truncating Number because of #%d %s\n",
+                 input->id(), current->id(), current->Mnemonic());
+        }
+        input->ClearFlag(HValue::kTruncatingToNumber);
+        input->ClearFlag(HValue::kTruncatingToInt32);
+        input->ClearFlag(HValue::kTruncatingToSmi);
+        number_worklist.Add(HPhi::cast(input), zone());
+      }
+    }
+  }
+
   while (!int_worklist.is_empty()) {
     HPhi* current = int_worklist.RemoveLast();
     for (int i = 0; i < current->OperandCount(); ++i) {
diff --git a/src/crankshaft/hydrogen.cc b/src/crankshaft/hydrogen.cc
index 8d7b479..754da77 100644
--- a/src/crankshaft/hydrogen.cc
+++ b/src/crankshaft/hydrogen.cc
@@ -23,7 +23,6 @@
 #include "src/crankshaft/hydrogen-infer-representation.h"
 #include "src/crankshaft/hydrogen-infer-types.h"
 #include "src/crankshaft/hydrogen-load-elimination.h"
-#include "src/crankshaft/hydrogen-mark-deoptimize.h"
 #include "src/crankshaft/hydrogen-mark-unreachable.h"
 #include "src/crankshaft/hydrogen-osr.h"
 #include "src/crankshaft/hydrogen-range-analysis.h"
@@ -87,7 +86,7 @@
       SetSourcePosition(node->position());                   \
     }                                                        \
     HOptimizedGraphBuilder::Visit##type(node);               \
-    if (!old_position.IsUnknown()) {                         \
+    if (old_position.IsKnown()) {                            \
       set_source_position(old_position);                     \
     }                                                        \
   }
@@ -102,7 +101,7 @@
       SetSourcePosition(node->position());                   \
     }                                                        \
     HOptimizedGraphBuilder::Visit##type(node);               \
-    if (!old_position.IsUnknown()) {                         \
+    if (old_position.IsKnown()) {                            \
       set_source_position(old_position);                     \
     }                                                        \
   }
@@ -313,7 +312,7 @@
   DCHECK(!instr->IsLinked());
   DCHECK(!IsFinished());
 
-  if (!position.IsUnknown()) {
+  if (position.IsKnown()) {
     instr->set_position(position);
   }
   if (first_ == NULL) {
@@ -321,7 +320,7 @@
     DCHECK(!last_environment()->ast_id().IsNone());
     HBlockEntry* entry = new(zone()) HBlockEntry();
     entry->InitializeAsFirst(this);
-    if (!position.IsUnknown()) {
+    if (position.IsKnown()) {
       entry->set_position(position);
     } else {
       DCHECK(!FLAG_hydrogen_track_positions ||
@@ -1088,8 +1087,7 @@
     // so that the graph builder visits it and sees any live range extending
     // constructs within it.
     HConstant* constant_false = builder()->graph()->GetConstantFalse();
-    ToBooleanICStub::Types boolean_type = ToBooleanICStub::Types();
-    boolean_type.Add(ToBooleanICStub::BOOLEAN);
+    ToBooleanHints boolean_type = ToBooleanHint::kBoolean;
     HBranch* branch = builder()->New<HBranch>(
         constant_false, boolean_type, first_true_block_, first_false_block_);
     builder()->FinishCurrentBlock(branch);
@@ -1366,7 +1364,8 @@
   graph_ = new (zone()) HGraph(info_, descriptor_);
   if (FLAG_hydrogen_stats) isolate()->GetHStatistics()->Initialize(info_);
   if (!info_->IsStub() && is_tracking_positions()) {
-    TraceInlinedFunction(info_->shared_info(), SourcePosition::Unknown());
+    TraceInlinedFunction(info_->shared_info(), SourcePosition::Unknown(),
+                         SourcePosition::kNotInlined);
   }
   CompilationPhase phase("H_Block building", info_);
   set_current_block(graph()->entry_block());
@@ -1375,12 +1374,11 @@
   return graph_;
 }
 
-int HGraphBuilder::TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
-                                        SourcePosition position) {
+void HGraphBuilder::TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
+                                         SourcePosition position,
+                                         int inlining_id) {
   DCHECK(is_tracking_positions());
 
-  int inline_id = static_cast<int>(graph()->inlined_function_infos().size());
-  HInlinedFunctionInfo info(shared->start_position());
   if (!shared->script()->IsUndefined(isolate())) {
     Handle<Script> script(Script::cast(shared->script()), isolate());
 
@@ -1394,7 +1392,7 @@
         os << String::cast(source_name)->ToCString().get() << ":";
       }
       os << shared->DebugName()->ToCString().get() << ") id{";
-      os << info_->optimization_id() << "," << inline_id << "} ---\n";
+      os << info_->optimization_id() << "," << inlining_id << "} ---\n";
       {
         DisallowHeapAllocation no_allocation;
         int start = shared->start_position();
@@ -1410,23 +1408,19 @@
     }
   }
 
-  graph()->inlined_function_infos().push_back(info);
-
-  if (FLAG_hydrogen_track_positions && inline_id != 0) {
+  if (FLAG_hydrogen_track_positions &&
+      inlining_id != SourcePosition::kNotInlined) {
     CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
     OFStream os(tracing_scope.file());
     os << "INLINE (" << shared->DebugName()->ToCString().get() << ") id{"
-       << info_->optimization_id() << "," << inline_id << "} AS " << inline_id
-       << " AT " << position << std::endl;
+       << info_->optimization_id() << "," << inlining_id << "} AS "
+       << inlining_id << " AT " << position.ScriptOffset() << std::endl;
   }
-
-  return inline_id;
 }
 
 HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
   DCHECK(current_block() != NULL);
-  DCHECK(!FLAG_hydrogen_track_positions ||
-         !position_.IsUnknown() ||
+  DCHECK(!FLAG_hydrogen_track_positions || position_.IsKnown() ||
          !info_->IsOptimizing());
   current_block()->AddInstruction(instr, source_position());
   if (graph()->IsInsideNoSideEffectsScope()) {
@@ -1437,9 +1431,8 @@
 
 
 void HGraphBuilder::FinishCurrentBlock(HControlInstruction* last) {
-  DCHECK(!FLAG_hydrogen_track_positions ||
-         !info_->IsOptimizing() ||
-         !position_.IsUnknown());
+  DCHECK(!FLAG_hydrogen_track_positions || !info_->IsOptimizing() ||
+         position_.IsKnown());
   current_block()->Finish(last, source_position());
   if (last->IsReturn() || last->IsAbnormalExit()) {
     set_current_block(NULL);
@@ -1449,7 +1442,7 @@
 
 void HGraphBuilder::FinishExitCurrentBlock(HControlInstruction* instruction) {
   DCHECK(!FLAG_hydrogen_track_positions || !info_->IsOptimizing() ||
-         !position_.IsUnknown());
+         position_.IsKnown());
   current_block()->FinishExit(instruction, source_position());
   if (instruction->IsReturn() || instruction->IsAbnormalExit()) {
     set_current_block(NULL);
@@ -1647,190 +1640,6 @@
   return environment()->Pop();
 }
 
-
-void HGraphBuilder::BuildJSObjectCheck(HValue* receiver,
-                                       int bit_field_mask) {
-  // Check that the object isn't a smi.
-  Add<HCheckHeapObject>(receiver);
-
-  // Get the map of the receiver.
-  HValue* map =
-      Add<HLoadNamedField>(receiver, nullptr, HObjectAccess::ForMap());
-
-  // Check the instance type and if an access check is needed, this can be
-  // done with a single load, since both bytes are adjacent in the map.
-  HObjectAccess access(HObjectAccess::ForMapInstanceTypeAndBitField());
-  HValue* instance_type_and_bit_field =
-      Add<HLoadNamedField>(map, nullptr, access);
-
-  HValue* mask = Add<HConstant>(0x00FF | (bit_field_mask << 8));
-  HValue* and_result = AddUncasted<HBitwise>(Token::BIT_AND,
-                                             instance_type_and_bit_field,
-                                             mask);
-  HValue* sub_result = AddUncasted<HSub>(and_result,
-                                         Add<HConstant>(JS_OBJECT_TYPE));
-  Add<HBoundsCheck>(sub_result,
-                    Add<HConstant>(LAST_JS_OBJECT_TYPE + 1 - JS_OBJECT_TYPE));
-}
-
-
-void HGraphBuilder::BuildKeyedIndexCheck(HValue* key,
-                                         HIfContinuation* join_continuation) {
-  // The sometimes unintuitively backward ordering of the ifs below is
-  // convoluted, but necessary.  All of the paths must guarantee that the
-  // if-true of the continuation returns a smi element index and the if-false of
-  // the continuation returns either a symbol or a unique string key. All other
-  // object types cause a deopt to fall back to the runtime.
-
-  IfBuilder key_smi_if(this);
-  key_smi_if.If<HIsSmiAndBranch>(key);
-  key_smi_if.Then();
-  {
-    Push(key);  // Nothing to do, just continue to true of continuation.
-  }
-  key_smi_if.Else();
-  {
-    HValue* map = Add<HLoadNamedField>(key, nullptr, HObjectAccess::ForMap());
-    HValue* instance_type =
-        Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapInstanceType());
-
-    // Non-unique string, check for a string with a hash code that is actually
-    // an index.
-    STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
-    IfBuilder not_string_or_name_if(this);
-    not_string_or_name_if.If<HCompareNumericAndBranch>(
-        instance_type,
-        Add<HConstant>(LAST_UNIQUE_NAME_TYPE),
-        Token::GT);
-
-    not_string_or_name_if.Then();
-    {
-      // Non-smi, non-Name, non-String: Try to convert to smi in case of
-      // HeapNumber.
-      // TODO(danno): This could call some variant of ToString
-      Push(AddUncasted<HForceRepresentation>(key, Representation::Smi()));
-    }
-    not_string_or_name_if.Else();
-    {
-      // String or Name: check explicitly for Name, they can short-circuit
-      // directly to unique non-index key path.
-      IfBuilder not_symbol_if(this);
-      not_symbol_if.If<HCompareNumericAndBranch>(
-          instance_type,
-          Add<HConstant>(SYMBOL_TYPE),
-          Token::NE);
-
-      not_symbol_if.Then();
-      {
-        // String: check whether the String is a String of an index. If it is,
-        // extract the index value from the hash.
-        HValue* hash = Add<HLoadNamedField>(key, nullptr,
-                                            HObjectAccess::ForNameHashField());
-        HValue* not_index_mask = Add<HConstant>(static_cast<int>(
-            String::kContainsCachedArrayIndexMask));
-
-        HValue* not_index_test = AddUncasted<HBitwise>(
-            Token::BIT_AND, hash, not_index_mask);
-
-        IfBuilder string_index_if(this);
-        string_index_if.If<HCompareNumericAndBranch>(not_index_test,
-                                                     graph()->GetConstant0(),
-                                                     Token::EQ);
-        string_index_if.Then();
-        {
-          // String with index in hash: extract string and merge to index path.
-          Push(BuildDecodeField<String::ArrayIndexValueBits>(hash));
-        }
-        string_index_if.Else();
-        {
-          // Key is a non-index String, check for uniqueness/internalization.
-          // If it's not internalized yet, internalize it now.
-          HValue* not_internalized_bit = AddUncasted<HBitwise>(
-              Token::BIT_AND,
-              instance_type,
-              Add<HConstant>(static_cast<int>(kIsNotInternalizedMask)));
-
-          IfBuilder internalized(this);
-          internalized.If<HCompareNumericAndBranch>(not_internalized_bit,
-                                                    graph()->GetConstant0(),
-                                                    Token::EQ);
-          internalized.Then();
-          Push(key);
-
-          internalized.Else();
-          Add<HPushArguments>(key);
-          HValue* intern_key = Add<HCallRuntime>(
-              Runtime::FunctionForId(Runtime::kInternalizeString), 1);
-          Push(intern_key);
-
-          internalized.End();
-          // Key guaranteed to be a unique string
-        }
-        string_index_if.JoinContinuation(join_continuation);
-      }
-      not_symbol_if.Else();
-      {
-        Push(key);  // Key is symbol
-      }
-      not_symbol_if.JoinContinuation(join_continuation);
-    }
-    not_string_or_name_if.JoinContinuation(join_continuation);
-  }
-  key_smi_if.JoinContinuation(join_continuation);
-}
-
-
-void HGraphBuilder::BuildNonGlobalObjectCheck(HValue* receiver) {
-  // Get the the instance type of the receiver, and make sure that it is
-  // not one of the global object types.
-  HValue* map =
-      Add<HLoadNamedField>(receiver, nullptr, HObjectAccess::ForMap());
-  HValue* instance_type =
-      Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapInstanceType());
-  HValue* global_type = Add<HConstant>(JS_GLOBAL_OBJECT_TYPE);
-
-  IfBuilder if_global_object(this);
-  if_global_object.If<HCompareNumericAndBranch>(instance_type, global_type,
-                                                Token::EQ);
-  if_global_object.ThenDeopt(DeoptimizeReason::kReceiverWasAGlobalObject);
-  if_global_object.End();
-}
-
-
-void HGraphBuilder::BuildTestForDictionaryProperties(
-    HValue* object,
-    HIfContinuation* continuation) {
-  HValue* properties = Add<HLoadNamedField>(
-      object, nullptr, HObjectAccess::ForPropertiesPointer());
-  HValue* properties_map =
-      Add<HLoadNamedField>(properties, nullptr, HObjectAccess::ForMap());
-  HValue* hash_map = Add<HLoadRoot>(Heap::kHashTableMapRootIndex);
-  IfBuilder builder(this);
-  builder.If<HCompareObjectEqAndBranch>(properties_map, hash_map);
-  builder.CaptureContinuation(continuation);
-}
-
-
-HValue* HGraphBuilder::BuildKeyedLookupCacheHash(HValue* object,
-                                                 HValue* key) {
-  // Load the map of the receiver, compute the keyed lookup cache hash
-  // based on 32 bits of the map pointer and the string hash.
-  HValue* object_map =
-      Add<HLoadNamedField>(object, nullptr, HObjectAccess::ForMapAsInteger32());
-  HValue* shifted_map = AddUncasted<HShr>(
-      object_map, Add<HConstant>(KeyedLookupCache::kMapHashShift));
-  HValue* string_hash =
-      Add<HLoadNamedField>(key, nullptr, HObjectAccess::ForStringHashField());
-  HValue* shifted_hash = AddUncasted<HShr>(
-      string_hash, Add<HConstant>(String::kHashShift));
-  HValue* xor_result = AddUncasted<HBitwise>(Token::BIT_XOR, shifted_map,
-                                             shifted_hash);
-  int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
-  return AddUncasted<HBitwise>(Token::BIT_AND, xor_result,
-                               Add<HConstant>(mask));
-}
-
-
 HValue* HGraphBuilder::BuildElementIndexHash(HValue* index) {
   int32_t seed_value = static_cast<uint32_t>(isolate()->heap()->HashSeed());
   HValue* seed = Add<HConstant>(seed_value);
@@ -1997,7 +1806,6 @@
   return Pop();
 }
 
-
 HValue* HGraphBuilder::BuildCreateIterResultObject(HValue* value,
                                                    HValue* done) {
   NoObservableSideEffectsScope scope(this);
@@ -2029,67 +1837,6 @@
 }
 
 
-HValue* HGraphBuilder::BuildRegExpConstructResult(HValue* length,
-                                                  HValue* index,
-                                                  HValue* input) {
-  NoObservableSideEffectsScope scope(this);
-  HConstant* max_length = Add<HConstant>(JSArray::kInitialMaxFastElementArray);
-  Add<HBoundsCheck>(length, max_length);
-
-  // Generate size calculation code here in order to make it dominate
-  // the JSRegExpResult allocation.
-  ElementsKind elements_kind = FAST_ELEMENTS;
-  HValue* size = BuildCalculateElementsSize(elements_kind, length);
-
-  // Allocate the JSRegExpResult and the FixedArray in one step.
-  HValue* result =
-      Add<HAllocate>(Add<HConstant>(JSRegExpResult::kSize), HType::JSArray(),
-                     NOT_TENURED, JS_ARRAY_TYPE, graph()->GetConstant0());
-
-  // Initialize the JSRegExpResult header.
-  HValue* native_context = Add<HLoadNamedField>(
-      context(), nullptr,
-      HObjectAccess::ForContextSlot(Context::NATIVE_CONTEXT_INDEX));
-  Add<HStoreNamedField>(
-      result, HObjectAccess::ForMap(),
-      Add<HLoadNamedField>(
-          native_context, nullptr,
-          HObjectAccess::ForContextSlot(Context::REGEXP_RESULT_MAP_INDEX)));
-  HConstant* empty_fixed_array =
-      Add<HConstant>(isolate()->factory()->empty_fixed_array());
-  Add<HStoreNamedField>(
-      result, HObjectAccess::ForJSArrayOffset(JSArray::kPropertiesOffset),
-      empty_fixed_array);
-  Add<HStoreNamedField>(
-      result, HObjectAccess::ForJSArrayOffset(JSArray::kElementsOffset),
-      empty_fixed_array);
-  Add<HStoreNamedField>(
-      result, HObjectAccess::ForJSArrayOffset(JSArray::kLengthOffset), length);
-
-  // Initialize the additional fields.
-  Add<HStoreNamedField>(
-      result, HObjectAccess::ForJSArrayOffset(JSRegExpResult::kIndexOffset),
-      index);
-  Add<HStoreNamedField>(
-      result, HObjectAccess::ForJSArrayOffset(JSRegExpResult::kInputOffset),
-      input);
-
-  // Allocate and initialize the elements header.
-  HAllocate* elements = BuildAllocateElements(elements_kind, size);
-  BuildInitializeElementsHeader(elements, elements_kind, length);
-
-  Add<HStoreNamedField>(
-      result, HObjectAccess::ForJSArrayOffset(JSArray::kElementsOffset),
-      elements);
-
-  // Initialize the elements contents with undefined.
-  BuildFillElementsWithValue(
-      elements, elements_kind, graph()->GetConstant0(), length,
-      graph()->GetConstantUndefined());
-
-  return result;
-}
-
 HValue* HGraphBuilder::BuildNumberToString(HValue* object, AstType* type) {
   NoObservableSideEffectsScope scope(this);
 
@@ -2232,12 +1979,13 @@
 }
 
 HValue* HGraphBuilder::BuildToNumber(HValue* input) {
-  if (input->type().IsTaggedNumber()) {
+  if (input->type().IsTaggedNumber() ||
+      input->representation().IsSpecialization()) {
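+    // A specialized representation (Smi, Integer32 or Double) already
+    // guarantees a number, so no conversion is needed.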
     return input;
   }
   Callable callable = CodeFactory::ToNumber(isolate());
   HValue* stub = Add<HConstant>(callable.code());
-  HValue* values[] = {context(), input};
+  HValue* values[] = {input};
   HCallWithDescriptor* instr = Add<HCallWithDescriptor>(
       stub, 0, callable.descriptor(), ArrayVector(values));
   instr->set_type(HType::TaggedNumber());
@@ -3215,12 +2963,12 @@
       if_hole.Else();
       HStoreKeyed* store =
           Add<HStoreKeyed>(to_elements, key, element, nullptr, kind);
-      store->SetFlag(HValue::kAllowUndefinedAsNaN);
+      store->SetFlag(HValue::kTruncatingToNumber);
       if_hole.End();
     } else {
       HStoreKeyed* store =
           Add<HStoreKeyed>(to_elements, key, element, nullptr, kind);
-      store->SetFlag(HValue::kAllowUndefinedAsNaN);
+      store->SetFlag(HValue::kTruncatingToNumber);
     }
 
     builder.EndBody();
@@ -3373,7 +3121,7 @@
                                                bool track_positions)
     : HGraphBuilder(info, CallInterfaceDescriptor(), track_positions),
       function_state_(NULL),
-      initial_function_state_(this, info, NORMAL_RETURN, 0,
+      initial_function_state_(this, info, NORMAL_RETURN, -1,
                               TailCallMode::kAllow),
       ast_context_(NULL),
       break_scope_(NULL),
@@ -3489,8 +3237,7 @@
       type_change_checksum_(0),
       maximum_environment_size_(0),
       no_side_effects_scope_count_(0),
-      disallow_adding_new_values_(false),
-      inlined_function_infos_(info->zone()) {
+      disallow_adding_new_values_(false) {
   if (info->IsStub()) {
     // For stubs, explicitly add the context to the environment.
     start_environment_ =
@@ -3522,14 +3269,6 @@
 }
 
 
-int HGraph::SourcePositionToScriptPosition(SourcePosition pos) {
-  return (FLAG_hydrogen_track_positions && !pos.IsUnknown())
-             ? inlined_function_infos_.at(pos.inlining_id()).start_position +
-                   pos.position()
-             : pos.raw();
-}
-
-
 // Block ordering was implemented with two mutually recursive methods,
 // HGraph::Postorder and HGraph::PostorderLoopBlocks.
 // The recursion could lead to stack overflow so the algorithm has been
@@ -3954,9 +3693,7 @@
 
   if (owner->is_tracking_positions()) {
     outer_source_position_ = owner->source_position();
-    owner->EnterInlinedSource(
-      info->shared_info()->start_position(),
-      inlining_id);
+    owner->EnterInlinedSource(inlining_id);
     owner->SetSourcePosition(info->shared_info()->start_position());
   }
 }
@@ -3968,9 +3705,7 @@
 
   if (owner_->is_tracking_positions()) {
     owner_->set_source_position(outer_source_position_);
-    owner_->EnterInlinedSource(
-      outer_->compilation_info()->shared_info()->start_position(),
-      outer_->inlining_id());
+    owner_->EnterInlinedSource(outer_->inlining_id());
   }
 }
 
@@ -4181,7 +3916,7 @@
   if (value != NULL && value->CheckFlag(HValue::kIsArguments)) {
     builder->Bailout(kArgumentsObjectValueInATestContext);
   }
-  ToBooleanICStub::Types expected(condition()->to_boolean_types());
+  ToBooleanHints expected(condition()->to_boolean_types());
   ReturnControl(owner()->New<HBranch>(value, expected), BailoutId::None());
 }
 
@@ -4376,7 +4111,6 @@
   // This must happen after inferring representations.
   Run<HMergeRemovableSimulatesPhase>();
 
-  Run<HMarkDeoptimizeOnUndefinedPhase>();
   Run<HRepresentationChangesPhase>();
 
   Run<HInferTypesPhase>();
@@ -4396,8 +4130,6 @@
 
   Run<HRangeAnalysisPhase>();
 
-  Run<HComputeChangeUndefinedToNaN>();
-
   // Eliminate redundant stack checks on backwards branches.
   Run<HStackCheckEliminationPhase>();
 
@@ -5261,11 +4993,10 @@
     }
     set_current_block(if_slow);
     {
-      ForInFilterStub stub(isolate());
-      HValue* values[] = {context(), key, enumerable};
-      HConstant* stub_value = Add<HConstant>(stub.GetCode());
-      Push(Add<HCallWithDescriptor>(stub_value, 0,
-                                    stub.GetCallInterfaceDescriptor(),
+      Callable callable = CodeFactory::ForInFilter(isolate());
+      HValue* values[] = {key, enumerable};
+      HConstant* stub_value = Add<HConstant>(callable.code());
+      Push(Add<HCallWithDescriptor>(stub_value, 0, callable.descriptor(),
                                     ArrayVector(values)));
       Add<HSimulate>(stmt->FilterId());
       FinishCurrentBlock(New<HCompareObjectEqAndBranch>(
@@ -5295,7 +5026,7 @@
   }
 
   HBasicBlock* body_exit = JoinContinue(
-      stmt, stmt->ContinueId(), current_block(), break_info.continue_block());
+      stmt, stmt->IncrementId(), current_block(), break_info.continue_block());
 
   if (body_exit != NULL) {
     set_current_block(body_exit);
@@ -5371,7 +5102,7 @@
   if (!expr->pretenure()) {
     FastNewClosureStub stub(isolate());
     FastNewClosureDescriptor descriptor(isolate());
-    HValue* values[] = {context(), shared_info_value};
+    HValue* values[] = {shared_info_value};
     HConstant* stub_value = Add<HConstant>(stub.GetCode());
     instr = New<HCallWithDescriptor>(stub_value, 0, descriptor,
                                      ArrayVector(values));
@@ -5449,12 +5180,16 @@
   }
 }
 
+bool HOptimizedGraphBuilder::CanInlineGlobalPropertyAccess(
+    Variable* var, LookupIterator* it, PropertyAccessType access_type) {
+  if (var->is_this()) return false;
+  return CanInlineGlobalPropertyAccess(it, access_type);
+}
 
-HOptimizedGraphBuilder::GlobalPropertyAccess
-HOptimizedGraphBuilder::LookupGlobalProperty(Variable* var, LookupIterator* it,
-                                             PropertyAccessType access_type) {
-  if (var->is_this() || !current_info()->has_global_object()) {
-    return kUseGeneric;
+bool HOptimizedGraphBuilder::CanInlineGlobalPropertyAccess(
+    LookupIterator* it, PropertyAccessType access_type) {
+  if (!current_info()->has_global_object()) {
+    return false;
   }
 
   switch (it->state()) {
@@ -5463,17 +5198,17 @@
     case LookupIterator::INTERCEPTOR:
     case LookupIterator::INTEGER_INDEXED_EXOTIC:
     case LookupIterator::NOT_FOUND:
-      return kUseGeneric;
+      return false;
     case LookupIterator::DATA:
-      if (access_type == STORE && it->IsReadOnly()) return kUseGeneric;
-      if (!it->GetHolder<JSObject>()->IsJSGlobalObject()) return kUseGeneric;
-      return kUseCell;
+      if (access_type == STORE && it->IsReadOnly()) return false;
+      if (!it->GetHolder<JSObject>()->IsJSGlobalObject()) return false;
+      return true;
     case LookupIterator::JSPROXY:
     case LookupIterator::TRANSITION:
       UNREACHABLE();
   }
   UNREACHABLE();
-  return kUseGeneric;
+  return false;
 }
 
 
@@ -5489,6 +5224,55 @@
   return context;
 }
 
+void HOptimizedGraphBuilder::InlineGlobalPropertyLoad(LookupIterator* it,
+                                                      BailoutId ast_id) {
+  Handle<PropertyCell> cell = it->GetPropertyCell();
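+  // CanInlineGlobalPropertyAccess guarantees the property lives in a cell
+  // on the global object; record the dependency so the optimized code is
+  // invalidated if the cell changes state.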
+  top_info()->dependencies()->AssumePropertyCell(cell);
+  auto cell_type = it->property_details().cell_type();
+  if (cell_type == PropertyCellType::kConstant ||
+      cell_type == PropertyCellType::kUndefined) {
+    Handle<Object> constant_object(cell->value(), isolate());
+    if (constant_object->IsConsString()) {
+      constant_object = String::Flatten(Handle<String>::cast(constant_object));
+    }
+    HConstant* constant = New<HConstant>(constant_object);
+    return ast_context()->ReturnInstruction(constant, ast_id);
+  } else {
+    auto access = HObjectAccess::ForPropertyCellValue();
+    UniqueSet<Map>* field_maps = nullptr;
+    if (cell_type == PropertyCellType::kConstantType) {
+      switch (cell->GetConstantType()) {
+        case PropertyCellConstantType::kSmi:
+          access = access.WithRepresentation(Representation::Smi());
+          break;
+        case PropertyCellConstantType::kStableMap: {
+          // Check that the map really is stable. The heap object could
+          // have mutated without the cell updating state. In that case,
+          // make no promises about the loaded value except that it's a
+          // heap object.
+          access = access.WithRepresentation(Representation::HeapObject());
+          Handle<Map> map(HeapObject::cast(cell->value())->map());
+          if (map->is_stable()) {
+            field_maps = new (zone())
+                UniqueSet<Map>(Unique<Map>::CreateImmovable(map), zone());
+          }
+          break;
+        }
+      }
+    }
+    HConstant* cell_constant = Add<HConstant>(cell);
+    HLoadNamedField* instr;
+    if (field_maps == nullptr) {
+      instr = New<HLoadNamedField>(cell_constant, nullptr, access);
+    } else {
+      instr = New<HLoadNamedField>(cell_constant, nullptr, access, field_maps,
+                                   HType::HeapObject());
+    }
+    instr->ClearDependsOnFlag(kInobjectFields);
+    instr->SetDependsOnFlag(kGlobalVars);
+    return ast_context()->ReturnInstruction(instr, ast_id);
+  }
+}
 
 void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
   DCHECK(!HasStackOverflow());
@@ -5537,62 +5321,23 @@
       }
 
       LookupIterator it(global, variable->name(), LookupIterator::OWN);
-      GlobalPropertyAccess type = LookupGlobalProperty(variable, &it, LOAD);
-
-      if (type == kUseCell) {
-        Handle<PropertyCell> cell = it.GetPropertyCell();
-        top_info()->dependencies()->AssumePropertyCell(cell);
-        auto cell_type = it.property_details().cell_type();
-        if (cell_type == PropertyCellType::kConstant ||
-            cell_type == PropertyCellType::kUndefined) {
-          Handle<Object> constant_object(cell->value(), isolate());
-          if (constant_object->IsConsString()) {
-            constant_object =
-                String::Flatten(Handle<String>::cast(constant_object));
-          }
-          HConstant* constant = New<HConstant>(constant_object);
-          return ast_context()->ReturnInstruction(constant, expr->id());
-        } else {
-          auto access = HObjectAccess::ForPropertyCellValue();
-          UniqueSet<Map>* field_maps = nullptr;
-          if (cell_type == PropertyCellType::kConstantType) {
-            switch (cell->GetConstantType()) {
-              case PropertyCellConstantType::kSmi:
-                access = access.WithRepresentation(Representation::Smi());
-                break;
-              case PropertyCellConstantType::kStableMap: {
-                // Check that the map really is stable. The heap object could
-                // have mutated without the cell updating state. In that case,
-                // make no promises about the loaded value except that it's a
-                // heap object.
-                access =
-                    access.WithRepresentation(Representation::HeapObject());
-                Handle<Map> map(HeapObject::cast(cell->value())->map());
-                if (map->is_stable()) {
-                  field_maps = new (zone())
-                      UniqueSet<Map>(Unique<Map>::CreateImmovable(map), zone());
-                }
-                break;
-              }
-            }
-          }
-          HConstant* cell_constant = Add<HConstant>(cell);
-          HLoadNamedField* instr;
-          if (field_maps == nullptr) {
-            instr = New<HLoadNamedField>(cell_constant, nullptr, access);
-          } else {
-            instr = New<HLoadNamedField>(cell_constant, nullptr, access,
-                                         field_maps, HType::HeapObject());
-          }
-          instr->ClearDependsOnFlag(kInobjectFields);
-          instr->SetDependsOnFlag(kGlobalVars);
-          return ast_context()->ReturnInstruction(instr, expr->id());
-        }
+      it.TryLookupCachedProperty();
+      if (CanInlineGlobalPropertyAccess(variable, &it, LOAD)) {
+        InlineGlobalPropertyLoad(&it, expr->id());
+        return;
       } else {
         Handle<TypeFeedbackVector> vector(current_feedback_vector(), isolate());
-        HLoadGlobalGeneric* instr = New<HLoadGlobalGeneric>(
-            variable->name(), ast_context()->typeof_mode(), vector,
-            expr->VariableFeedbackSlot());
+
+        HValue* vector_value = Add<HConstant>(vector);
+        HValue* slot_value =
+            Add<HConstant>(vector->GetIndex(expr->VariableFeedbackSlot()));
+        Callable callable = CodeFactory::LoadGlobalICInOptimizedCode(
+            isolate(), ast_context()->typeof_mode());
+        HValue* stub = Add<HConstant>(callable.code());
+        HValue* values[] = {slot_value, vector_value};
+        HCallWithDescriptor* instr = New<HCallWithDescriptor>(
+            Code::LOAD_GLOBAL_IC, stub, 0, callable.descriptor(),
+            ArrayVector(values));
         return ast_context()->ReturnInstruction(instr, expr->id());
       }
     }
@@ -5648,9 +5393,9 @@
   DCHECK(current_block() != NULL);
   DCHECK(current_block()->HasPredecessor());
   Callable callable = CodeFactory::FastCloneRegExp(isolate());
-  HValue* values[] = {
-      context(), AddThisFunction(), Add<HConstant>(expr->literal_index()),
-      Add<HConstant>(expr->pattern()), Add<HConstant>(expr->flags())};
+  HValue* values[] = {AddThisFunction(), Add<HConstant>(expr->literal_index()),
+                      Add<HConstant>(expr->pattern()),
+                      Add<HConstant>(expr->flags())};
   HConstant* stub_value = Add<HConstant>(callable.code());
   HInstruction* instr = New<HCallWithDescriptor>(
       stub_value, 0, callable.descriptor(), ArrayVector(values));
@@ -6216,7 +5961,7 @@
   DCHECK(field_type_.IsHeapObject());
 
   // Add dependency on the map that introduced the field.
-  top_info()->dependencies()->AssumeFieldType(GetFieldOwnerFromMap(map));
+  top_info()->dependencies()->AssumeFieldOwner(GetFieldOwnerFromMap(map));
   return true;
 }
 
@@ -6388,6 +6133,18 @@
   }
 
   if (info->IsAccessorConstant()) {
+    MaybeHandle<Name> maybe_name =
+        FunctionTemplateInfo::TryGetCachedPropertyName(isolate(),
+                                                       info->accessor());
+    if (!maybe_name.is_null()) {
+      Handle<Name> name = maybe_name.ToHandleChecked();
+      PropertyAccessInfo cache_info(this, LOAD, info->map(), name);
+      // Load new target.
+      if (cache_info.CanAccessMonomorphic()) {
+        return BuildLoadNamedField(&cache_info, checked_object);
+      }
+    }
+
     Push(checked_object);
     int argument_count = 1;
     if (!info->IsLoad()) {
@@ -6683,6 +6440,67 @@
              expr->AssignmentId(), expr->IsUninitialized());
 }
 
+HInstruction* HOptimizedGraphBuilder::InlineGlobalPropertyStore(
+    LookupIterator* it, HValue* value, BailoutId ast_id) {
+  Handle<PropertyCell> cell = it->GetPropertyCell();
+  top_info()->dependencies()->AssumePropertyCell(cell);
+  auto cell_type = it->property_details().cell_type();
+  if (cell_type == PropertyCellType::kConstant ||
+      cell_type == PropertyCellType::kUndefined) {
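+    // For constant cells the store must write back the value the cell
+    // already holds; otherwise the optimized code has to deoptimize.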
+    Handle<Object> constant(cell->value(), isolate());
+    if (value->IsConstant()) {
+      HConstant* c_value = HConstant::cast(value);
+      if (!constant.is_identical_to(c_value->handle(isolate()))) {
+        Add<HDeoptimize>(DeoptimizeReason::kConstantGlobalVariableAssignment,
+                         Deoptimizer::EAGER);
+      }
+    } else {
+      HValue* c_constant = Add<HConstant>(constant);
+      IfBuilder builder(this);
+      if (constant->IsNumber()) {
+        builder.If<HCompareNumericAndBranch>(value, c_constant, Token::EQ);
+      } else {
+        builder.If<HCompareObjectEqAndBranch>(value, c_constant);
+      }
+      builder.Then();
+      builder.Else();
+      Add<HDeoptimize>(DeoptimizeReason::kConstantGlobalVariableAssignment,
+                       Deoptimizer::EAGER);
+      builder.End();
+    }
+  }
+  HConstant* cell_constant = Add<HConstant>(cell);
+  auto access = HObjectAccess::ForPropertyCellValue();
+  if (cell_type == PropertyCellType::kConstantType) {
+    switch (cell->GetConstantType()) {
+      case PropertyCellConstantType::kSmi:
+        access = access.WithRepresentation(Representation::Smi());
+        break;
+      case PropertyCellConstantType::kStableMap: {
+        // First check that the previous value of the {cell} still has the
+        // map that we are about to check the new {value} for. If not, then
+        // the stable map assumption was invalidated and we cannot continue
+        // with the optimized code.
+        Handle<HeapObject> cell_value(HeapObject::cast(cell->value()));
+        Handle<Map> cell_value_map(cell_value->map());
+        if (!cell_value_map->is_stable()) {
+          Bailout(kUnstableConstantTypeHeapObject);
+          return nullptr;
+        }
+        top_info()->dependencies()->AssumeMapStable(cell_value_map);
+        // Now check that the new {value} is a HeapObject with the same map.
+        Add<HCheckHeapObject>(value);
+        value = Add<HCheckMaps>(value, cell_value_map);
+        access = access.WithRepresentation(Representation::HeapObject());
+        break;
+      }
+    }
+  }
+  HInstruction* instr = New<HStoreNamedField>(cell_constant, access, value);
+  instr->ClearChangesFlag(kInobjectFields);
+  instr->SetChangesFlag(kGlobalVars);
+  return instr;
+}
 
 // Because not every expression has a position and there is no common
 // superclass of Assignment and CountOperation, we cannot just pass the
@@ -6723,64 +6541,10 @@
   }
 
   LookupIterator it(global, var->name(), LookupIterator::OWN);
-  GlobalPropertyAccess type = LookupGlobalProperty(var, &it, STORE);
-  if (type == kUseCell) {
-    Handle<PropertyCell> cell = it.GetPropertyCell();
-    top_info()->dependencies()->AssumePropertyCell(cell);
-    auto cell_type = it.property_details().cell_type();
-    if (cell_type == PropertyCellType::kConstant ||
-        cell_type == PropertyCellType::kUndefined) {
-      Handle<Object> constant(cell->value(), isolate());
-      if (value->IsConstant()) {
-        HConstant* c_value = HConstant::cast(value);
-        if (!constant.is_identical_to(c_value->handle(isolate()))) {
-          Add<HDeoptimize>(DeoptimizeReason::kConstantGlobalVariableAssignment,
-                           Deoptimizer::EAGER);
-        }
-      } else {
-        HValue* c_constant = Add<HConstant>(constant);
-        IfBuilder builder(this);
-        if (constant->IsNumber()) {
-          builder.If<HCompareNumericAndBranch>(value, c_constant, Token::EQ);
-        } else {
-          builder.If<HCompareObjectEqAndBranch>(value, c_constant);
-        }
-        builder.Then();
-        builder.Else();
-        Add<HDeoptimize>(DeoptimizeReason::kConstantGlobalVariableAssignment,
-                         Deoptimizer::EAGER);
-        builder.End();
-      }
-    }
-    HConstant* cell_constant = Add<HConstant>(cell);
-    auto access = HObjectAccess::ForPropertyCellValue();
-    if (cell_type == PropertyCellType::kConstantType) {
-      switch (cell->GetConstantType()) {
-        case PropertyCellConstantType::kSmi:
-          access = access.WithRepresentation(Representation::Smi());
-          break;
-        case PropertyCellConstantType::kStableMap: {
-          // First check that the previous value of the {cell} still has the
-          // map that we are about to check the new {value} for. If not, then
-          // the stable map assumption was invalidated and we cannot continue
-          // with the optimized code.
-          Handle<HeapObject> cell_value(HeapObject::cast(cell->value()));
-          Handle<Map> cell_value_map(cell_value->map());
-          if (!cell_value_map->is_stable()) {
-            return Bailout(kUnstableConstantTypeHeapObject);
-          }
-          top_info()->dependencies()->AssumeMapStable(cell_value_map);
-          // Now check that the new {value} is a HeapObject with the same map.
-          Add<HCheckHeapObject>(value);
-          value = Add<HCheckMaps>(value, cell_value_map);
-          access = access.WithRepresentation(Representation::HeapObject());
-          break;
-        }
-      }
-    }
-    HInstruction* instr = Add<HStoreNamedField>(cell_constant, access, value);
-    instr->ClearChangesFlag(kInobjectFields);
-    instr->SetChangesFlag(kGlobalVars);
+  if (CanInlineGlobalPropertyAccess(var, &it, STORE)) {
+    HInstruction* instr = InlineGlobalPropertyStore(&it, value, ast_id);
+    if (!instr) return;
+    AddInstruction(instr);
     if (instr->HasObservableSideEffects()) {
       Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
     }
@@ -6796,10 +6560,9 @@
     Callable callable = CodeFactory::StoreICInOptimizedCode(
         isolate(), function_language_mode());
     HValue* stub = Add<HConstant>(callable.code());
-    HValue* values[] = {context(), global_object, name,
-                        value,     slot_value,    vector_value};
+    HValue* values[] = {global_object, name, value, slot_value, vector_value};
     HCallWithDescriptor* instr = Add<HCallWithDescriptor>(
-        stub, 0, callable.descriptor(), ArrayVector(values));
+        Code::STORE_IC, stub, 0, callable.descriptor(), ArrayVector(values));
     USE(instr);
     DCHECK(instr->HasObservableSideEffects());
     Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
@@ -7098,36 +6861,35 @@
         DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess,
         Deoptimizer::SOFT);
   }
-  if (access_type == LOAD) {
-    Handle<TypeFeedbackVector> vector =
-        handle(current_feedback_vector(), isolate());
+  Handle<TypeFeedbackVector> vector(current_feedback_vector(), isolate());
 
+  HValue* key = Add<HConstant>(name);
+  HValue* vector_value = Add<HConstant>(vector);
+  HValue* slot_value = Add<HConstant>(vector->GetIndex(slot));
+
+  if (access_type == LOAD) {
+    HValue* values[] = {object, key, slot_value, vector_value};
     if (!expr->AsProperty()->key()->IsPropertyName()) {
       // It's possible that a keyed load of a constant string was converted
       // to a named load. Here, at the last minute, we need to make sure to
       // use a generic Keyed Load if we are using the type vector, because
       // it has to share information with full code.
-      HConstant* key = Add<HConstant>(name);
-      HLoadKeyedGeneric* result =
-          New<HLoadKeyedGeneric>(object, key, vector, slot);
+      Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(isolate());
+      HValue* stub = Add<HConstant>(callable.code());
+      HCallWithDescriptor* result =
+          New<HCallWithDescriptor>(Code::KEYED_LOAD_IC, stub, 0,
+                                   callable.descriptor(), ArrayVector(values));
       return result;
     }
-
-    HLoadNamedGeneric* result =
-        New<HLoadNamedGeneric>(object, name, vector, slot);
+    Callable callable = CodeFactory::LoadICInOptimizedCode(isolate());
+    HValue* stub = Add<HConstant>(callable.code());
+    HCallWithDescriptor* result = New<HCallWithDescriptor>(
+        Code::LOAD_IC, stub, 0, callable.descriptor(), ArrayVector(values));
     return result;
+
   } else {
-    Handle<TypeFeedbackVector> vector =
-        handle(current_feedback_vector(), isolate());
-
-    HValue* key = Add<HConstant>(name);
-    HValue* vector_value = Add<HConstant>(vector);
-    HValue* slot_value = Add<HConstant>(vector->GetIndex(slot));
-    HValue* values[] = {context(), object,     key,
-                        value,     slot_value, vector_value};
-
-    if (current_feedback_vector()->GetKind(slot) ==
-        FeedbackVectorSlotKind::KEYED_STORE_IC) {
+    HValue* values[] = {object, key, value, slot_value, vector_value};
+    if (vector->GetKind(slot) == FeedbackVectorSlotKind::KEYED_STORE_IC) {
       // It's possible that a keyed store of a constant string was converted
       // to a named store. Here, at the last minute, we need to make sure to
       // use a generic Keyed Store if we are using the type vector, because
@@ -7135,15 +6897,16 @@
       Callable callable = CodeFactory::KeyedStoreICInOptimizedCode(
           isolate(), function_language_mode());
       HValue* stub = Add<HConstant>(callable.code());
-      HCallWithDescriptor* result = New<HCallWithDescriptor>(
-          stub, 0, callable.descriptor(), ArrayVector(values));
+      HCallWithDescriptor* result =
+          New<HCallWithDescriptor>(Code::KEYED_STORE_IC, stub, 0,
+                                   callable.descriptor(), ArrayVector(values));
       return result;
     }
     Callable callable = CodeFactory::StoreICInOptimizedCode(
         isolate(), function_language_mode());
     HValue* stub = Add<HConstant>(callable.code());
     HCallWithDescriptor* result = New<HCallWithDescriptor>(
-        stub, 0, callable.descriptor(), ArrayVector(values));
+        Code::STORE_IC, stub, 0, callable.descriptor(), ArrayVector(values));
     return result;
   }
 }
@@ -7152,23 +6915,28 @@
 HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
     PropertyAccessType access_type, Expression* expr, FeedbackVectorSlot slot,
     HValue* object, HValue* key, HValue* value) {
-  Handle<TypeFeedbackVector> vector =
-      handle(current_feedback_vector(), isolate());
+  Handle<TypeFeedbackVector> vector(current_feedback_vector(), isolate());
+  HValue* vector_value = Add<HConstant>(vector);
+  HValue* slot_value = Add<HConstant>(vector->GetIndex(slot));
+
   if (access_type == LOAD) {
-    HLoadKeyedGeneric* result =
-        New<HLoadKeyedGeneric>(object, key, vector, slot);
+    HValue* values[] = {object, key, slot_value, vector_value};
+
+    Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(isolate());
+    HValue* stub = Add<HConstant>(callable.code());
+    HCallWithDescriptor* result =
+        New<HCallWithDescriptor>(Code::KEYED_LOAD_IC, stub, 0,
+                                 callable.descriptor(), ArrayVector(values));
     return result;
   } else {
-    HValue* vector_value = Add<HConstant>(vector);
-    HValue* slot_value = Add<HConstant>(vector->GetIndex(slot));
-    HValue* values[] = {context(), object,     key,
-                        value,     slot_value, vector_value};
+    HValue* values[] = {object, key, value, slot_value, vector_value};
 
     Callable callable = CodeFactory::KeyedStoreICInOptimizedCode(
         isolate(), function_language_mode());
     HValue* stub = Add<HConstant>(callable.code());
-    HCallWithDescriptor* result = New<HCallWithDescriptor>(
-        stub, 0, callable.descriptor(), ArrayVector(values));
+    HCallWithDescriptor* result =
+        New<HCallWithDescriptor>(Code::KEYED_STORE_IC, stub, 0,
+                                 callable.descriptor(), ArrayVector(values));
     return result;
   }
 }
@@ -7634,6 +7402,16 @@
   function_state()->set_arguments_elements(arguments_elements);
 }
 
+bool HOptimizedGraphBuilder::IsAnyParameterContextAllocated() {
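+  // Used by TryArgumentsAccess: reading arguments[i] directly from the
+  // frame is unsound once a parameter is context allocated, because stores
+  // through the context slot would not be reflected.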
+  int count = current_info()->scope()->num_parameters();
+  for (int i = 0; i < count; ++i) {
+    if (current_info()->scope()->parameter(i)->location() ==
+        VariableLocation::CONTEXT) {
+      return true;
+    }
+  }
+  return false;
+}
 
 bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
   VariableProxy* proxy = expr->obj()->AsVariableProxy();
@@ -7665,6 +7443,10 @@
       result = New<HConstant>(argument_count);
     }
   } else {
+    // We need to take into account the KEYED_LOAD_IC feedback to guard the
+    // HBoundsCheck instructions below.
+    if (!expr->IsMonomorphic() && !expr->IsUninitialized()) return false;
+    if (IsAnyParameterContextAllocated()) return false;
     CHECK_ALIVE_OR_RETURN(VisitForValue(expr->obj(), ARGUMENTS_ALLOWED), true);
     CHECK_ALIVE_OR_RETURN(VisitForValue(expr->key()), true);
     HValue* key = Pop();
@@ -7698,7 +7480,35 @@
   ComputeReceiverTypes(expr, object, &maps, this);
   DCHECK(maps != NULL);
 
+  // Check for a special case: an access via a single map to the global
+  // proxy can also be handled monomorphically.
   if (maps->length() > 0) {
+    Handle<Object> map_constructor =
+        handle(maps->first()->GetConstructor(), isolate());
+    if (map_constructor->IsJSFunction()) {
+      Handle<Context> map_context =
+          handle(Handle<JSFunction>::cast(map_constructor)->context());
+      Handle<Context> current_context(current_info()->context());
+      bool is_same_context_global_proxy_access =
+          maps->length() == 1 &&  // >1 map => fallback to polymorphic
+          maps->first()->IsJSGlobalProxyMap() &&
+          (*map_context == *current_context);
+      if (is_same_context_global_proxy_access) {
+        Handle<JSGlobalObject> global_object(current_info()->global_object());
+        LookupIterator it(global_object, name, LookupIterator::OWN);
+        if (CanInlineGlobalPropertyAccess(&it, access)) {
+          BuildCheckHeapObject(object);
+          Add<HCheckMaps>(object, maps);
+          if (access == LOAD) {
+            InlineGlobalPropertyLoad(&it, expr->id());
+            return nullptr;
+          } else {
+            return InlineGlobalPropertyStore(&it, value, expr->id());
+          }
+        }
+      }
+    }
+
     PropertyAccessInfo info(this, access, maps->first(), name);
     if (!info.CanAccessAsMonomorphic(maps)) {
       HandlePolymorphicNamedFieldAccess(access, expr, slot, ast_id, return_id,
@@ -7861,7 +7671,7 @@
   }
   HValue* arity = Add<HConstant>(argument_count - 1);
 
-  HValue* op_vals[] = {context(), function, arity};
+  HValue* op_vals[] = {function, arity};
 
   Callable callable =
       CodeFactory::Call(isolate(), convert_mode, tail_call_mode);
@@ -7883,13 +7693,13 @@
   }
   int arity = argument_count - 1;
   Handle<TypeFeedbackVector> vector(current_feedback_vector(), isolate());
+  HValue* arity_val = Add<HConstant>(arity);
   HValue* index_val = Add<HConstant>(vector->GetIndex(slot));
   HValue* vector_val = Add<HConstant>(vector);
 
-  HValue* op_vals[] = {context(), function, index_val, vector_val};
-
+  HValue* op_vals[] = {function, arity_val, index_val, vector_val};
   Callable callable = CodeFactory::CallICInOptimizedCode(
-      isolate(), arity, convert_mode, tail_call_mode);
+      isolate(), convert_mode, tail_call_mode);
   HConstant* stub = Add<HConstant>(callable.code());
 
   return New<HCallWithDescriptor>(stub, argument_count, callable.descriptor(),
@@ -8241,13 +8051,13 @@
   // Parse and allocate variables.
   // Use the same AstValueFactory for creating strings in the sub-compilation
   // step, but don't transfer ownership to target_info.
-  ParseInfo parse_info(zone(), target);
+  Handle<SharedFunctionInfo> target_shared(target->shared());
+  ParseInfo parse_info(zone(), target_shared);
   parse_info.set_ast_value_factory(
       top_info()->parse_info()->ast_value_factory());
   parse_info.set_ast_value_factory_owned(false);
 
   CompilationInfo target_info(&parse_info, target);
-  Handle<SharedFunctionInfo> target_shared(target->shared());
 
   if (inlining_kind != CONSTRUCT_CALL_RETURN &&
       IsClassConstructor(target_shared->kind())) {
@@ -8315,11 +8125,10 @@
   }
 
   // All declarations must be inlineable.
-  ZoneList<Declaration*>* decls = target_info.scope()->declarations();
-  int decl_count = decls->length();
-  for (int i = 0; i < decl_count; ++i) {
-    if (decls->at(i)->IsFunctionDeclaration() ||
-        !decls->at(i)->proxy()->var()->IsStackAllocated()) {
+  Declaration::List* decls = target_info.scope()->declarations();
+  for (Declaration* decl : *decls) {
+    if (decl->IsFunctionDeclaration() ||
+        !decl->proxy()->var()->IsStackAllocated()) {
       TraceInline(target, caller, "target has non-trivial declaration");
       return false;
     }
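// ZoneList<Declaration*> becomes Declaration::List, iterated with a
// range-based for that yields Declaration* directly. In V8 this is a
// threaded (intrusive) list; the self-contained sketch below shows the
// iteration pattern such a list must support (names and layout here are
// illustrative, not V8's actual ThreadedList):

template <typename T>
class ThreadedListSketch {  // T must carry its own T* next field
 public:
  class Iterator {
   public:
    explicit Iterator(T* node) : node_(node) {}
    T* operator*() const { return node_; }  // yields T*, not T&
    Iterator& operator++() {
      node_ = node_->next;
      return *this;
    }
    bool operator!=(const Iterator& other) const {
      return node_ != other.node_;
    }

   private:
    T* node_;
  };

  void Add(T* node) {  // node->next must still be null
    *tail_ = node;
    tail_ = &node->next;
  }
  Iterator begin() const { return Iterator(head_); }
  Iterator end() const { return Iterator(nullptr); }

 private:
  T* head_ = nullptr;
  T** tail_ = &head_;
};

struct DeclarationSketch { DeclarationSketch* next = nullptr; };
// Usage mirrors the hunk above:
//   ThreadedListSketch<DeclarationSketch> decls;
//   for (DeclarationSketch* decl : decls) { /* ... */ }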
@@ -8335,7 +8144,8 @@
   // Remember that we inlined this function. This needs to be called right
   // after the EnsureDeoptimizationSupport call so that the code flusher
   // does not remove the code with the deoptimization support.
-  top_info()->AddInlinedFunction(target_info.shared_info());
+  int inlining_id = top_info()->AddInlinedFunction(target_info.shared_info(),
+                                                   source_position());
 
   // ----------------------------------------------------------------
   // After this point, we've made a decision to inline this function (so
@@ -8351,9 +8161,8 @@
            &bounds_)
       .Run();
 
-  int inlining_id = 0;
   if (is_tracking_positions()) {
-    inlining_id = TraceInlinedFunction(target_shared, source_position());
+    TraceInlinedFunction(target_shared, source_position(), inlining_id);
   }
 
   // Save the pending call context. Set up new one for the inlined function.
@@ -8404,6 +8213,7 @@
   if (is_tracking_positions()) {
     enter_inlined->set_inlining_id(inlining_id);
   }
+
   function_state()->set_entry(enter_inlined);
 
   VisitDeclarations(target_info.scope()->declarations());
@@ -9011,7 +8821,7 @@
                                           copy_kind, ALLOW_RETURN_HOLE);
               HStoreKeyed* store = Add<HStoreKeyed>(elements, new_key, element,
                                                     nullptr, copy_kind);
-              store->SetFlag(HValue::kAllowUndefinedAsNaN);
+              store->SetFlag(HValue::kTruncatingToNumber);
             }
             loop.EndBody();
 
@@ -9092,6 +8902,7 @@
 
 bool HOptimizedGraphBuilder::TryInlineApiFunctionCall(Call* expr,
                                                       HValue* receiver) {
+  if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
   Handle<JSFunction> function = expr->target();
   int argc = expr->arguments()->length();
   SmallMapList receiver_maps;
@@ -9104,6 +8915,7 @@
     Call* expr,
     HValue* receiver,
     SmallMapList* receiver_maps) {
+  if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
   Handle<JSFunction> function = expr->target();
   int argc = expr->arguments()->length();
   return TryInlineApiCall(function, receiver, receiver_maps, argc, expr->id(),
@@ -9113,6 +8925,7 @@
 bool HOptimizedGraphBuilder::TryInlineApiGetter(Handle<Object> function,
                                                 Handle<Map> receiver_map,
                                                 BailoutId ast_id) {
+  if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
   SmallMapList receiver_maps(1, zone());
   receiver_maps.Add(receiver_map, zone());
   return TryInlineApiCall(function,
@@ -9136,6 +8949,7 @@
     Handle<Object> function, HValue* receiver, SmallMapList* receiver_maps,
     int argc, BailoutId ast_id, ApiCallType call_type,
     TailCallMode syntactic_tail_call_mode) {
+  if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
   if (function->IsJSFunction() &&
       Handle<JSFunction>::cast(function)->context()->native_context() !=
           top_info()->closure()->context()->native_context()) {
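// The V8_UNLIKELY(FLAG_runtime_stats) early-outs added to all four
// TryInlineApi* entry points above disable API-call inlining while runtime
// call statistics are being collected, presumably so inlined fast paths
// cannot bypass the counters. V8_UNLIKELY is the usual branch-prediction
// hint, roughly:
//
//   #define V8_UNLIKELY(condition) (__builtin_expect(!!(condition), false))
//
// (paraphrased from v8config.h; the exact form depends on compiler support).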
@@ -9236,7 +9050,7 @@
                                             isolate());
   HValue* api_function_address = Add<HConstant>(ExternalReference(ref));
 
-  HValue* op_vals[] = {context(), Add<HConstant>(function), call_data, holder,
+  HValue* op_vals[] = {Add<HConstant>(function), call_data, holder,
                        api_function_address};
 
   HInstruction* call = nullptr;
@@ -9745,7 +9559,7 @@
                                      syntactic_tail_call_mode, tail_call_mode);
     } else {
       PushArgumentsFromEnvironment(argument_count);
-      if (expr->is_uninitialized() && expr->IsUsingCallFeedbackICSlot()) {
+      if (expr->is_uninitialized()) {
         // We've never seen this call before, so let's have Crankshaft learn
         // through the type vector.
         call = NewCallFunctionViaIC(function, argument_count,
@@ -9975,7 +9789,7 @@
   }
 
   HValue* arity = Add<HConstant>(argument_count - 1);
-  HValue* op_vals[] = {context(), function, function, arity};
+  HValue* op_vals[] = {function, function, arity};
   Callable callable = CodeFactory::Construct(isolate());
   HConstant* stub = Add<HConstant>(callable.code());
   PushArgumentsFromEnvironment(argument_count);
@@ -10232,9 +10046,9 @@
   HValue* byte_offset;
   bool is_zero_byte_offset;
 
-  if (arguments->at(kByteOffsetArg)->IsLiteral()
-      && Smi::FromInt(0) ==
-      *static_cast<Literal*>(arguments->at(kByteOffsetArg))->value()) {
+  if (arguments->at(kByteOffsetArg)->IsLiteral() &&
+      Smi::kZero ==
+          *static_cast<Literal*>(arguments->at(kByteOffsetArg))->value()) {
     byte_offset = Add<HConstant>(static_cast<int32_t>(0));
     is_zero_byte_offset = true;
   } else {
@@ -10554,28 +10368,23 @@
   return Representation::Tagged();
 }
 
-
-HInstruction* HOptimizedGraphBuilder::BuildIncrement(
-    bool returns_original_input,
-    CountOperation* expr) {
+HInstruction* HOptimizedGraphBuilder::BuildIncrement(CountOperation* expr) {
   // The input to the count operation is on top of the expression stack.
   Representation rep = RepresentationFor(expr->type());
   if (rep.IsNone() || rep.IsTagged()) {
     rep = Representation::Smi();
   }
 
-  if (returns_original_input) {
-    // We need an explicit HValue representing ToNumber(input).  The
-    // actual HChange instruction we need is (sometimes) added in a later
-    // phase, so it is not available now to be used as an input to HAdd and
-    // as the return value.
-    HInstruction* number_input = AddUncasted<HForceRepresentation>(Pop(), rep);
-    if (!rep.IsDouble()) {
-      number_input->SetFlag(HInstruction::kFlexibleRepresentation);
-      number_input->SetFlag(HInstruction::kCannotBeTagged);
-    }
-    Push(number_input);
+  // We need an explicit HValue representing ToNumber(input).  The
+  // actual HChange instruction we need is (sometimes) added in a later
+  // phase, so it is not available now to be used as an input to HAdd and
+  // as the return value.
+  HInstruction* number_input = AddUncasted<HForceRepresentation>(Pop(), rep);
+  if (!rep.IsDouble()) {
+    number_input->SetFlag(HInstruction::kFlexibleRepresentation);
+    number_input->SetFlag(HInstruction::kCannotBeTagged);
   }
+  Push(number_input);
 
   // The addition has no side effects, so we do not need
   // to simulate the expression stack after this instruction.
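// BuildIncrement loses its returns_original_input parameter: the explicit
// ToNumber(input) value (via HForceRepresentation) is now materialized
// unconditionally rather than only when the count operation must return the
// original input (postfix x++/x--). Both call sites below are updated
// accordingly; the returns_original_input distinction survives only in how
// the callers juggle the environment stack (Top() vs Pop()).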
@@ -10634,7 +10443,7 @@
     DCHECK(prop == NULL);
     CHECK_ALIVE(VisitForValue(target));
 
-    after = BuildIncrement(returns_original_input, expr);
+    after = BuildIncrement(expr);
     input = returns_original_input ? Top() : Pop();
     Push(after);
 
@@ -10650,21 +10459,6 @@
         break;
 
       case VariableLocation::CONTEXT: {
-        // Bail out if we try to mutate a parameter value in a function
-        // using the arguments object.  We do not (yet) correctly handle the
-        // arguments property of the function.
-        if (current_info()->scope()->arguments() != NULL) {
-          // Parameters will rewrite to context slots.  We have no direct
-          // way to detect that the variable is a parameter so we use a
-          // linear search of the parameter list.
-          int count = current_info()->scope()->num_parameters();
-          for (int i = 0; i < count; ++i) {
-            if (var == current_info()->scope()->parameter(i)) {
-              return Bailout(kAssignmentToParameterInArgumentsObject);
-            }
-          }
-        }
-
         HValue* context = BuildContextChainWalk(var);
         HStoreContextSlot::Mode mode = IsLexicalVariableMode(var->mode())
             ? HStoreContextSlot::kCheckDeoptimize : HStoreContextSlot::kNoCheck;
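// The block removed above was a per-site bailout
// (kAssignmentToParameterInArgumentsObject) that fired when a count operation
// could mutate a parameter rewritten to a context slot in a function using
// the arguments object. Its apparent replacement shows up in the hydrogen.h
// hunk later in this patch, which declares a blanket
// IsAnyParameterContextAllocated() predicate, suggesting the check moved from
// individual assignment sites to a single coarser test.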
@@ -10702,7 +10496,7 @@
 
   CHECK_ALIVE(PushLoad(prop, object, key));
 
-  after = BuildIncrement(returns_original_input, expr);
+  after = BuildIncrement(expr);
 
   if (returns_original_input) {
     input = Pop();
@@ -11041,7 +10835,7 @@
   // inline several instructions (including the two pushes) for every tagged
   // operation in optimized code, which is more expensive, than a stub call.
   if (graph()->info()->IsStub() && is_non_primitive) {
-    HValue* values[] = {context(), left, right};
+    HValue* values[] = {left, right};
 #define GET_STUB(Name)                                                       \
   do {                                                                       \
     Callable callable = CodeFactory::Name(isolate());                        \
@@ -11255,7 +11049,7 @@
     // We need an extra block to maintain edge-split form.
     HBasicBlock* empty_block = graph()->CreateBasicBlock();
     HBasicBlock* eval_right = graph()->CreateBasicBlock();
-    ToBooleanICStub::Types expected(expr->left()->to_boolean_types());
+    ToBooleanHints expected(expr->left()->to_boolean_types());
     HBranch* test = is_logical_and
         ? New<HBranch>(left_value, expected, eval_right, empty_block)
         : New<HBranch>(left_value, expected, empty_block, eval_right);
@@ -11325,12 +11119,6 @@
       BuildBinaryOperation(expr, left, right,
           ast_context()->IsEffect() ? NO_PUSH_BEFORE_SIMULATE
                                     : PUSH_BEFORE_SIMULATE);
-  if (is_tracking_positions() && result->IsBinaryOperation()) {
-    HBinaryOperation::cast(result)->SetOperandPositions(
-        zone(),
-        ScriptPositionToSourcePosition(expr->left()->position()),
-        ScriptPositionToSourcePosition(expr->right()->position()));
-  }
   return ast_context()->ReturnValue(result);
 }
 
@@ -11454,7 +11242,7 @@
 
     Callable callable = CodeFactory::InstanceOf(isolate());
     HValue* stub = Add<HConstant>(callable.code());
-    HValue* values[] = {context(), left, right};
+    HValue* values[] = {left, right};
     HCallWithDescriptor* result = New<HCallWithDescriptor>(
         stub, 0, callable.descriptor(), ArrayVector(values));
     result->set_type(HType::Boolean());
@@ -11463,7 +11251,7 @@
   } else if (op == Token::IN) {
     Callable callable = CodeFactory::HasProperty(isolate());
     HValue* stub = Add<HConstant>(callable.code());
-    HValue* values[] = {context(), left, right};
+    HValue* values[] = {left, right};
     HInstruction* result =
         New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
                                  Vector<HValue*>(values, arraysize(values)));
@@ -11515,27 +11303,35 @@
         // The caller expects a branch instruction, so make it happy.
         return New<HBranch>(graph()->GetConstantTrue());
       }
-      // Can we get away with map check and not instance type check?
-      HValue* operand_to_check =
-          left->block()->block_id() < right->block()->block_id() ? left : right;
-      if (combined_type->IsClass()) {
-        Handle<Map> map = combined_type->AsClass()->Map();
-        AddCheckMap(operand_to_check, map);
-        HCompareObjectEqAndBranch* result =
-            New<HCompareObjectEqAndBranch>(left, right);
-        if (is_tracking_positions()) {
-          result->set_operand_position(zone(), 0, left_position);
-          result->set_operand_position(zone(), 1, right_position);
+      if (op == Token::EQ) {
+        // For abstract equality we need to check both sides are receivers.
+        if (combined_type->IsClass()) {
+          Handle<Map> map = combined_type->AsClass()->Map();
+          AddCheckMap(left, map);
+          AddCheckMap(right, map);
+        } else {
+          BuildCheckHeapObject(left);
+          Add<HCheckInstanceType>(left, HCheckInstanceType::IS_JS_RECEIVER);
+          BuildCheckHeapObject(right);
+          Add<HCheckInstanceType>(right, HCheckInstanceType::IS_JS_RECEIVER);
         }
-        return result;
       } else {
-        BuildCheckHeapObject(operand_to_check);
-        Add<HCheckInstanceType>(operand_to_check,
-                                HCheckInstanceType::IS_JS_RECEIVER);
-        HCompareObjectEqAndBranch* result =
-            New<HCompareObjectEqAndBranch>(left, right);
-        return result;
+        // For strict equality we only need to check one side.
+        HValue* operand_to_check =
+            left->block()->block_id() < right->block()->block_id() ? left
+                                                                   : right;
+        if (combined_type->IsClass()) {
+          Handle<Map> map = combined_type->AsClass()->Map();
+          AddCheckMap(operand_to_check, map);
+        } else {
+          BuildCheckHeapObject(operand_to_check);
+          Add<HCheckInstanceType>(operand_to_check,
+                                  HCheckInstanceType::IS_JS_RECEIVER);
+        }
       }
+      HCompareObjectEqAndBranch* result =
+          New<HCompareObjectEqAndBranch>(left, right);
+      return result;
     } else {
       if (combined_type->IsClass()) {
         // TODO(bmeurer): This is an optimized version of an x < y, x > y,
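// The rewritten receiver-equality code above distinguishes the two operators.
// For strict equality (===) it keeps the old behaviour of checking only one
// operand: x === y with a known receiver on one side can only be true when
// the other side is that same object. For abstract equality (==), both sides
// must be proven receivers before HCompareObjectEqAndBranch is sound, because
// receiver == primitive coerces via ToPrimitive rather than comparing
// identity. Concretely, in plain JavaScript semantics:
//
//   ({}) == "[object Object]"   // true: ToPrimitive({}) is "[object Object]"
//   ({}) === "[object Object]"  // false: no coercion under ===
//
// Checking only one operand for == would wrongly compile the first case as a
// pointer comparison.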
@@ -11573,8 +11369,11 @@
           // We depend on the prototype chain to stay the same, because we
           // also need to deoptimize when someone installs @@toPrimitive
           // or @@toStringTag somewhere in the prototype chain.
-          BuildCheckPrototypeMaps(handle(JSObject::cast(map->prototype())),
-                                  Handle<JSObject>::null());
+          Handle<Object> prototype(map->prototype(), isolate());
+          if (prototype->IsJSObject()) {
+            BuildCheckPrototypeMaps(Handle<JSObject>::cast(prototype),
+                                    Handle<JSObject>::null());
+          }
           AddCheckMap(left, map);
           AddCheckMap(right, map);
           // The caller expects a branch instruction, so make it happy.
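// The guard added above (prototype->IsJSObject()) covers receivers whose map
// has a non-JSObject prototype, e.g. objects with a null prototype, for which
// there is no prototype chain to install stability dependencies on; the old
// code cast unconditionally, which assumes a JSObject prototype. The
// @@toPrimitive/@@toStringTag deopt rationale in the comment is unchanged:
// the chain must stay free of those symbols.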
@@ -11666,9 +11465,6 @@
       HCompareNumericAndBranch* result =
           New<HCompareNumericAndBranch>(left, right, op);
       result->set_observed_input_representation(left_rep, right_rep);
-      if (is_tracking_positions()) {
-        result->SetOperandPositions(zone(), left_position, right_position);
-      }
       return result;
     }
   }
@@ -11967,7 +11763,7 @@
                         kind, ALLOW_RETURN_HOLE);
     HInstruction* store = Add<HStoreKeyed>(object_elements, key_constant,
                                            value_instruction, nullptr, kind);
-    store->SetFlag(HValue::kAllowUndefinedAsNaN);
+    store->SetFlag(HValue::kTruncatingToNumber);
   }
 }
 
@@ -12028,9 +11824,8 @@
   return Bailout(kSuperReference);
 }
 
-
 void HOptimizedGraphBuilder::VisitDeclarations(
-    ZoneList<Declaration*>* declarations) {
+    Declaration::List* declarations) {
   DCHECK(globals_.is_empty());
   AstVisitor<HOptimizedGraphBuilder>::VisitDeclarations(declarations);
   if (!globals_.is_empty()) {
@@ -12154,17 +11949,6 @@
   return ast_context()->ReturnControl(result, call->id());
 }
 
-
-void HOptimizedGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
-  DCHECK(call->arguments()->length() == 1);
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
-  HValue* value = Pop();
-  HHasCachedArrayIndexAndBranch* result =
-      New<HHasCachedArrayIndexAndBranch>(value);
-  return ast_context()->ReturnControl(result, call->id());
-}
-
-
 void HOptimizedGraphBuilder::GenerateIsArray(CallRuntime* call) {
   DCHECK(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -12204,7 +11988,7 @@
   } else {
     Callable callable = CodeFactory::ToInteger(isolate());
     HValue* stub = Add<HConstant>(callable.code());
-    HValue* values[] = {context(), input};
+    HValue* values[] = {input};
     HInstruction* result = New<HCallWithDescriptor>(
         stub, 0, callable.descriptor(), ArrayVector(values));
     return ast_context()->ReturnInstruction(result, call->id());
@@ -12230,7 +12014,7 @@
   } else {
     Callable callable = CodeFactory::ToString(isolate());
     HValue* stub = Add<HConstant>(callable.code());
-    HValue* values[] = {context(), input};
+    HValue* values[] = {input};
     HInstruction* result = New<HCallWithDescriptor>(
         stub, 0, callable.descriptor(), ArrayVector(values));
     return ast_context()->ReturnInstruction(result, call->id());
@@ -12244,7 +12028,7 @@
   Callable callable = CodeFactory::ToLength(isolate());
   HValue* input = Pop();
   HValue* stub = Add<HConstant>(callable.code());
-  HValue* values[] = {context(), input};
+  HValue* values[] = {input};
   HInstruction* result = New<HCallWithDescriptor>(
       stub, 0, callable.descriptor(), ArrayVector(values));
   return ast_context()->ReturnInstruction(result, call->id());
@@ -12336,7 +12120,7 @@
   HValue* to = Pop();
   HValue* from = Pop();
   HValue* string = Pop();
-  HValue* values[] = {context(), string, from, to};
+  HValue* values[] = {string, from, to};
   HInstruction* result = New<HCallWithDescriptor>(
       stub, 0, callable.descriptor(), ArrayVector(values));
   result->set_type(HType::String());
@@ -12349,7 +12133,7 @@
   CHECK_ALIVE(VisitExpressions(call->arguments()));
   FastNewObjectStub stub(isolate());
   FastNewObjectDescriptor descriptor(isolate());
-  HValue* values[] = {context(), Pop(), Pop()};
+  HValue* values[] = {Pop(), Pop()};
   HConstant* stub_value = Add<HConstant>(stub.GetCode());
   HInstruction* result =
       New<HCallWithDescriptor>(stub_value, 0, descriptor, ArrayVector(values));
@@ -12366,48 +12150,13 @@
   HValue* subject = Pop();
   HValue* regexp_object = Pop();
   HValue* stub = Add<HConstant>(callable.code());
-  HValue* values[] = {context(), regexp_object, subject, index,
-                      last_match_info};
+  HValue* values[] = {regexp_object, subject, index, last_match_info};
   HInstruction* result = New<HCallWithDescriptor>(
       stub, 0, callable.descriptor(), ArrayVector(values));
   return ast_context()->ReturnInstruction(result, call->id());
 }
 
 
-void HOptimizedGraphBuilder::GenerateRegExpFlags(CallRuntime* call) {
-  DCHECK_EQ(1, call->arguments()->length());
-  CHECK_ALIVE(VisitExpressions(call->arguments()));
-  HValue* regexp = Pop();
-  HInstruction* result =
-      New<HLoadNamedField>(regexp, nullptr, HObjectAccess::ForJSRegExpFlags());
-  return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateRegExpSource(CallRuntime* call) {
-  DCHECK_EQ(1, call->arguments()->length());
-  CHECK_ALIVE(VisitExpressions(call->arguments()));
-  HValue* regexp = Pop();
-  HInstruction* result =
-      New<HLoadNamedField>(regexp, nullptr, HObjectAccess::ForJSRegExpSource());
-  return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Construct a RegExp exec result with two in-object properties.
-void HOptimizedGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
-  DCHECK_EQ(3, call->arguments()->length());
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
-  HValue* input = Pop();
-  HValue* index = Pop();
-  HValue* length = Pop();
-  HValue* result = BuildRegExpConstructResult(length, index, input);
-  return ast_context()->ReturnValue(result);
-}
-
-
 // Fast support for number to string.
 void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
   DCHECK_EQ(1, call->arguments()->length());
@@ -12426,8 +12175,7 @@
   PushArgumentsFromEnvironment(call->arguments()->length() - 1);
   HValue* trampoline = Add<HConstant>(isolate()->builtins()->Call());
   HValue* target = Pop();
-  HValue* values[] = {context(), target,
-                      Add<HConstant>(call->arguments()->length() - 2)};
+  HValue* values[] = {target, Add<HConstant>(call->arguments()->length() - 2)};
   HInstruction* result =
       New<HCallWithDescriptor>(trampoline, call->arguments()->length() - 1,
                                descriptor, ArrayVector(values));
@@ -12615,16 +12363,6 @@
   return ast_context()->ReturnValue(graph()->GetConstantUndefined());
 }
 
-
-void HOptimizedGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
-  DCHECK(call->arguments()->length() == 1);
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
-  HValue* value = Pop();
-  HGetCachedArrayIndex* result = New<HGetCachedArrayIndex>(value);
-  return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
 void HOptimizedGraphBuilder::GenerateDebugBreakInOptimizedCode(
     CallRuntime* call) {
   Add<HDebugBreak>();
@@ -13095,11 +12833,11 @@
         PrintIndent();
         std::ostringstream os;
         os << "0 " << uses << " " << NameOf(instruction) << " " << *instruction;
-        if (instruction->has_position() && instruction->position().raw() != 0) {
+        if (instruction->has_position()) {
           const SourcePosition pos = instruction->position();
           os << " pos:";
-          if (pos.inlining_id() != 0) os << pos.inlining_id() << "_";
-          os << pos.position();
+          if (pos.isInlined()) os << "inlining(" << pos.InliningId() << "),";
+          os << pos.ScriptOffset();
         }
         os << " <|@\n";
         trace_.Add(os.str().c_str());
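// Trace-format consequence: in the hydrogen .cfg trace, an instruction's
// position is now printed as "pos:inlining(<id>),<offset>" when inlined and
// "pos:<offset>" otherwise, using the new SourcePosition accessors
// (isInlined/InliningId/ScriptOffset) in place of the old "<id>_<position>"
// form. Tools that parse these traces need to accept the new syntax.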
diff --git a/src/crankshaft/hydrogen.h b/src/crankshaft/hydrogen.h
index d2f1637..9f2508a 100644
--- a/src/crankshaft/hydrogen.h
+++ b/src/crankshaft/hydrogen.h
@@ -37,8 +37,8 @@
  public:
   explicit HCompilationJob(Handle<JSFunction> function)
       : CompilationJob(function->GetIsolate(), &info_, "Crankshaft"),
-        zone_(function->GetIsolate()->allocator()),
-        parse_info_(&zone_, function),
+        zone_(function->GetIsolate()->allocator(), ZONE_NAME),
+        parse_info_(&zone_, handle(function->shared())),
         info_(&parse_info_, function),
         graph_(nullptr),
         chunk_(nullptr) {}
@@ -318,12 +318,6 @@
   HStackCheck* stack_check_;
 };
 
-struct HInlinedFunctionInfo {
-  explicit HInlinedFunctionInfo(int start_position)
-      : start_position(start_position) {}
-  int start_position;
-};
-
 class HGraph final : public ZoneObject {
  public:
   explicit HGraph(CompilationInfo* info, CallInterfaceDescriptor descriptor);
@@ -475,14 +469,6 @@
   int TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
                            SourcePosition position);
 
-  // Converts given SourcePosition to the absolute offset from the start of
-  // the corresponding script.
-  int SourcePositionToScriptPosition(SourcePosition position);
-
-  ZoneVector<HInlinedFunctionInfo>& inlined_function_infos() {
-    return inlined_function_infos_;
-  }
-
  private:
   HConstant* ReinsertConstantIfNecessary(HConstant* constant);
   HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
@@ -528,8 +514,6 @@
   int no_side_effects_scope_count_;
   bool disallow_adding_new_values_;
 
-  ZoneVector<HInlinedFunctionInfo> inlined_function_infos_;
-
   DISALLOW_COPY_AND_ASSIGN(HGraph);
 };
 
@@ -1073,7 +1057,6 @@
         current_block_(NULL),
         scope_(info->scope()),
         position_(SourcePosition::Unknown()),
-        start_position_(0),
         track_positions_(track_positions) {}
   virtual ~HGraphBuilder() {}
 
@@ -1410,28 +1393,6 @@
   HValue* BuildToNumber(HValue* input);
   HValue* BuildToObject(HValue* receiver);
 
-  void BuildJSObjectCheck(HValue* receiver,
-                          int bit_field_mask);
-
-  // Checks a key value that's being used for a keyed element access context. If
-  // the key is a index, i.e. a smi or a number in a unique string with a cached
-  // numeric value, the "true" of the continuation is joined. Otherwise,
-  // if the key is a name or a unique string, the "false" of the continuation is
-  // joined. Otherwise, a deoptimization is triggered. In both paths of the
-  // continuation, the key is pushed on the top of the environment.
-  void BuildKeyedIndexCheck(HValue* key,
-                            HIfContinuation* join_continuation);
-
-  // Checks the properties of an object if they are in dictionary case, in which
-  // case "true" of continuation is taken, otherwise the "false"
-  void BuildTestForDictionaryProperties(HValue* object,
-                                        HIfContinuation* continuation);
-
-  void BuildNonGlobalObjectCheck(HValue* receiver);
-
-  HValue* BuildKeyedLookupCacheHash(HValue* object,
-                                    HValue* key);
-
   HValue* BuildUncheckedDictionaryElementLoad(HValue* receiver,
                                               HValue* elements, HValue* key,
                                               HValue* hash);
@@ -1439,10 +1400,6 @@
   // ES6 section 7.4.7 CreateIterResultObject ( value, done )
   HValue* BuildCreateIterResultObject(HValue* value, HValue* done);
 
-  HValue* BuildRegExpConstructResult(HValue* length,
-                                     HValue* index,
-                                     HValue* input);
-
   // Allocates a new object according with the given allocation properties.
   HAllocate* BuildAllocate(HValue* object_size,
                            HType type,
@@ -1870,28 +1827,25 @@
  protected:
   void SetSourcePosition(int position) {
     if (position != kNoSourcePosition) {
-      position_.set_position(position - start_position_);
+      position_.SetScriptOffset(position);
     }
     // Otherwise position remains unknown.
   }
 
-  void EnterInlinedSource(int start_position, int id) {
+  void EnterInlinedSource(int inlining_id) {
     if (is_tracking_positions()) {
-      start_position_ = start_position;
-      position_.set_inlining_id(id);
+      position_.SetInliningId(inlining_id);
     }
   }
 
   // Convert the given absolute offset from the start of the script to
   // the SourcePosition assuming that this position corresponds to the
-  // same function as current position_.
+  // same function as position_.
   SourcePosition ScriptPositionToSourcePosition(int position) {
     if (position == kNoSourcePosition) {
       return SourcePosition::Unknown();
     }
-    SourcePosition pos = position_;
-    pos.set_position(position - start_position_);
-    return pos;
+    return SourcePosition(position, position_.InliningId());
   }
 
   SourcePosition source_position() { return position_; }
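// The position plumbing above drops start_position_ entirely: a
// SourcePosition now carries an absolute script offset plus an inlining id,
// instead of an offset relative to the current function's start. A minimal,
// self-contained sketch of the interface this file relies on (the real class
// lives in src/source-position.h; the kNotInlined sentinel value is an
// assumption):

class SourcePositionSketch {
 public:
  static const int kNotInlined = -1;  // assumed sentinel
  explicit SourcePositionSketch(int script_offset,
                                int inlining_id = kNotInlined)
      : script_offset_(script_offset), inlining_id_(inlining_id) {}

  int ScriptOffset() const { return script_offset_; }
  void SetScriptOffset(int offset) { script_offset_ = offset; }
  int InliningId() const { return inlining_id_; }
  void SetInliningId(int id) { inlining_id_ = id; }
  bool isInlined() const { return inlining_id_ != kNotInlined; }

 private:
  int script_offset_;  // absolute offset from the start of the script
  int inlining_id_;    // index into the per-code inlining-positions table
};

// With that, ScriptPositionToSourcePosition above reduces to constructing
//   SourcePositionSketch(position, position_.InliningId());
// i.e. there is no more rebasing by start_position_.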
@@ -1899,8 +1853,8 @@
 
   bool is_tracking_positions() { return track_positions_; }
 
-  int TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
-                           SourcePosition position);
+  void TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
+                            SourcePosition position, int inlining_id);
 
   HValue* BuildAllocateEmptyArrayBuffer(HValue* byte_length);
   template <typename ViewClass>
@@ -1923,7 +1877,6 @@
   HBasicBlock* current_block_;
   Scope* scope_;
   SourcePosition position_;
-  int start_position_;
   bool track_positions_;
 };
 
@@ -2142,7 +2095,7 @@
 
   FunctionState* function_state() const { return function_state_; }
 
-  void VisitDeclarations(ZoneList<Declaration*>* declarations);
+  void VisitDeclarations(Declaration::List* declarations);
 
   AstTypeBounds* bounds() { return &bounds_; }
 
@@ -2220,15 +2173,10 @@
   F(ToLength)                          \
   F(ToNumber)                          \
   F(IsJSReceiver)                      \
-  F(HasCachedArrayIndex)               \
-  F(GetCachedArrayIndex)               \
   F(DebugBreakInOptimizedCode)         \
   F(StringCharCodeAt)                  \
   F(SubString)                         \
   F(RegExpExec)                        \
-  F(RegExpConstructResult)             \
-  F(RegExpFlags)                       \
-  F(RegExpSource)                      \
   F(NumberToString)                    \
   F(DebugIsActive)                     \
   /* Typed Arrays */                   \
@@ -2364,13 +2312,15 @@
 #undef DECLARE_VISIT
 
  private:
-  // Helpers for flow graph construction.
-  enum GlobalPropertyAccess {
-    kUseCell,
-    kUseGeneric
-  };
-  GlobalPropertyAccess LookupGlobalProperty(Variable* var, LookupIterator* it,
-                                            PropertyAccessType access_type);
+  bool CanInlineGlobalPropertyAccess(Variable* var, LookupIterator* it,
+                                     PropertyAccessType access_type);
+
+  bool CanInlineGlobalPropertyAccess(LookupIterator* it,
+                                     PropertyAccessType access_type);
+
+  void InlineGlobalPropertyLoad(LookupIterator* it, BailoutId ast_id);
+  HInstruction* InlineGlobalPropertyStore(LookupIterator* it, HValue* value,
+                                          BailoutId ast_id);
 
   void EnsureArgumentsArePushedForAccess();
   bool TryArgumentsAccess(Property* expr);
@@ -2716,8 +2666,7 @@
       HValue* left,
       HValue* right,
       PushBeforeSimulateBehavior push_sim_result);
-  HInstruction* BuildIncrement(bool returns_original_input,
-                               CountOperation* expr);
+  HInstruction* BuildIncrement(CountOperation* expr);
   HInstruction* BuildKeyedGeneric(PropertyAccessType access_type,
                                   Expression* expr, FeedbackVectorSlot slot,
                                   HValue* object, HValue* key, HValue* value);
@@ -2829,6 +2778,8 @@
 
   bool CanBeFunctionApplyArguments(Call* expr);
 
+  bool IsAnyParameterContextAllocated();
+
   // The translation state of the currently-being-translated function.
   FunctionState* function_state_;
 
diff --git a/src/crankshaft/ia32/lithium-codegen-ia32.cc b/src/crankshaft/ia32/lithium-codegen-ia32.cc
index 6c121dd..d9044ca 100644
--- a/src/crankshaft/ia32/lithium-codegen-ia32.cc
+++ b/src/crankshaft/ia32/lithium-codegen-ia32.cc
@@ -327,8 +327,7 @@
 
       HValue* value =
           instructions_->at(code->instruction_index())->hydrogen_value();
-      RecordAndWritePosition(
-          chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+      RecordAndWritePosition(value->position());
 
       Comment(";;; <@%d,#%d> "
               "-------------------- Deferred %s --------------------",
@@ -1854,16 +1853,15 @@
       __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
       EmitBranch(instr, not_equal);
     } else {
-      ToBooleanICStub::Types expected =
-          instr->hydrogen()->expected_input_types();
-      if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+      ToBooleanHints expected = instr->hydrogen()->expected_input_types();
+      if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
 
-      if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
+      if (expected & ToBooleanHint::kUndefined) {
         // undefined -> false.
         __ cmp(reg, factory()->undefined_value());
         __ j(equal, instr->FalseLabel(chunk_));
       }
-      if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
+      if (expected & ToBooleanHint::kBoolean) {
         // true -> true.
         __ cmp(reg, factory()->true_value());
         __ j(equal, instr->TrueLabel(chunk_));
@@ -1871,30 +1869,30 @@
         __ cmp(reg, factory()->false_value());
         __ j(equal, instr->FalseLabel(chunk_));
       }
-      if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
+      if (expected & ToBooleanHint::kNull) {
         // 'null' -> false.
         __ cmp(reg, factory()->null_value());
         __ j(equal, instr->FalseLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanICStub::SMI)) {
+      if (expected & ToBooleanHint::kSmallInteger) {
         // Smis: 0 -> false, all other -> true.
         __ test(reg, Operand(reg));
         __ j(equal, instr->FalseLabel(chunk_));
         __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
-      } else if (expected.NeedsMap()) {
+      } else if (expected & ToBooleanHint::kNeedsMap) {
         // If we need a map later and have a Smi -> deopt.
         __ test(reg, Immediate(kSmiTagMask));
         DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
       }
 
       Register map = no_reg;  // Keep the compiler happy.
-      if (expected.NeedsMap()) {
+      if (expected & ToBooleanHint::kNeedsMap) {
         map = ToRegister(instr->temp());
         DCHECK(!map.is(reg));
         __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
 
-        if (expected.CanBeUndetectable()) {
+        if (expected & ToBooleanHint::kCanBeUndetectable) {
           // Undetectable -> false.
           __ test_b(FieldOperand(map, Map::kBitFieldOffset),
                     Immediate(1 << Map::kIsUndetectable));
@@ -1902,13 +1900,13 @@
         }
       }
 
-      if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
+      if (expected & ToBooleanHint::kReceiver) {
         // spec object -> true.
         __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
         __ j(above_equal, instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanICStub::STRING)) {
+      if (expected & ToBooleanHint::kString) {
         // String value -> false iff empty.
         Label not_string;
         __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
@@ -1919,19 +1917,19 @@
         __ bind(&not_string);
       }
 
-      if (expected.Contains(ToBooleanICStub::SYMBOL)) {
+      if (expected & ToBooleanHint::kSymbol) {
         // Symbol value -> true.
         __ CmpInstanceType(map, SYMBOL_TYPE);
         __ j(equal, instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
+      if (expected & ToBooleanHint::kSimdValue) {
         // SIMD value -> true.
         __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
         __ j(equal, instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
+      if (expected & ToBooleanHint::kHeapNumber) {
         // heap number -> false iff +0, -0, or NaN.
         Label not_heap_number;
         __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
@@ -1945,7 +1943,7 @@
         __ bind(&not_heap_number);
       }
 
-      if (!expected.IsGeneric()) {
+      if (expected != ToBooleanHint::kAny) {
         // We've seen something for the first time -> deopt.
         // This can only happen if we are not generic already.
         DeoptimizeIf(no_condition, instr, DeoptimizeReason::kUnexpectedObject);
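// ToBooleanICStub::Types (a small bitset class with Contains()/IsEmpty()/
// IsGeneric()) is replaced by ToBooleanHints, queried with bitwise AND as in
// the rewritten branches above. A self-contained sketch of the pattern (hint
// names follow the hunk; the underlying values are illustrative):

#include <cstdint>

enum ToBooleanHintSketch : uint16_t {
  kNoneHint = 0,
  kUndefinedHint = 1 << 0,
  kBooleanHint = 1 << 1,
  kNullHint = 1 << 2,
  kSmallIntegerHint = 1 << 3,
  kNeedsMapHint = 1 << 4,
  kAnyHint = (1 << 5) - 1,  // union of all hints above
};
typedef uint16_t ToBooleanHintsSketch;

// Mirrors "if (expected == ToBooleanHint::kNone) expected = kAny;" and the
// per-hint tests such as "expected & ToBooleanHint::kUndefined":
inline bool HasHint(ToBooleanHintsSketch expected, ToBooleanHintSketch hint) {
  if (expected == kNoneHint) expected = kAnyHint;  // never-executed feedback
  return (expected & hint) != 0;
}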
@@ -2192,28 +2190,6 @@
   EmitBranch(instr, BranchCondition(instr->hydrogen()));
 }
 
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
-  Register input = ToRegister(instr->value());
-  Register result = ToRegister(instr->result());
-
-  __ AssertString(input);
-
-  __ mov(result, FieldOperand(input, String::kHashFieldOffset));
-  __ IndexFromHash(result, result);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
-    LHasCachedArrayIndexAndBranch* instr) {
-  Register input = ToRegister(instr->value());
-
-  __ test(FieldOperand(input, String::kHashFieldOffset),
-          Immediate(String::kContainsCachedArrayIndexMask));
-  EmitBranch(instr, equal);
-}
-
-
 // Branches to a label or falls through with the answer in the z flag.  Trashes
 // the temp registers, but not the input.
 void LCodeGen::EmitClassOfTest(Label* is_true,
@@ -2380,35 +2356,6 @@
 }
 
 
-template <class T>
-void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
-  Register vector_register = ToRegister(instr->temp_vector());
-  Register slot_register = LoadWithVectorDescriptor::SlotRegister();
-  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
-  DCHECK(slot_register.is(eax));
-
-  AllowDeferredHandleDereference vector_structure_check;
-  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
-  __ mov(vector_register, vector);
-  // No need to allocate this register.
-  FeedbackVectorSlot slot = instr->hydrogen()->slot();
-  int index = vector->GetIndex(slot);
-  __ mov(slot_register, Immediate(Smi::FromInt(index)));
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->result()).is(eax));
-
-  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  Handle<Code> ic =
-      CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
-          .code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
@@ -2512,18 +2459,6 @@
 }
 
 
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->result()).is(eax));
-
-  __ mov(LoadDescriptor::NameRegister(), instr->name());
-  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
   Register function = ToRegister(instr->function());
   Register temp = ToRegister(instr->temp());
@@ -2690,11 +2625,11 @@
     __ j(not_equal, &done);
     if (info()->IsStub()) {
       // A stub can safely convert the hole to undefined only if the array
-      // protector cell contains (Smi) Isolate::kArrayProtectorValid.
+      // protector cell contains (Smi) Isolate::kProtectorValid.
       // Otherwise it needs to bail out.
       __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
       __ cmp(FieldOperand(result, PropertyCell::kValueOffset),
-             Immediate(Smi::FromInt(Isolate::kArrayProtectorValid)));
+             Immediate(Smi::FromInt(Isolate::kProtectorValid)));
       DeoptimizeIf(not_equal, instr, DeoptimizeReason::kHole);
     }
     __ mov(result, isolate()->factory()->undefined_value());
@@ -2745,18 +2680,6 @@
 }
 
 
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
-
-  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-
-  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   Register result = ToRegister(instr->result());
 
@@ -4317,8 +4240,7 @@
 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                                 Register temp_reg, XMMRegister result_reg,
                                 NumberUntagDMode mode) {
-  bool can_convert_undefined_to_nan =
-      instr->hydrogen()->can_convert_undefined_to_nan();
+  bool can_convert_undefined_to_nan = instr->truncating();
   bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
 
   Label convert, load_smi, done;
@@ -4384,34 +4306,18 @@
   __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));
 
   if (instr->truncating()) {
-    Label no_heap_number, check_bools, check_false;
-
-    // Heap number map check.
+    Label truncate;
+    Label::Distance truncate_distance =
+        DeoptEveryNTimes() ? Label::kFar : Label::kNear;
     __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
            factory()->heap_number_map());
-    __ j(not_equal, &no_heap_number, Label::kNear);
+    __ j(equal, &truncate, truncate_distance);
+    __ push(input_reg);
+    __ CmpObjectType(input_reg, ODDBALL_TYPE, input_reg);
+    __ pop(input_reg);
+    DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotANumberOrOddball);
+    __ bind(&truncate);
     __ TruncateHeapNumberToI(input_reg, input_reg);
-    __ jmp(done);
-
-    __ bind(&no_heap_number);
-    // Check for Oddballs. Undefined/False is converted to zero and True to one
-    // for truncating conversions.
-    __ cmp(input_reg, factory()->undefined_value());
-    __ j(not_equal, &check_bools, Label::kNear);
-    __ Move(input_reg, Immediate(0));
-    __ jmp(done);
-
-    __ bind(&check_bools);
-    __ cmp(input_reg, factory()->true_value());
-    __ j(not_equal, &check_false, Label::kNear);
-    __ Move(input_reg, Immediate(1));
-    __ jmp(done);
-
-    __ bind(&check_false);
-    __ cmp(input_reg, factory()->false_value());
-    DeoptimizeIf(not_equal, instr,
-                 DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
-    __ Move(input_reg, Immediate(0));
   } else {
     XMMRegister scratch = ToDoubleRegister(instr->temp());
     DCHECK(!scratch.is(xmm0));
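// The truncating TaggedToI path above now accepts any oddball, not just
// undefined/true/false: anything that is neither a heap number nor an
// ODDBALL_TYPE object deopts with the new kNotANumberOrOddball reason, and
// oddballs fall through to the same TruncateHeapNumberToI as heap numbers.
// That uniform treatment works because V8 lays out Oddball's raw to-number
// field at the same offset as HeapNumber's value field (an invariant asserted
// elsewhere in the codebase), so one load serves both shapes. The
// input_reg push/pop around CmpObjectType preserves the value while the same
// register doubles as the map scratch.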
@@ -4844,7 +4750,7 @@
   // TODO(3095996): Get rid of this. For now, we need to make the
   // result register contain a valid pointer because it is already
   // contained in the register pointer map.
-  __ Move(result, Immediate(Smi::FromInt(0)));
+  __ Move(result, Immediate(Smi::kZero));
 
   PushSafepointRegistersScope scope(this);
   if (instr->size()->IsRegister()) {
@@ -5148,7 +5054,7 @@
   Register result = ToRegister(instr->result());
   Label load_cache, done;
   __ EnumLength(result, map);
-  __ cmp(result, Immediate(Smi::FromInt(0)));
+  __ cmp(result, Immediate(Smi::kZero));
   __ j(not_equal, &load_cache, Label::kNear);
   __ mov(result, isolate()->factory()->empty_fixed_array());
   __ jmp(&done, Label::kNear);
diff --git a/src/crankshaft/ia32/lithium-ia32.cc b/src/crankshaft/ia32/lithium-ia32.cc
index e6077cc..7272a91 100644
--- a/src/crankshaft/ia32/lithium-ia32.cc
+++ b/src/crankshaft/ia32/lithium-ia32.cc
@@ -224,14 +224,6 @@
   stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
 }
 
-
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if has_cached_array_index(");
-  value()->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
 void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if class_of_test(");
   value()->PrintTo(stream);
@@ -908,16 +900,18 @@
   HValue* value = instr->value();
   Representation r = value->representation();
   HType type = value->type();
-  ToBooleanICStub::Types expected = instr->expected_input_types();
-  if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+  ToBooleanHints expected = instr->expected_input_types();
+  if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
 
   bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
       type.IsJSArray() || type.IsHeapNumber() || type.IsString();
-  LOperand* temp = !easy_case && expected.NeedsMap() ? TempRegister() : NULL;
+  LOperand* temp = !easy_case && (expected & ToBooleanHint::kNeedsMap)
+                       ? TempRegister()
+                       : NULL;
   LInstruction* branch = new(zone()) LBranch(UseRegister(value), temp);
-  if (!easy_case &&
-      ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
-       !expected.IsGeneric())) {
+  if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) &&
+                      (expected & ToBooleanHint::kNeedsMap)) ||
+                     expected != ToBooleanHint::kAny)) {
     branch = AssignEnvironment(branch);
   }
   return branch;
@@ -1700,24 +1694,6 @@
       TempRegister());
 }
 
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
-    HGetCachedArrayIndex* instr)  {
-  DCHECK(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
-
-  return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
-    HHasCachedArrayIndexAndBranch* instr) {
-  DCHECK(instr->value()->representation().IsTagged());
-  return new(zone()) LHasCachedArrayIndexAndBranch(
-      UseRegisterAtStart(instr->value()));
-}
-
-
 LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
     HClassOfTestAndBranch* instr) {
   DCHECK(instr->value()->representation().IsTagged());
@@ -2007,15 +1983,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
-  LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
-  return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
   LInstruction* result =
@@ -2055,17 +2022,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* object =
-      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
-      context, object, vector);
-  return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
     HLoadFunctionPrototype* instr) {
   return AssignEnvironment(DefineAsRegister(
@@ -2124,18 +2080,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* object =
-      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
-  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  LLoadKeyedGeneric* result =
-      new(zone()) LLoadKeyedGeneric(context, object, key, vector);
-  return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
 LOperand* LChunkBuilder::GetStoreKeyedValueOperand(HStoreKeyed* instr) {
   ElementsKind elements_kind = instr->elements_kind();
 
@@ -2470,7 +2414,6 @@
   inner->BindContext(instr->closure_context());
   inner->set_entry(instr);
   current_block_->UpdateEnvironment(inner);
-  chunk_->AddInlinedFunction(instr->shared());
   return NULL;
 }
 
diff --git a/src/crankshaft/ia32/lithium-ia32.h b/src/crankshaft/ia32/lithium-ia32.h
index 816d8fd..13ad4bd 100644
--- a/src/crankshaft/ia32/lithium-ia32.h
+++ b/src/crankshaft/ia32/lithium-ia32.h
@@ -75,9 +75,7 @@
   V(FlooringDivI)                            \
   V(ForInCacheArray)                         \
   V(ForInPrepareMap)                         \
-  V(GetCachedArrayIndex)                     \
   V(Goto)                                    \
-  V(HasCachedArrayIndexAndBranch)            \
   V(HasInPrototypeChainAndBranch)            \
   V(HasInstanceTypeAndBranch)                \
   V(InnerAllocatedObject)                    \
@@ -92,11 +90,8 @@
   V(LoadContextSlot)                         \
   V(LoadFieldByIndex)                        \
   V(LoadFunctionPrototype)                   \
-  V(LoadGlobalGeneric)                       \
   V(LoadKeyed)                               \
-  V(LoadKeyedGeneric)                        \
   V(LoadNamedField)                          \
-  V(LoadNamedGeneric)                        \
   V(LoadRoot)                                \
   V(MathAbs)                                 \
   V(MathClz32)                               \
@@ -1080,35 +1075,6 @@
   void PrintDataTo(StringStream* stream) override;
 };
 
-
-class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LGetCachedArrayIndex(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
-  DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
- public:
-  explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
-                               "has-cached-array-index-and-branch")
-
-  void PrintDataTo(StringStream* stream) override;
-};
-
-
 class LClassOfTestAndBranch final : public LControlInstruction<1, 2> {
  public:
   LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
@@ -1493,25 +1459,6 @@
 };
 
 
-class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
- public:
-  LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    temps_[0] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
 class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 1> {
  public:
   LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
@@ -1576,43 +1523,6 @@
 }
 
 
-class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
- public:
-  LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key,
-                    LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = obj;
-    inputs_[2] = key;
-    temps_[0] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* key() { return inputs_[2]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
-};
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
- public:
-  LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
-    inputs_[0] = context;
-    temps_[0] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-  TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
-};
-
-
 class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadContextSlot(LOperand* context) {
@@ -1975,6 +1885,8 @@
 
   DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
   DECLARE_HYDROGEN_ACCESSOR(Change);
+
+  bool truncating() { return hydrogen()->CanTruncateToNumber(); }
 };
 
 
diff --git a/src/crankshaft/lithium-allocator.cc b/src/crankshaft/lithium-allocator.cc
index d17cd27..aa4459b 100644
--- a/src/crankshaft/lithium-allocator.cc
+++ b/src/crankshaft/lithium-allocator.cc
@@ -513,7 +513,7 @@
 }
 
 LAllocator::LAllocator(int num_values, HGraph* graph)
-    : zone_(graph->isolate()->allocator()),
+    : zone_(graph->isolate()->allocator(), ZONE_NAME),
       chunk_(NULL),
       live_in_sets_(graph->blocks()->length(), zone()),
       live_ranges_(num_values * 2, zone()),
diff --git a/src/crankshaft/lithium-codegen.cc b/src/crankshaft/lithium-codegen.cc
index decc2a5..2d16560 100644
--- a/src/crankshaft/lithium-codegen.cc
+++ b/src/crankshaft/lithium-codegen.cc
@@ -103,9 +103,8 @@
     GenerateBodyInstructionPre(instr);
 
     HValue* value = instr->hydrogen_value();
-    if (!value->position().IsUnknown()) {
-      RecordAndWritePosition(
-        chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+    if (value->position().IsKnown()) {
+      RecordAndWritePosition(value->position());
     }
 
     instr->CompileToNative(codegen);
@@ -141,8 +140,8 @@
 #endif
 }
 
-void LCodeGenBase::RecordAndWritePosition(int pos) {
-  if (pos == kNoSourcePosition) return;
+void LCodeGenBase::RecordAndWritePosition(SourcePosition pos) {
+  if (!pos.IsKnown()) return;
   source_position_table_builder_.AddPosition(masm_->pc_offset(), pos, false);
 }
 
@@ -167,8 +166,7 @@
 void LCodeGenBase::DeoptComment(const Deoptimizer::DeoptInfo& deopt_info) {
   SourcePosition position = deopt_info.position;
   int deopt_id = deopt_info.deopt_id;
-  int raw_position = position.IsUnknown() ? 0 : position.raw();
-  masm()->RecordDeoptReason(deopt_info.deopt_reason, raw_position, deopt_id);
+  masm()->RecordDeoptReason(deopt_info.deopt_reason, position, deopt_id);
 }
 
 
@@ -311,6 +309,26 @@
   }
 }
 
+namespace {
+
+Handle<PodArray<InliningPosition>> CreateInliningPositions(
+    CompilationInfo* info) {
+  const CompilationInfo::InlinedFunctionList& inlined_functions =
+      info->inlined_functions();
+  if (inlined_functions.size() == 0) {
+    return Handle<PodArray<InliningPosition>>::cast(
+        info->isolate()->factory()->empty_byte_array());
+  }
+  Handle<PodArray<InliningPosition>> inl_positions =
+      PodArray<InliningPosition>::New(
+          info->isolate(), static_cast<int>(inlined_functions.size()), TENURED);
+  for (size_t i = 0; i < inlined_functions.size(); ++i) {
+    inl_positions->set(static_cast<int>(i), inlined_functions[i].position);
+  }
+  return inl_positions;
+}
+
+}  // namespace
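// CreateInliningPositions packs each inlined function's InliningPosition into
// a PodArray that PopulateDeoptimizationData stores alongside the other deopt
// tables (data->SetInliningPositions(*inl_pos) below). The empty case reuses
// the canonical empty_byte_array rather than allocating a zero-length array;
// PodArray is a ByteArray specialization, so that cast is benign. Together
// with the per-holder literal ids registered in
// PopulateDeoptimizationLiteralsWithInlinedFunctions, this lets a deoptimizer
// or profiler map an inlining id from a SourcePosition back to the inlined
// SharedFunctionInfo and its source position.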
 
 void LCodeGenBase::PopulateDeoptimizationData(Handle<Code> code) {
   int length = deoptimizations_.length();
@@ -328,9 +346,9 @@
     AllowDeferredHandleDereference allow_handle_dereference;
     data->SetSharedFunctionInfo(*info_->shared_info());
   } else {
-    data->SetSharedFunctionInfo(Smi::FromInt(0));
+    data->SetSharedFunctionInfo(Smi::kZero);
   }
-  data->SetWeakCellCache(Smi::FromInt(0));
+  data->SetWeakCellCache(Smi::kZero);
 
   Handle<FixedArray> literals =
       factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -342,6 +360,9 @@
     data->SetLiteralArray(*literals);
   }
 
+  Handle<PodArray<InliningPosition>> inl_pos = CreateInliningPositions(info_);
+  data->SetInliningPositions(*inl_pos);
+
   data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
   data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
 
@@ -360,16 +381,22 @@
 
 void LCodeGenBase::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
   DCHECK_EQ(0, deoptimization_literals_.length());
-  for (Handle<SharedFunctionInfo> function : chunk()->inlined_functions()) {
-    DefineDeoptimizationLiteral(function);
+  for (CompilationInfo::InlinedFunctionHolder& inlined :
+       info()->inlined_functions()) {
+    if (!inlined.shared_info.is_identical_to(info()->shared_info())) {
+      int index = DefineDeoptimizationLiteral(inlined.shared_info);
+      inlined.RegisterInlinedFunctionId(index);
+    }
   }
   inlined_function_count_ = deoptimization_literals_.length();
 
   // Define deoptimization literals for all unoptimized code objects of inlined
   // functions. This ensures unoptimized code is kept alive by optimized code.
-  AllowDeferredHandleDereference allow_shared_function_info_dereference;
-  for (Handle<SharedFunctionInfo> function : chunk()->inlined_functions()) {
-    DefineDeoptimizationLiteral(handle(function->code()));
+  for (const CompilationInfo::InlinedFunctionHolder& inlined :
+       info()->inlined_functions()) {
+    if (!inlined.shared_info.is_identical_to(info()->shared_info())) {
+      DefineDeoptimizationLiteral(inlined.inlined_code_object_root);
+    }
   }
 }
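// Inlined-function bookkeeping moves from the LChunk (the removed
// AddInlinedFunction/inlined_functions() in the lithium.h hunk below) to
// CompilationInfo::inlined_functions(). Each holder records the deopt-literal
// index of its SharedFunctionInfo via RegisterInlinedFunctionId, which is
// what lets the PodArray<InliningPosition> written by CreateInliningPositions
// refer back to the literal array; the second loop keeps the unoptimized code
// objects of inlinees alive by also defining them as literals.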
 
diff --git a/src/crankshaft/lithium-codegen.h b/src/crankshaft/lithium-codegen.h
index c6bf447..03ece53 100644
--- a/src/crankshaft/lithium-codegen.h
+++ b/src/crankshaft/lithium-codegen.h
@@ -50,7 +50,7 @@
   virtual void GenerateBodyInstructionPost(LInstruction* instr) {}
 
   virtual void EnsureSpaceForLazyDeopt(int space_needed) = 0;
-  void RecordAndWritePosition(int position);
+  void RecordAndWritePosition(SourcePosition position);
 
   int GetNextEmittedBlock() const;
 
diff --git a/src/crankshaft/lithium.cc b/src/crankshaft/lithium.cc
index 8cf3a3f..94d6041 100644
--- a/src/crankshaft/lithium.cc
+++ b/src/crankshaft/lithium.cc
@@ -5,6 +5,7 @@
 #include "src/crankshaft/lithium.h"
 
 #include "src/ast/scopes.h"
+#include "src/codegen.h"
 
 #if V8_TARGET_ARCH_IA32
 #include "src/crankshaft/ia32/lithium-ia32.h"  // NOLINT
@@ -260,7 +261,6 @@
       graph_(graph),
       instructions_(32, info->zone()),
       pointer_maps_(8, info->zone()),
-      inlined_functions_(1, info->zone()),
       deprecation_dependencies_(32, info->zone()),
       stability_dependencies_(8, info->zone()) {}
 
@@ -468,8 +468,6 @@
     code->set_is_crankshafted(true);
 
     CodeGenerator::PrintCode(code, info());
-    DCHECK(!(info()->GetMustNotHaveEagerFrame() &&
-             generator.NeedsEagerFrame()));
     return code;
   }
   assembler.AbortedCodeGeneration();
diff --git a/src/crankshaft/lithium.h b/src/crankshaft/lithium.h
index d04bd56..d67c490 100644
--- a/src/crankshaft/lithium.h
+++ b/src/crankshaft/lithium.h
@@ -660,14 +660,6 @@
   int LookupDestination(int block_id) const;
   Label* GetAssemblyLabel(int block_id) const;
 
-  const ZoneList<Handle<SharedFunctionInfo>>& inlined_functions() const {
-    return inlined_functions_;
-  }
-
-  void AddInlinedFunction(Handle<SharedFunctionInfo> closure) {
-    inlined_functions_.Add(closure, zone());
-  }
-
   void AddDeprecationDependency(Handle<Map> map) {
     DCHECK(!map->is_deprecated());
     if (!map->CanBeDeprecated()) return;
@@ -705,7 +697,6 @@
   BitVector* allocated_double_registers_;
   ZoneList<LInstruction*> instructions_;
   ZoneList<LPointerMap*> pointer_maps_;
-  ZoneList<Handle<SharedFunctionInfo>> inlined_functions_;
   ZoneList<Handle<Map>> deprecation_dependencies_;
   ZoneList<Handle<Map>> stability_dependencies_;
 };
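
With AddInlinedFunction and its backing ZoneList removed, the CompilationInfo becomes the single owner of the inlined-function list and LChunk no longer mirrors it. A simplified sketch of the shape after the change, with stand-in types:

#include <cstdio>
#include <string>
#include <vector>

struct CompilationInfo {
  std::vector<std::string> inlined_functions;  // single source of truth
};

struct LChunk {
  explicit LChunk(CompilationInfo* info) : info_(info) {}
  CompilationInfo* info() const { return info_; }
  CompilationInfo* info_;
  // Note: no inlined_functions_ copy left to keep in sync.
};

int main() {
  CompilationInfo info;
  info.inlined_functions.push_back("inner_fn");
  LChunk chunk(&info);
  // Code generation now reads straight through to the info:
  for (const std::string& f : chunk.info()->inlined_functions)
    std::printf("inlined: %s\n", f.c_str());
  return 0;
}
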
diff --git a/src/crankshaft/mips/lithium-codegen-mips.cc b/src/crankshaft/mips/lithium-codegen-mips.cc
index b24b1c5..abbf208 100644
--- a/src/crankshaft/mips/lithium-codegen-mips.cc
+++ b/src/crankshaft/mips/lithium-codegen-mips.cc
@@ -61,6 +61,25 @@
   Safepoint::DeoptMode deopt_mode_;
 };
 
+LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope(
+    LCodeGen* codegen)
+    : codegen_(codegen) {
+  DCHECK(codegen_->info()->is_calling());
+  DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+  codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+
+  StoreRegistersStateStub stub(codegen_->isolate());
+  codegen_->masm_->push(ra);
+  codegen_->masm_->CallStub(&stub);
+}
+
+LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() {
+  DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+  RestoreRegistersStateStub stub(codegen_->isolate());
+  codegen_->masm_->push(ra);
+  codegen_->masm_->CallStub(&stub);
+  codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+}
 
 #define __ masm()->
 
@@ -267,8 +286,7 @@
 
       HValue* value =
           instructions_->at(code->instruction_index())->hydrogen_value();
-      RecordAndWritePosition(
-          chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+      RecordAndWritePosition(value->position());
 
       Comment(";;; <@%d,#%d> "
               "-------------------- Deferred %s --------------------",
@@ -1935,43 +1953,42 @@
       __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
       EmitBranch(instr, ne, at, Operand(zero_reg));
     } else {
-      ToBooleanICStub::Types expected =
-          instr->hydrogen()->expected_input_types();
+      ToBooleanHints expected = instr->hydrogen()->expected_input_types();
       // Avoid deopts in the case where we've never executed this path before.
-      if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+      if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
 
-      if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
+      if (expected & ToBooleanHint::kUndefined) {
         // undefined -> false.
         __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
       }
-      if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
+      if (expected & ToBooleanHint::kBoolean) {
         // Boolean -> its value.
         __ LoadRoot(at, Heap::kTrueValueRootIndex);
         __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
         __ LoadRoot(at, Heap::kFalseValueRootIndex);
         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
       }
-      if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
+      if (expected & ToBooleanHint::kNull) {
         // 'null' -> false.
         __ LoadRoot(at, Heap::kNullValueRootIndex);
         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
       }
 
-      if (expected.Contains(ToBooleanICStub::SMI)) {
+      if (expected & ToBooleanHint::kSmallInteger) {
         // Smis: 0 -> false, all other -> true.
         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
         __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
-      } else if (expected.NeedsMap()) {
+      } else if (expected & ToBooleanHint::kNeedsMap) {
         // If we need a map later and have a Smi -> deopt.
         __ SmiTst(reg, at);
         DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
       }
 
       const Register map = scratch0();
-      if (expected.NeedsMap()) {
+      if (expected & ToBooleanHint::kNeedsMap) {
         __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
-        if (expected.CanBeUndetectable()) {
+        if (expected & ToBooleanHint::kCanBeUndetectable) {
           // Undetectable -> false.
           __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
           __ And(at, at, Operand(1 << Map::kIsUndetectable));
@@ -1979,14 +1996,14 @@
         }
       }
 
-      if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
+      if (expected & ToBooleanHint::kReceiver) {
         // spec object -> true.
         __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
         __ Branch(instr->TrueLabel(chunk_),
                   ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
       }
 
-      if (expected.Contains(ToBooleanICStub::STRING)) {
+      if (expected & ToBooleanHint::kString) {
         // String value -> false iff empty.
         Label not_string;
         __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
@@ -1997,14 +2014,14 @@
         __ bind(&not_string);
       }
 
-      if (expected.Contains(ToBooleanICStub::SYMBOL)) {
+      if (expected & ToBooleanHint::kSymbol) {
         // Symbol value -> true.
         const Register scratch = scratch1();
         __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
         __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
       }
 
-      if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
+      if (expected & ToBooleanHint::kSimdValue) {
         // SIMD value -> true.
         const Register scratch = scratch1();
         __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
@@ -2012,7 +2029,7 @@
                   Operand(SIMD128_VALUE_TYPE));
       }
 
-      if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
+      if (expected & ToBooleanHint::kHeapNumber) {
         // heap number -> false iff +0, -0, or NaN.
         DoubleRegister dbl_scratch = double_scratch0();
         Label not_heap_number;
@@ -2026,7 +2043,7 @@
         __ bind(&not_heap_number);
       }
 
-      if (!expected.IsGeneric()) {
+      if (expected != ToBooleanHint::kAny) {
         // We've seen something for the first time -> deopt.
         // This can only happen if we are not generic already.
         DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject, zero_reg,
@@ -2286,30 +2303,6 @@
              Operand(TestType(instr->hydrogen())));
 }
 
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
-  Register input = ToRegister(instr->value());
-  Register result = ToRegister(instr->result());
-
-  __ AssertString(input);
-
-  __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
-  __ IndexFromHash(result, result);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
-    LHasCachedArrayIndexAndBranch* instr) {
-  Register input = ToRegister(instr->value());
-  Register scratch = scratch0();
-
-  __ lw(scratch,
-         FieldMemOperand(input, String::kHashFieldOffset));
-  __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
-  EmitBranch(instr, eq, at, Operand(zero_reg));
-}
-
-
 // Branches to a label or falls through with the answer in flags.  Trashes
 // the temp registers, but not the input.
 void LCodeGen::EmitClassOfTest(Label* is_true,
@@ -2482,35 +2475,6 @@
 }
 
 
-template <class T>
-void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
-  Register vector_register = ToRegister(instr->temp_vector());
-  Register slot_register = LoadWithVectorDescriptor::SlotRegister();
-  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
-  DCHECK(slot_register.is(a0));
-
-  AllowDeferredHandleDereference vector_structure_check;
-  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
-  __ li(vector_register, vector);
-  // No need to allocate this register.
-  FeedbackVectorSlot slot = instr->hydrogen()->slot();
-  int index = vector->GetIndex(slot);
-  __ li(slot_register, Operand(Smi::FromInt(index)));
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->result()).is(v0));
-
-  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  Handle<Code> ic =
-      CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
-          .code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
@@ -2597,19 +2561,6 @@
 }
 
 
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->result()).is(v0));
-
-  // Name is always in a2.
-  __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
-  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
   Register scratch = scratch0();
   Register function = ToRegister(instr->function());
@@ -2845,12 +2796,12 @@
     __ Branch(&done, ne, result, Operand(scratch));
     if (info()->IsStub()) {
       // A stub can safely convert the hole to undefined only if the array
-      // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
+      // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
       // it needs to bail out.
       __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
       __ lw(result, FieldMemOperand(result, Cell::kValueOffset));
       DeoptimizeIf(ne, instr, DeoptimizeReason::kHole, result,
-                   Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
+                   Operand(Smi::FromInt(Isolate::kProtectorValid)));
     }
     __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
     __ bind(&done);
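
The rename from kArrayProtectorValid to kProtectorValid reflects that several protector cells now share one sentinel pair. A rough model of the guard the stub performs, assuming kProtectorValid is a smi sentinel stored in the cell (the concrete values here are illustrative):

#include <stdexcept>

constexpr int kProtectorValid = 1;    // illustrative; renamed from kArrayProtectorValid
constexpr int kProtectorInvalid = 0;  // its counterpart

struct Cell {
  int value;  // protector cells hold a smi sentinel
};

// Stands in for the stub's hole-to-undefined conversion guard.
int LoadHoleAsUndefined(const Cell& array_protector) {
  if (array_protector.value != kProtectorValid) {
    // The array protector was invalidated (e.g. prototype chain modified):
    // deopt with kHole instead of silently returning undefined.
    throw std::runtime_error("DeoptimizeReason::kHole");
  }
  return -1;  // stands in for loading the undefined sentinel
}

int main() {
  return LoadHoleAsUndefined(Cell{kProtectorValid}) == -1 ? 0 : 1;
}
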
@@ -2906,18 +2857,6 @@
 }
 
 
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
-
-  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-
-  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   Register scratch = scratch0();
   Register temp = scratch1();
@@ -4495,8 +4434,7 @@
 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                                 DoubleRegister result_reg,
                                 NumberUntagDMode mode) {
-  bool can_convert_undefined_to_nan =
-      instr->hydrogen()->can_convert_undefined_to_nan();
+  bool can_convert_undefined_to_nan = instr->truncating();
   bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
 
   Register scratch = scratch0();
@@ -4566,36 +4504,14 @@
   // of the if.
 
   if (instr->truncating()) {
-    // Performs a truncating conversion of a floating point number as used by
-    // the JS bitwise operations.
-    Label no_heap_number, check_bools, check_false;
-    // Check HeapNumber map.
-    __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
+    Label truncate;
+    __ Branch(USE_DELAY_SLOT, &truncate, eq, scratch1, Operand(at));
     __ mov(scratch2, input_reg);  // In delay slot.
+    __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+    DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball, scratch1,
+                 Operand(ODDBALL_TYPE));
+    __ bind(&truncate);
     __ TruncateHeapNumberToI(input_reg, scratch2);
-    __ Branch(&done);
-
-    // Check for Oddballs. Undefined/False is converted to zero and True to one
-    // for truncating conversions.
-    __ bind(&no_heap_number);
-    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-    __ Branch(&check_bools, ne, input_reg, Operand(at));
-    DCHECK(ToRegister(instr->result()).is(input_reg));
-    __ Branch(USE_DELAY_SLOT, &done);
-    __ mov(input_reg, zero_reg);  // In delay slot.
-
-    __ bind(&check_bools);
-    __ LoadRoot(at, Heap::kTrueValueRootIndex);
-    __ Branch(&check_false, ne, scratch2, Operand(at));
-    __ Branch(USE_DELAY_SLOT, &done);
-    __ li(input_reg, Operand(1));  // In delay slot.
-
-    __ bind(&check_false);
-    __ LoadRoot(at, Heap::kFalseValueRootIndex);
-    DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefinedBoolean,
-                 scratch2, Operand(at));
-    __ Branch(USE_DELAY_SLOT, &done);
-    __ mov(input_reg, zero_reg);  // In delay slot.
   } else {
     DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch1,
                  Operand(at));
@@ -5401,7 +5317,7 @@
   Register result = ToRegister(instr->result());
   Label load_cache, done;
   __ EnumLength(result, map);
-  __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
+  __ Branch(&load_cache, ne, result, Operand(Smi::kZero));
   __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
   __ jmp(&done);
 
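Throughout the file above, the ToBooleanICStub::Types set becomes ToBooleanHints, a bitmask tested with & instead of Contains(). A minimal sketch of such a hint mask, assuming plain unsigned flags; the real ToBooleanHints is a proper flags type defined elsewhere in V8, so the bit values below are illustrative only:

#include <cstdio>

enum ToBooleanHint : unsigned {
  kNone = 0u,
  kUndefined = 1u << 0,
  kBoolean = 1u << 1,
  kNull = 1u << 2,
  kSmallInteger = 1u << 3,
  kReceiver = 1u << 4,
  kString = 1u << 5,
  kSymbol = 1u << 6,
  kHeapNumber = 1u << 7,
  kSimdValue = 1u << 8,
  // Hints whose checks must load the object's map first:
  kNeedsMap = kReceiver | kString | kSymbol | kHeapNumber | kSimdValue,
  kAny = kUndefined | kBoolean | kNull | kSmallInteger | kNeedsMap,
};
using ToBooleanHints = unsigned;

int main() {
  ToBooleanHints expected = kBoolean | kSmallInteger;
  if (expected == kNone) expected = kAny;  // never deopt on an empty profile
  if (expected & kUndefined) std::printf("emit undefined -> false check\n");
  if (expected & kSmallInteger) std::printf("emit smi check\n");
  if (expected & kNeedsMap) std::printf("load map before the type checks\n");
  if (expected != kAny) std::printf("deopt on anything unexpected\n");
  return 0;
}

The kNone-to-kAny promotion keeps never-executed paths from deoptimizing, exactly as the in-line comment in the patch says.
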
diff --git a/src/crankshaft/mips/lithium-codegen-mips.h b/src/crankshaft/mips/lithium-codegen-mips.h
index bb09abc..28ca01c 100644
--- a/src/crankshaft/mips/lithium-codegen-mips.h
+++ b/src/crankshaft/mips/lithium-codegen-mips.h
@@ -357,24 +357,9 @@
 
   class PushSafepointRegistersScope final BASE_EMBEDDED {
    public:
-    explicit PushSafepointRegistersScope(LCodeGen* codegen)
-        : codegen_(codegen) {
-      DCHECK(codegen_->info()->is_calling());
-      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
-      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+    explicit PushSafepointRegistersScope(LCodeGen* codegen);
 
-      StoreRegistersStateStub stub(codegen_->isolate());
-      codegen_->masm_->push(ra);
-      codegen_->masm_->CallStub(&stub);
-    }
-
-    ~PushSafepointRegistersScope() {
-      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
-      RestoreRegistersStateStub stub(codegen_->isolate());
-      codegen_->masm_->push(ra);
-      codegen_->masm_->CallStub(&stub);
-      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
-    }
+    ~PushSafepointRegistersScope();
 
    private:
     LCodeGen* codegen_;
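
Moving PushSafepointRegistersScope's constructor and destructor out of the header (here and in the mips64 and ppc files below) keeps the stub calls in one translation unit instead of inlining them at every use site. A generic sketch of the pattern, with stand-in names:

// scope.h side -- every includer sees only declarations:
class LCodeGen;  // forward declaration suffices in the header

class PushSafepointRegistersScopeSketch {
 public:
  explicit PushSafepointRegistersScopeSketch(LCodeGen* codegen);
  ~PushSafepointRegistersScopeSketch();

 private:
  LCodeGen* codegen_;
};

// scope.cc side -- the one translation unit that needs the stub machinery:
#include <cstdio>

class LCodeGen {};  // stand-in for the real code generator

PushSafepointRegistersScopeSketch::PushSafepointRegistersScopeSketch(
    LCodeGen* codegen)
    : codegen_(codegen) {
  std::printf("save registers (StoreRegistersStateStub)\n");
}

PushSafepointRegistersScopeSketch::~PushSafepointRegistersScopeSketch() {
  if (codegen_ != nullptr)
    std::printf("restore registers (RestoreRegistersStateStub)\n");
}

int main() {
  LCodeGen gen;
  PushSafepointRegistersScopeSketch scope(&gen);  // RAII: restore on scope exit
  return 0;
}
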
diff --git a/src/crankshaft/mips/lithium-mips.cc b/src/crankshaft/mips/lithium-mips.cc
index 5533b8f..26d422a 100644
--- a/src/crankshaft/mips/lithium-mips.cc
+++ b/src/crankshaft/mips/lithium-mips.cc
@@ -212,14 +212,6 @@
   stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
 }
 
-
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if has_cached_array_index(");
-  value()->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
 void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if class_of_test(");
   value()->PrintTo(stream);
@@ -885,15 +877,15 @@
   HValue* value = instr->value();
   Representation r = value->representation();
   HType type = value->type();
-  ToBooleanICStub::Types expected = instr->expected_input_types();
-  if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+  ToBooleanHints expected = instr->expected_input_types();
+  if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
 
   bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
       type.IsJSArray() || type.IsHeapNumber() || type.IsString();
   LInstruction* branch = new(zone()) LBranch(UseRegister(value));
-  if (!easy_case &&
-      ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
-       !expected.IsGeneric())) {
+  if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) &&
+                      (expected & ToBooleanHint::kNeedsMap)) ||
+                     expected != ToBooleanHint::kAny)) {
     branch = AssignEnvironment(branch);
   }
   return branch;
@@ -1672,24 +1664,6 @@
   return new(zone()) LHasInstanceTypeAndBranch(value);
 }
 
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
-    HGetCachedArrayIndex* instr)  {
-  DCHECK(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
-
-  return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
-    HHasCachedArrayIndexAndBranch* instr) {
-  DCHECK(instr->value()->representation().IsTagged());
-  return new(zone()) LHasCachedArrayIndexAndBranch(
-      UseRegisterAtStart(instr->value()));
-}
-
-
 LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
     HClassOfTestAndBranch* instr) {
   DCHECK(instr->value()->representation().IsTagged());
@@ -1946,15 +1920,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
-  LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
-  return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
   LInstruction* result =
@@ -1990,18 +1955,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* object =
-      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
-  LInstruction* result =
-      DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), v0);
-  return MarkAsCall(result, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
     HLoadFunctionPrototype* instr) {
   return AssignEnvironment(DefineAsRegister(
@@ -2061,20 +2014,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* object =
-      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
-  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
-  LInstruction* result =
-      DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector),
-                  v0);
-  return MarkAsCall(result, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
   if (!instr->is_fixed_typed_array()) {
     DCHECK(instr->elements()->representation().IsTagged());
@@ -2352,7 +2291,6 @@
   inner->BindContext(instr->closure_context());
   inner->set_entry(instr);
   current_block_->UpdateEnvironment(inner);
-  chunk_->AddInlinedFunction(instr->shared());
   return NULL;
 }
 
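The DoBranch change above preserves the deopt policy under the new bitmask spelling: an environment is needed when the branch may inspect a map while a smi is still possible, or when the hint set is not yet generic. Restated as a plain predicate, with stand-in hint constants consistent with the earlier sketch:

#include <cassert>

using ToBooleanHints = unsigned;
constexpr ToBooleanHints kNone = 0u;
constexpr ToBooleanHints kSmallInteger = 1u << 3;
constexpr ToBooleanHints kNeedsMap = 1u << 4;  // placeholder "needs map" bit
constexpr ToBooleanHints kAny = 0xFFu;

// An environment (deopt point) is required when the branch may load a map
// while a smi is still possible, or when the hint set is not yet generic.
bool NeedsEnvironment(bool easy_case, ToBooleanHints expected) {
  if (expected == kNone) expected = kAny;
  return !easy_case &&
         ((!(expected & kSmallInteger) && (expected & kNeedsMap)) ||
          expected != kAny);
}

int main() {
  assert(!NeedsEnvironment(true, kAny));       // easy representations: never
  assert(NeedsEnvironment(false, kNeedsMap));  // map check on a possible smi
  assert(!NeedsEnvironment(false, kAny));      // fully generic: no deopt left
  return 0;
}
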
diff --git a/src/crankshaft/mips/lithium-mips.h b/src/crankshaft/mips/lithium-mips.h
index f49fb93..209987b 100644
--- a/src/crankshaft/mips/lithium-mips.h
+++ b/src/crankshaft/mips/lithium-mips.h
@@ -71,9 +71,7 @@
   V(FlooringDivI)                            \
   V(ForInCacheArray)                         \
   V(ForInPrepareMap)                         \
-  V(GetCachedArrayIndex)                     \
   V(Goto)                                    \
-  V(HasCachedArrayIndexAndBranch)            \
   V(HasInPrototypeChainAndBranch)            \
   V(HasInstanceTypeAndBranch)                \
   V(InnerAllocatedObject)                    \
@@ -89,11 +87,8 @@
   V(LoadRoot)                                \
   V(LoadFieldByIndex)                        \
   V(LoadFunctionPrototype)                   \
-  V(LoadGlobalGeneric)                       \
   V(LoadKeyed)                               \
-  V(LoadKeyedGeneric)                        \
   V(LoadNamedField)                          \
-  V(LoadNamedGeneric)                        \
   V(MathAbs)                                 \
   V(MathCos)                                 \
   V(MathSin)                                 \
@@ -1049,36 +1044,6 @@
   void PrintDataTo(StringStream* stream) override;
 };
 
-
-class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LGetCachedArrayIndex(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
-  DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
- public:
-  explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
-                               "has-cached-array-index-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
-
-  void PrintDataTo(StringStream* stream) override;
-};
-
-
 class LClassOfTestAndBranch final : public LControlInstruction<1, 1> {
  public:
   LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
@@ -1448,25 +1413,6 @@
 };
 
 
-class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
- public:
-  LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    temps_[0] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
 class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadFunctionPrototype(LOperand* function) {
@@ -1515,43 +1461,6 @@
 };
 
 
-class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
- public:
-  LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
-                    LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    inputs_[2] = key;
-    temps_[0] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* key() { return inputs_[2]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
-};
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
- public:
-  LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
-    inputs_[0] = context;
-    temps_[0] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-  TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
-};
-
-
 class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadContextSlot(LOperand* context) {
@@ -1924,6 +1833,8 @@
 
   DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
   DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToNumber(); }
 };
 
 
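The new truncating() accessor lets EmitNumberUntagD read the hydrogen-level CanTruncateToNumber() property straight off the lithium instruction, and it now also feeds can_convert_undefined_to_nan in the codegen above. A toy version of the delegation, with stand-in types:

#include <cstdio>

struct HChange {  // stand-in for the hydrogen Change instruction
  bool can_truncate;
  bool CanTruncateToNumber() const { return can_truncate; }
};

struct LNumberUntagD {  // stand-in for the lithium instruction
  const HChange* hydrogen_;
  bool truncating() const { return hydrogen_->CanTruncateToNumber(); }
};

int main() {
  HChange change{true};
  LNumberUntagD untag{&change};
  std::printf("truncating: %s\n", untag.truncating() ? "yes" : "no");
  return 0;
}
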
diff --git a/src/crankshaft/mips64/lithium-codegen-mips64.cc b/src/crankshaft/mips64/lithium-codegen-mips64.cc
index 5f93e55..1531996 100644
--- a/src/crankshaft/mips64/lithium-codegen-mips64.cc
+++ b/src/crankshaft/mips64/lithium-codegen-mips64.cc
@@ -37,6 +37,25 @@
   Safepoint::DeoptMode deopt_mode_;
 };
 
+LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope(
+    LCodeGen* codegen)
+    : codegen_(codegen) {
+  DCHECK(codegen_->info()->is_calling());
+  DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+  codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+
+  StoreRegistersStateStub stub(codegen_->isolate());
+  codegen_->masm_->push(ra);
+  codegen_->masm_->CallStub(&stub);
+}
+
+LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() {
+  DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+  RestoreRegistersStateStub stub(codegen_->isolate());
+  codegen_->masm_->push(ra);
+  codegen_->masm_->CallStub(&stub);
+  codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+}
 
 #define __ masm()->
 
@@ -243,8 +262,7 @@
 
       HValue* value =
           instructions_->at(code->instruction_index())->hydrogen_value();
-      RecordAndWritePosition(
-          chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+      RecordAndWritePosition(value->position());
 
       Comment(";;; <@%d,#%d> "
               "-------------------- Deferred %s --------------------",
@@ -2057,43 +2075,42 @@
       __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
       EmitBranch(instr, ne, at, Operand(zero_reg));
     } else {
-      ToBooleanICStub::Types expected =
-          instr->hydrogen()->expected_input_types();
+      ToBooleanHints expected = instr->hydrogen()->expected_input_types();
       // Avoid deopts in the case where we've never executed this path before.
-      if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+      if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
 
-      if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
+      if (expected & ToBooleanHint::kUndefined) {
         // undefined -> false.
         __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
       }
-      if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
+      if (expected & ToBooleanHint::kBoolean) {
         // Boolean -> its value.
         __ LoadRoot(at, Heap::kTrueValueRootIndex);
         __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
         __ LoadRoot(at, Heap::kFalseValueRootIndex);
         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
       }
-      if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
+      if (expected & ToBooleanHint::kNull) {
         // 'null' -> false.
         __ LoadRoot(at, Heap::kNullValueRootIndex);
         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
       }
 
-      if (expected.Contains(ToBooleanICStub::SMI)) {
+      if (expected & ToBooleanHint::kSmallInteger) {
         // Smis: 0 -> false, all other -> true.
         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
         __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
-      } else if (expected.NeedsMap()) {
+      } else if (expected & ToBooleanHint::kNeedsMap) {
         // If we need a map later and have a Smi -> deopt.
         __ SmiTst(reg, at);
         DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
       }
 
       const Register map = scratch0();
-      if (expected.NeedsMap()) {
+      if (expected & ToBooleanHint::kNeedsMap) {
         __ ld(map, FieldMemOperand(reg, HeapObject::kMapOffset));
-        if (expected.CanBeUndetectable()) {
+        if (expected & ToBooleanHint::kCanBeUndetectable) {
           // Undetectable -> false.
           __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
           __ And(at, at, Operand(1 << Map::kIsUndetectable));
@@ -2101,14 +2118,14 @@
         }
       }
 
-      if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
+      if (expected & ToBooleanHint::kReceiver) {
         // spec object -> true.
         __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
         __ Branch(instr->TrueLabel(chunk_),
                   ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
       }
 
-      if (expected.Contains(ToBooleanICStub::STRING)) {
+      if (expected & ToBooleanHint::kString) {
         // String value -> false iff empty.
         Label not_string;
         __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
@@ -2119,14 +2136,14 @@
         __ bind(&not_string);
       }
 
-      if (expected.Contains(ToBooleanICStub::SYMBOL)) {
+      if (expected & ToBooleanHint::kSymbol) {
         // Symbol value -> true.
         const Register scratch = scratch1();
         __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
         __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
       }
 
-      if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
+      if (expected & ToBooleanHint::kSimdValue) {
         // SIMD value -> true.
         const Register scratch = scratch1();
         __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
@@ -2134,7 +2151,7 @@
                   Operand(SIMD128_VALUE_TYPE));
       }
 
-      if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
+      if (expected & ToBooleanHint::kHeapNumber) {
         // heap number -> false iff +0, -0, or NaN.
         DoubleRegister dbl_scratch = double_scratch0();
         Label not_heap_number;
@@ -2148,7 +2165,7 @@
         __ bind(&not_heap_number);
       }
 
-      if (!expected.IsGeneric()) {
+      if (expected != ToBooleanHint::kAny) {
         // We've seen something for the first time -> deopt.
         // This can only happen if we are not generic already.
         DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject, zero_reg,
@@ -2408,30 +2425,6 @@
              Operand(TestType(instr->hydrogen())));
 }
 
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
-  Register input = ToRegister(instr->value());
-  Register result = ToRegister(instr->result());
-
-  __ AssertString(input);
-
-  __ lwu(result, FieldMemOperand(input, String::kHashFieldOffset));
-  __ IndexFromHash(result, result);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
-    LHasCachedArrayIndexAndBranch* instr) {
-  Register input = ToRegister(instr->value());
-  Register scratch = scratch0();
-
-  __ lwu(scratch,
-         FieldMemOperand(input, String::kHashFieldOffset));
-  __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
-  EmitBranch(instr, eq, at, Operand(zero_reg));
-}
-
-
 // Branches to a label or falls through with the answer in flags.  Trashes
 // the temp registers, but not the input.
 void LCodeGen::EmitClassOfTest(Label* is_true,
@@ -2606,35 +2599,6 @@
 }
 
 
-template <class T>
-void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
-  Register vector_register = ToRegister(instr->temp_vector());
-  Register slot_register = LoadWithVectorDescriptor::SlotRegister();
-  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
-  DCHECK(slot_register.is(a0));
-
-  AllowDeferredHandleDereference vector_structure_check;
-  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
-  __ li(vector_register, vector);
-  // No need to allocate this register.
-  FeedbackVectorSlot slot = instr->hydrogen()->slot();
-  int index = vector->GetIndex(slot);
-  __ li(slot_register, Operand(Smi::FromInt(index)));
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->result()).is(v0));
-
-  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  Handle<Code> ic =
-      CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
-          .code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
@@ -2736,19 +2700,6 @@
 }
 
 
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->result()).is(v0));
-
-  // Name is always in a2.
-  __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
-  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
   Register scratch = scratch0();
   Register function = ToRegister(instr->function());
@@ -3021,13 +2972,13 @@
     __ Branch(&done, ne, result, Operand(scratch));
     if (info()->IsStub()) {
       // A stub can safely convert the hole to undefined only if the array
-      // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
+      // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
       // it needs to bail out.
       __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
       // The comparison only needs LS bits of value, which is a smi.
       __ ld(result, FieldMemOperand(result, Cell::kValueOffset));
       DeoptimizeIf(ne, instr, DeoptimizeReason::kHole, result,
-                   Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
+                   Operand(Smi::FromInt(Isolate::kProtectorValid)));
     }
     __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
     __ bind(&done);
@@ -3089,18 +3040,6 @@
 }
 
 
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
-
-  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-
-  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   Register scratch = scratch0();
   Register temp = scratch1();
@@ -4703,8 +4642,7 @@
 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                                 DoubleRegister result_reg,
                                 NumberUntagDMode mode) {
-  bool can_convert_undefined_to_nan =
-      instr->hydrogen()->can_convert_undefined_to_nan();
+  bool can_convert_undefined_to_nan = instr->truncating();
   bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
 
   Register scratch = scratch0();
@@ -4774,36 +4712,14 @@
   // of the if.
 
   if (instr->truncating()) {
-    // Performs a truncating conversion of a floating point number as used by
-    // the JS bitwise operations.
-    Label no_heap_number, check_bools, check_false;
-    // Check HeapNumber map.
-    __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
+    Label truncate;
+    __ Branch(USE_DELAY_SLOT, &truncate, eq, scratch1, Operand(at));
     __ mov(scratch2, input_reg);  // In delay slot.
+    __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+    DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball, scratch1,
+                 Operand(ODDBALL_TYPE));
+    __ bind(&truncate);
     __ TruncateHeapNumberToI(input_reg, scratch2);
-    __ Branch(&done);
-
-    // Check for Oddballs. Undefined/False is converted to zero and True to one
-    // for truncating conversions.
-    __ bind(&no_heap_number);
-    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-    __ Branch(&check_bools, ne, input_reg, Operand(at));
-    DCHECK(ToRegister(instr->result()).is(input_reg));
-    __ Branch(USE_DELAY_SLOT, &done);
-    __ mov(input_reg, zero_reg);  // In delay slot.
-
-    __ bind(&check_bools);
-    __ LoadRoot(at, Heap::kTrueValueRootIndex);
-    __ Branch(&check_false, ne, scratch2, Operand(at));
-    __ Branch(USE_DELAY_SLOT, &done);
-    __ li(input_reg, Operand(1));  // In delay slot.
-
-    __ bind(&check_false);
-    __ LoadRoot(at, Heap::kFalseValueRootIndex);
-    DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefinedBoolean,
-                 scratch2, Operand(at));
-    __ Branch(USE_DELAY_SLOT, &done);
-    __ mov(input_reg, zero_reg);  // In delay slot.
   } else {
     DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch1,
                  Operand(at));
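
The truncating path above (and its mips twin earlier) collapses the old undefined/true/false special cases into one oddball instance-type check followed by the shared truncation; this works because an oddball caches its to-number value at the same field offset a HeapNumber keeps its value, so a single TruncateHeapNumberToI serves both (an offset-aliasing layout trick asserted elsewhere in the V8 source). A control-flow model in plain C++, not macro-assembler:

#include <cstdio>
#include <stdexcept>

enum InstanceType { HEAP_NUMBER_TYPE, ODDBALL_TYPE, OTHER_TYPE };

// Models the new fast path: heap numbers truncate directly; oddballs reuse
// the same truncation via their cached numeric value; everything else deopts
// with the single kNotANumberOrOddball reason.
int TruncateTaggedToI(InstanceType type, double heap_number_value,
                      double oddball_to_number) {
  double value;
  if (type == HEAP_NUMBER_TYPE) {
    value = heap_number_value;
  } else if (type == ODDBALL_TYPE) {
    value = oddball_to_number;  // e.g. true -> 1.0, false -> 0.0
  } else {
    throw std::runtime_error("DeoptimizeReason::kNotANumberOrOddball");
  }
  return static_cast<int>(value);  // stands in for TruncateHeapNumberToI
}

int main() {
  std::printf("%d\n", TruncateTaggedToI(HEAP_NUMBER_TYPE, 3.9, 0.0));  // 3
  std::printf("%d\n", TruncateTaggedToI(ODDBALL_TYPE, 0.0, 1.0));      // 1
  return 0;
}
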
@@ -5610,7 +5526,7 @@
   Register result = ToRegister(instr->result());
   Label load_cache, done;
   __ EnumLength(result, map);
-  __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
+  __ Branch(&load_cache, ne, result, Operand(Smi::kZero));
   __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
   __ jmp(&done);
 
diff --git a/src/crankshaft/mips64/lithium-codegen-mips64.h b/src/crankshaft/mips64/lithium-codegen-mips64.h
index aaa2e6b..ba332ae 100644
--- a/src/crankshaft/mips64/lithium-codegen-mips64.h
+++ b/src/crankshaft/mips64/lithium-codegen-mips64.h
@@ -360,24 +360,9 @@
 
   class PushSafepointRegistersScope final BASE_EMBEDDED {
    public:
-    explicit PushSafepointRegistersScope(LCodeGen* codegen)
-        : codegen_(codegen) {
-      DCHECK(codegen_->info()->is_calling());
-      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
-      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+    explicit PushSafepointRegistersScope(LCodeGen* codegen);
 
-      StoreRegistersStateStub stub(codegen_->isolate());
-      codegen_->masm_->push(ra);
-      codegen_->masm_->CallStub(&stub);
-    }
-
-    ~PushSafepointRegistersScope() {
-      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
-      RestoreRegistersStateStub stub(codegen_->isolate());
-      codegen_->masm_->push(ra);
-      codegen_->masm_->CallStub(&stub);
-      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
-    }
+    ~PushSafepointRegistersScope();
 
    private:
     LCodeGen* codegen_;
diff --git a/src/crankshaft/mips64/lithium-mips64.cc b/src/crankshaft/mips64/lithium-mips64.cc
index 0855754..fd0ebc8 100644
--- a/src/crankshaft/mips64/lithium-mips64.cc
+++ b/src/crankshaft/mips64/lithium-mips64.cc
@@ -212,14 +212,6 @@
   stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
 }
 
-
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if has_cached_array_index(");
-  value()->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
 void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if class_of_test(");
   value()->PrintTo(stream);
@@ -885,15 +877,15 @@
   HValue* value = instr->value();
   Representation r = value->representation();
   HType type = value->type();
-  ToBooleanICStub::Types expected = instr->expected_input_types();
-  if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+  ToBooleanHints expected = instr->expected_input_types();
+  if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
 
   bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
       type.IsJSArray() || type.IsHeapNumber() || type.IsString();
   LInstruction* branch = new(zone()) LBranch(UseRegister(value));
-  if (!easy_case &&
-      ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
-       !expected.IsGeneric())) {
+  if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) &&
+                      (expected & ToBooleanHint::kNeedsMap)) ||
+                     expected != ToBooleanHint::kAny)) {
     branch = AssignEnvironment(branch);
   }
   return branch;
@@ -1678,24 +1670,6 @@
   return new(zone()) LHasInstanceTypeAndBranch(value);
 }
 
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
-    HGetCachedArrayIndex* instr)  {
-  DCHECK(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
-
-  return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
-    HHasCachedArrayIndexAndBranch* instr) {
-  DCHECK(instr->value()->representation().IsTagged());
-  return new(zone()) LHasCachedArrayIndexAndBranch(
-      UseRegisterAtStart(instr->value()));
-}
-
-
 LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
     HClassOfTestAndBranch* instr) {
   DCHECK(instr->value()->representation().IsTagged());
@@ -1949,15 +1923,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
-  LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
-  return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
   LInstruction* result =
@@ -1993,18 +1958,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* object =
-      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
-  LInstruction* result =
-      DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), v0);
-  return MarkAsCall(result, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
     HLoadFunctionPrototype* instr) {
   return AssignEnvironment(DefineAsRegister(
@@ -2065,20 +2018,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* object =
-      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
-  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
-  LInstruction* result =
-      DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector),
-                  v0);
-  return MarkAsCall(result, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
   if (!instr->is_fixed_typed_array()) {
     DCHECK(instr->elements()->representation().IsTagged());
@@ -2357,7 +2296,6 @@
   inner->BindContext(instr->closure_context());
   inner->set_entry(instr);
   current_block_->UpdateEnvironment(inner);
-  chunk_->AddInlinedFunction(instr->shared());
   return NULL;
 }
 
diff --git a/src/crankshaft/mips64/lithium-mips64.h b/src/crankshaft/mips64/lithium-mips64.h
index 7bc89af..f5b402a 100644
--- a/src/crankshaft/mips64/lithium-mips64.h
+++ b/src/crankshaft/mips64/lithium-mips64.h
@@ -73,9 +73,7 @@
   V(FlooringDivI)                            \
   V(ForInCacheArray)                         \
   V(ForInPrepareMap)                         \
-  V(GetCachedArrayIndex)                     \
   V(Goto)                                    \
-  V(HasCachedArrayIndexAndBranch)            \
   V(HasInPrototypeChainAndBranch)            \
   V(HasInstanceTypeAndBranch)                \
   V(InnerAllocatedObject)                    \
@@ -91,11 +89,8 @@
   V(LoadRoot)                                \
   V(LoadFieldByIndex)                        \
   V(LoadFunctionPrototype)                   \
-  V(LoadGlobalGeneric)                       \
   V(LoadKeyed)                               \
-  V(LoadKeyedGeneric)                        \
   V(LoadNamedField)                          \
-  V(LoadNamedGeneric)                        \
   V(MathAbs)                                 \
   V(MathCos)                                 \
   V(MathSin)                                 \
@@ -1067,36 +1062,6 @@
   void PrintDataTo(StringStream* stream) override;
 };
 
-
-class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LGetCachedArrayIndex(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
-  DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
- public:
-  explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
-                               "has-cached-array-index-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
-
-  void PrintDataTo(StringStream* stream) override;
-};
-
-
 class LClassOfTestAndBranch final : public LControlInstruction<1, 1> {
  public:
   LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
@@ -1510,25 +1475,6 @@
 };
 
 
-class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
- public:
-  LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    temps_[0] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
 class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadFunctionPrototype(LOperand* function) {
@@ -1577,43 +1523,6 @@
 };
 
 
-class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
- public:
-  LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
-                    LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    inputs_[2] = key;
-    temps_[0] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* key() { return inputs_[2]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
-};
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
- public:
-  LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
-    inputs_[0] = context;
-    temps_[0] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-  TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
-};
-
-
 class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadContextSlot(LOperand* context) {
@@ -1970,6 +1879,8 @@
 
   DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
   DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToNumber(); }
 };
 
 
diff --git a/src/crankshaft/ppc/lithium-codegen-ppc.cc b/src/crankshaft/ppc/lithium-codegen-ppc.cc
index 95018e8..9c65586 100644
--- a/src/crankshaft/ppc/lithium-codegen-ppc.cc
+++ b/src/crankshaft/ppc/lithium-codegen-ppc.cc
@@ -35,6 +35,22 @@
   Safepoint::DeoptMode deopt_mode_;
 };
 
+LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope(
+    LCodeGen* codegen)
+    : codegen_(codegen) {
+  DCHECK(codegen_->info()->is_calling());
+  DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+  codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+  StoreRegistersStateStub stub(codegen_->isolate());
+  codegen_->masm_->CallStub(&stub);
+}
+
+LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() {
+  DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+  RestoreRegistersStateStub stub(codegen_->isolate());
+  codegen_->masm_->CallStub(&stub);
+  codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+}
 
 #define __ masm()->
 
@@ -254,8 +270,7 @@
 
       HValue* value =
           instructions_->at(code->instruction_index())->hydrogen_value();
-      RecordAndWritePosition(
-          chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+      RecordAndWritePosition(value->position());
 
       Comment(
           ";;; <@%d,#%d> "
@@ -2099,45 +2114,44 @@
       __ cmpi(ip, Operand::Zero());
       EmitBranch(instr, ne);
     } else {
-      ToBooleanICStub::Types expected =
-          instr->hydrogen()->expected_input_types();
+      ToBooleanHints expected = instr->hydrogen()->expected_input_types();
       // Avoid deopts in the case where we've never executed this path before.
-      if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+      if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
 
-      if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
+      if (expected & ToBooleanHint::kUndefined) {
         // undefined -> false.
         __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
         __ beq(instr->FalseLabel(chunk_));
       }
-      if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
+      if (expected & ToBooleanHint::kBoolean) {
         // Boolean -> its value.
         __ CompareRoot(reg, Heap::kTrueValueRootIndex);
         __ beq(instr->TrueLabel(chunk_));
         __ CompareRoot(reg, Heap::kFalseValueRootIndex);
         __ beq(instr->FalseLabel(chunk_));
       }
-      if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
+      if (expected & ToBooleanHint::kNull) {
         // 'null' -> false.
         __ CompareRoot(reg, Heap::kNullValueRootIndex);
         __ beq(instr->FalseLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanICStub::SMI)) {
+      if (expected & ToBooleanHint::kSmallInteger) {
         // Smis: 0 -> false, all other -> true.
         __ cmpi(reg, Operand::Zero());
         __ beq(instr->FalseLabel(chunk_));
         __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
-      } else if (expected.NeedsMap()) {
+      } else if (expected & ToBooleanHint::kNeedsMap) {
         // If we need a map later and have a Smi -> deopt.
         __ TestIfSmi(reg, r0);
         DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
       }
 
       const Register map = scratch0();
-      if (expected.NeedsMap()) {
+      if (expected & ToBooleanHint::kNeedsMap) {
         __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));
 
-        if (expected.CanBeUndetectable()) {
+        if (expected & ToBooleanHint::kCanBeUndetectable) {
           // Undetectable -> false.
           __ lbz(ip, FieldMemOperand(map, Map::kBitFieldOffset));
           __ TestBit(ip, Map::kIsUndetectable, r0);
@@ -2145,13 +2159,13 @@
         }
       }
 
-      if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
+      if (expected & ToBooleanHint::kReceiver) {
         // spec object -> true.
         __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
         __ bge(instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanICStub::STRING)) {
+      if (expected & ToBooleanHint::kString) {
         // String value -> false iff empty.
         Label not_string;
         __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
@@ -2163,20 +2177,20 @@
         __ bind(&not_string);
       }
 
-      if (expected.Contains(ToBooleanICStub::SYMBOL)) {
+      if (expected & ToBooleanHint::kSymbol) {
         // Symbol value -> true.
         __ CompareInstanceType(map, ip, SYMBOL_TYPE);
         __ beq(instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
+      if (expected & ToBooleanHint::kSimdValue) {
         // SIMD value -> true.
         Label not_simd;
         __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
         __ beq(instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
+      if (expected & ToBooleanHint::kHeapNumber) {
         // heap number -> false iff +0, -0, or NaN.
         Label not_heap_number;
         __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
@@ -2191,7 +2205,7 @@
         __ bind(&not_heap_number);
       }
 
-      if (!expected.IsGeneric()) {
+      if (expected != ToBooleanHint::kAny) {
         // We've seen something for the first time -> deopt.
         // This can only happen if we are not generic already.
         DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject);
@@ -2459,30 +2473,6 @@
   EmitBranch(instr, BranchCondition(instr->hydrogen()));
 }
 
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
-  Register input = ToRegister(instr->value());
-  Register result = ToRegister(instr->result());
-
-  __ AssertString(input);
-
-  __ lwz(result, FieldMemOperand(input, String::kHashFieldOffset));
-  __ IndexFromHash(result, result);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
-    LHasCachedArrayIndexAndBranch* instr) {
-  Register input = ToRegister(instr->value());
-  Register scratch = scratch0();
-
-  __ lwz(scratch, FieldMemOperand(input, String::kHashFieldOffset));
-  __ mov(r0, Operand(String::kContainsCachedArrayIndexMask));
-  __ and_(r0, scratch, r0, SetRC);
-  EmitBranch(instr, eq, cr0);
-}
-
-
 // Branches to a label or falls through with the answer in flags.  Trashes
 // the temp registers, but not the input.
 void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
@@ -2660,35 +2650,6 @@
 }
 
 
-template <class T>
-void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
-  Register vector_register = ToRegister(instr->temp_vector());
-  Register slot_register = LoadDescriptor::SlotRegister();
-  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
-  DCHECK(slot_register.is(r3));
-
-  AllowDeferredHandleDereference vector_structure_check;
-  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
-  __ Move(vector_register, vector);
-  // No need to allocate this register.
-  FeedbackVectorSlot slot = instr->hydrogen()->slot();
-  int index = vector->GetIndex(slot);
-  __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->result()).is(r3));
-
-  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  Handle<Code> ic =
-      CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
-          .code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
@@ -2791,19 +2752,6 @@
 }
 
 
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->result()).is(r3));
-
-  // Name is always in r5.
-  __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
-  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
   Register scratch = scratch0();
   Register function = ToRegister(instr->function());
@@ -3098,11 +3046,11 @@
     __ bne(&done);
     if (info()->IsStub()) {
       // A stub can safely convert the hole to undefined only if the array
-      // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
+      // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
       // it needs to bail out.
       __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
       __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
-      __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kArrayProtectorValid), r0);
+      __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kProtectorValid), r0);
       DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
     }
     __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
@@ -3153,17 +3101,6 @@
 }
 
 
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
-  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-
-  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   Register scratch = scratch0();
   Register result = ToRegister(instr->result());
@@ -4081,7 +4018,7 @@
     int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
     Register index = ToRegister(instr->index());
     if (representation.IsSmi()) {
-      __ Cmpli(index, Operand(Smi::FromInt(length)), r0);
+      __ CmplSmiLiteral(index, Smi::FromInt(length), r0);
     } else {
       __ Cmplwi(index, Operand(length), r0);
     }
@@ -4090,7 +4027,7 @@
     int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
     Register length = ToRegister(instr->length());
     if (representation.IsSmi()) {
-      __ Cmpli(length, Operand(Smi::FromInt(index)), r0);
+      __ CmplSmiLiteral(length, Smi::FromInt(index), r0);
     } else {
       __ Cmplwi(length, Operand(index), r0);
     }
@@ -4802,8 +4739,7 @@
 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                                 DoubleRegister result_reg,
                                 NumberUntagDMode mode) {
-  bool can_convert_undefined_to_nan =
-      instr->hydrogen()->can_convert_undefined_to_nan();
+  bool can_convert_undefined_to_nan = instr->truncating();
   bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
 
   Register scratch = scratch0();
@@ -4871,35 +4807,13 @@
   __ cmp(scratch1, ip);
 
   if (instr->truncating()) {
-    // Performs a truncating conversion of a floating point number as used by
-    // the JS bitwise operations.
-    Label no_heap_number, check_bools, check_false;
-    __ bne(&no_heap_number);
+    Label truncate;
+    __ beq(&truncate);
+    __ CompareInstanceType(scratch1, scratch1, ODDBALL_TYPE);
+    DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball);
+    __ bind(&truncate);
     __ mr(scratch2, input_reg);
     __ TruncateHeapNumberToI(input_reg, scratch2);
-    __ b(&done);
-
-    // Check for Oddballs. Undefined/False is converted to zero and True to one
-    // for truncating conversions.
-    __ bind(&no_heap_number);
-    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-    __ cmp(input_reg, ip);
-    __ bne(&check_bools);
-    __ li(input_reg, Operand::Zero());
-    __ b(&done);
-
-    __ bind(&check_bools);
-    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
-    __ cmp(input_reg, ip);
-    __ bne(&check_false);
-    __ li(input_reg, Operand(1));
-    __ b(&done);
-
-    __ bind(&check_false);
-    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
-    __ cmp(input_reg, ip);
-    DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
-    __ li(input_reg, Operand::Zero());
   } else {
     DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
 
@@ -5313,7 +5227,7 @@
   // TODO(3095996): Get rid of this. For now, we need to make the
   // result register contain a valid pointer because it is already
   // contained in the register pointer map.
-  __ LoadSmiLiteral(result, Smi::FromInt(0));
+  __ LoadSmiLiteral(result, Smi::kZero);
 
   PushSafepointRegistersScope scope(this);
   if (instr->size()->IsRegister()) {
@@ -5404,8 +5318,8 @@
   __ mov(r3, Operand(isolate()->factory()->number_string()));
   __ b(&end);
   __ bind(&do_call);
-  TypeofStub stub(isolate());
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  Callable callable = CodeFactory::Typeof(isolate());
+  CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
   __ bind(&end);
 }
 
@@ -5646,7 +5560,7 @@
   Register result = ToRegister(instr->result());
   Label load_cache, done;
   __ EnumLength(result, map);
-  __ CmpSmiLiteral(result, Smi::FromInt(0), r0);
+  __ CmpSmiLiteral(result, Smi::kZero, r0);
   __ bne(&load_cache);
   __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
   __ b(&done);
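
The truncating TaggedToI hunk above collapses the old slow path, which pattern-matched undefined, true, and false by hand, into a single instance-type check: any ODDBALL_TYPE value is funneled through the same TruncateHeapNumberToI call as a real heap number, and anything else deoptimizes with the new kNotANumberOrOddball reason. This relies on an oddball caching its to-number value at the same offset where a heap number keeps its value field (V8 asserts that aliasing elsewhere), and EmitNumberUntagD above now derives its undefined-to-NaN flag from the same truncating() accessor that lithium-ppc.h gains below. A minimal model of the shared truncation, assuming only JavaScript ToInt32 semantics:

    #include <cmath>
    #include <cstdint>

    // Illustrative model, not V8 code: truncate toward zero, then wrap
    // modulo 2^32. NaN and the infinities map to 0, so undefined (whose
    // cached to-number is NaN) still becomes 0, and true/false (1.0/0.0)
    // behave exactly as the deleted hand-written branches did.
    int32_t ToInt32(double d) {
      if (std::isnan(d) || std::isinf(d)) return 0;
      double m = std::fmod(std::trunc(d), 4294967296.0);  // 2^32
      if (m < 0) m += 4294967296.0;
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }
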
diff --git a/src/crankshaft/ppc/lithium-codegen-ppc.h b/src/crankshaft/ppc/lithium-codegen-ppc.h
index a4a90a7..32b9e18 100644
--- a/src/crankshaft/ppc/lithium-codegen-ppc.h
+++ b/src/crankshaft/ppc/lithium-codegen-ppc.h
@@ -294,21 +294,9 @@
 
   class PushSafepointRegistersScope final BASE_EMBEDDED {
    public:
-    explicit PushSafepointRegistersScope(LCodeGen* codegen)
-        : codegen_(codegen) {
-      DCHECK(codegen_->info()->is_calling());
-      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
-      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
-      StoreRegistersStateStub stub(codegen_->isolate());
-      codegen_->masm_->CallStub(&stub);
-    }
+    explicit PushSafepointRegistersScope(LCodeGen* codegen);
 
-    ~PushSafepointRegistersScope() {
-      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
-      RestoreRegistersStateStub stub(codegen_->isolate());
-      codegen_->masm_->CallStub(&stub);
-      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
-    }
+    ~PushSafepointRegistersScope();
 
    private:
     LCodeGen* codegen_;
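
The header now carries declarations only; the constructor and destructor bodies move out of line (the s390 file later in this patch shows the matching .cc definitions, and the ppc ones presumably move the same way), so lithium-codegen-*.h no longer depends on Store/RestoreRegistersStateStub. A generic sketch of the idiom, with the stub calls elided:

    #include <cassert>

    enum class SafepointKind { kSimple, kWithRegisters };

    struct Codegen {
      class PushSafepointRegistersScope {  // header: declarations only
       public:
        explicit PushSafepointRegistersScope(Codegen* cg);
        ~PushSafepointRegistersScope();

       private:
        Codegen* cg_;
      };
      SafepointKind kind = SafepointKind::kSimple;
    };

    // Out-of-line bodies, as the patch now defines them in the .cc file.
    Codegen::PushSafepointRegistersScope::PushSafepointRegistersScope(Codegen* cg)
        : cg_(cg) {
      assert(cg_->kind == SafepointKind::kSimple);
      cg_->kind = SafepointKind::kWithRegisters;  // real code also calls a stub
    }

    Codegen::PushSafepointRegistersScope::~PushSafepointRegistersScope() {
      assert(cg_->kind == SafepointKind::kWithRegisters);
      cg_->kind = SafepointKind::kSimple;  // real code also calls a stub
    }

    int main() {
      Codegen cg;
      {
        Codegen::PushSafepointRegistersScope scope(&cg);
        assert(cg.kind == SafepointKind::kWithRegisters);
      }
      assert(cg.kind == SafepointKind::kSimple);
    }
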
diff --git a/src/crankshaft/ppc/lithium-ppc.cc b/src/crankshaft/ppc/lithium-ppc.cc
index 738cf23..75aec2f 100644
--- a/src/crankshaft/ppc/lithium-ppc.cc
+++ b/src/crankshaft/ppc/lithium-ppc.cc
@@ -220,14 +220,6 @@
   stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
 }
 
-
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if has_cached_array_index(");
-  value()->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
 void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if class_of_test(");
   value()->PrintTo(stream);
@@ -890,15 +882,15 @@
   HValue* value = instr->value();
   Representation r = value->representation();
   HType type = value->type();
-  ToBooleanICStub::Types expected = instr->expected_input_types();
-  if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+  ToBooleanHints expected = instr->expected_input_types();
+  if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
 
   bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
                    type.IsJSArray() || type.IsHeapNumber() || type.IsString();
   LInstruction* branch = new (zone()) LBranch(UseRegister(value));
-  if (!easy_case &&
-      ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
-       !expected.IsGeneric())) {
+  if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) &&
+                      (expected & ToBooleanHint::kNeedsMap)) ||
+                     expected != ToBooleanHint::kAny)) {
     branch = AssignEnvironment(branch);
   }
   return branch;
@@ -1702,24 +1694,6 @@
   return new (zone()) LHasInstanceTypeAndBranch(value);
 }
 
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
-    HGetCachedArrayIndex* instr) {
-  DCHECK(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
-
-  return DefineAsRegister(new (zone()) LGetCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
-    HHasCachedArrayIndexAndBranch* instr) {
-  DCHECK(instr->value()->representation().IsTagged());
-  return new (zone())
-      LHasCachedArrayIndexAndBranch(UseRegisterAtStart(instr->value()));
-}
-
-
 LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
     HClassOfTestAndBranch* instr) {
   DCHECK(instr->value()->representation().IsTagged());
@@ -1974,15 +1948,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
-  LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
-  return MarkAsCall(DefineFixed(result, r3), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
   LInstruction* result =
@@ -2018,18 +1983,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* object =
-      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
-  LInstruction* result =
-      DefineFixed(new (zone()) LLoadNamedGeneric(context, object, vector), r3);
-  return MarkAsCall(result, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
     HLoadFunctionPrototype* instr) {
   return AssignEnvironment(DefineAsRegister(
@@ -2087,19 +2040,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* object =
-      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
-  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
-  LInstruction* result = DefineFixed(
-      new (zone()) LLoadKeyedGeneric(context, object, key, vector), r3);
-  return MarkAsCall(result, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
   if (!instr->is_fixed_typed_array()) {
     DCHECK(instr->elements()->representation().IsTagged());
@@ -2373,7 +2313,6 @@
   inner->BindContext(instr->closure_context());
   inner->set_entry(instr);
   current_block_->UpdateEnvironment(inner);
-  chunk_->AddInlinedFunction(instr->shared());
   return NULL;
 }
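
The ToBoolean changes in this file (and in the sibling s390 and x64 backends) swap the ToBooleanICStub::Types set for the ToBooleanHints bit mask; the DoBranch guard is behavior-preserving: kNone widens to kAny, and a hard case still needs an environment when it may inspect a map without Smis having been ruled in, or when the hints are narrower than generic. A compilable stand-in (only the names come from the patch; the bit assignments are assumptions):

    #include <cstdint>
    #include <cstdio>

    enum ToBooleanHint : uint16_t {
      kNone = 0,
      kSmallInteger = 1 << 0,
      kNeedsMap = 1 << 1,
      kAny = 0xffff,
    };
    using ToBooleanHints = uint16_t;

    // Mirrors the rewritten guard in DoBranch above.
    bool NeedsEnvironment(ToBooleanHints expected, bool easy_case) {
      if (expected == kNone) expected = kAny;
      return !easy_case &&
             ((!(expected & kSmallInteger) && (expected & kNeedsMap)) ||
              expected != kAny);
    }

    int main() {
      std::printf("%d\n", NeedsEnvironment(kNeedsMap, false));  // 1: may deopt
      std::printf("%d\n", NeedsEnvironment(kAny, false));       // 0: generic
    }
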
 
diff --git a/src/crankshaft/ppc/lithium-ppc.h b/src/crankshaft/ppc/lithium-ppc.h
index 626f00a..4dda385 100644
--- a/src/crankshaft/ppc/lithium-ppc.h
+++ b/src/crankshaft/ppc/lithium-ppc.h
@@ -71,9 +71,7 @@
   V(FlooringDivI)                            \
   V(ForInCacheArray)                         \
   V(ForInPrepareMap)                         \
-  V(GetCachedArrayIndex)                     \
   V(Goto)                                    \
-  V(HasCachedArrayIndexAndBranch)            \
   V(HasInPrototypeChainAndBranch)            \
   V(HasInstanceTypeAndBranch)                \
   V(InnerAllocatedObject)                    \
@@ -89,11 +87,8 @@
   V(LoadRoot)                                \
   V(LoadFieldByIndex)                        \
   V(LoadFunctionPrototype)                   \
-  V(LoadGlobalGeneric)                       \
   V(LoadKeyed)                               \
-  V(LoadKeyedGeneric)                        \
   V(LoadNamedField)                          \
-  V(LoadNamedGeneric)                        \
   V(MathAbs)                                 \
   V(MathClz32)                               \
   V(MathCos)                                 \
@@ -1058,34 +1053,6 @@
   void PrintDataTo(StringStream* stream) override;
 };
 
-
-class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LGetCachedArrayIndex(LOperand* value) { inputs_[0] = value; }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
-  DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
- public:
-  explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
-                               "has-cached-array-index-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
-
-  void PrintDataTo(StringStream* stream) override;
-};
-
-
 class LClassOfTestAndBranch final : public LControlInstruction<1, 1> {
  public:
   LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
@@ -1460,25 +1427,6 @@
 };
 
 
-class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
- public:
-  LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    temps_[0] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
 class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadFunctionPrototype(LOperand* function) { inputs_[0] = function; }
@@ -1523,43 +1471,6 @@
 };
 
 
-class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
- public:
-  LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
-                    LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    inputs_[2] = key;
-    temps_[0] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* key() { return inputs_[2]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
-};
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
- public:
-  LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
-    inputs_[0] = context;
-    temps_[0] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-  TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
-};
-
-
 class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadContextSlot(LOperand* context) { inputs_[0] = context; }
@@ -1910,6 +1821,8 @@
 
   DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
   DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToNumber(); }
 };
 
 
diff --git a/src/crankshaft/s390/lithium-codegen-s390.cc b/src/crankshaft/s390/lithium-codegen-s390.cc
index 4511bb9..c44df95 100644
--- a/src/crankshaft/s390/lithium-codegen-s390.cc
+++ b/src/crankshaft/s390/lithium-codegen-s390.cc
@@ -35,6 +35,23 @@
   Safepoint::DeoptMode deopt_mode_;
 };
 
+LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope(
+    LCodeGen* codegen)
+    : codegen_(codegen) {
+  DCHECK(codegen_->info()->is_calling());
+  DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+  codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+  StoreRegistersStateStub stub(codegen_->isolate());
+  codegen_->masm_->CallStub(&stub);
+}
+
+LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() {
+  DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+  RestoreRegistersStateStub stub(codegen_->isolate());
+  codegen_->masm_->CallStub(&stub);
+  codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+}
+
 #define __ masm()->
 
 bool LCodeGen::GenerateCode() {
@@ -241,8 +258,7 @@
 
       HValue* value =
           instructions_->at(code->instruction_index())->hydrogen_value();
-      RecordAndWritePosition(
-          chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+      RecordAndWritePosition(value->position());
 
       Comment(
           ";;; <@%d,#%d> "
@@ -2102,45 +2118,44 @@
       __ CmpP(ip, Operand::Zero());
       EmitBranch(instr, ne);
     } else {
-      ToBooleanICStub::Types expected =
-          instr->hydrogen()->expected_input_types();
+      ToBooleanHints expected = instr->hydrogen()->expected_input_types();
       // Avoid deopts in the case where we've never executed this path before.
-      if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+      if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
 
-      if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
+      if (expected & ToBooleanHint::kUndefined) {
         // undefined -> false.
         __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
         __ beq(instr->FalseLabel(chunk_));
       }
-      if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
+      if (expected & ToBooleanHint::kBoolean) {
         // Boolean -> its value.
         __ CompareRoot(reg, Heap::kTrueValueRootIndex);
         __ beq(instr->TrueLabel(chunk_));
         __ CompareRoot(reg, Heap::kFalseValueRootIndex);
         __ beq(instr->FalseLabel(chunk_));
       }
-      if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
+      if (expected & ToBooleanHint::kNull) {
         // 'null' -> false.
         __ CompareRoot(reg, Heap::kNullValueRootIndex);
         __ beq(instr->FalseLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanICStub::SMI)) {
+      if (expected & ToBooleanHint::kSmallInteger) {
         // Smis: 0 -> false, all others -> true.
         __ CmpP(reg, Operand::Zero());
         __ beq(instr->FalseLabel(chunk_));
         __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
-      } else if (expected.NeedsMap()) {
+      } else if (expected & ToBooleanHint::kNeedsMap) {
         // If we need a map later and have a Smi -> deopt.
         __ TestIfSmi(reg);
         DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
       }
 
       const Register map = scratch0();
-      if (expected.NeedsMap()) {
+      if (expected & ToBooleanHint::kNeedsMap) {
         __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));
 
-        if (expected.CanBeUndetectable()) {
+        if (expected & ToBooleanHint::kCanBeUndetectable) {
           // Undetectable -> false.
           __ tm(FieldMemOperand(map, Map::kBitFieldOffset),
                 Operand(1 << Map::kIsUndetectable));
@@ -2148,13 +2163,13 @@
         }
       }
 
-      if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
+      if (expected & ToBooleanHint::kReceiver) {
         // spec object -> true.
         __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
         __ bge(instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanICStub::STRING)) {
+      if (expected & ToBooleanHint::kString) {
         // String value -> false iff empty.
         Label not_string;
         __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
@@ -2166,20 +2181,20 @@
         __ bind(&not_string);
       }
 
-      if (expected.Contains(ToBooleanICStub::SYMBOL)) {
+      if (expected & ToBooleanHint::kSymbol) {
         // Symbol value -> true.
         __ CompareInstanceType(map, ip, SYMBOL_TYPE);
         __ beq(instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
+      if (expected & ToBooleanHint::kSimdValue) {
         // SIMD value -> true.
         Label not_simd;
         __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
         __ beq(instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
+      if (expected & ToBooleanHint::kHeapNumber) {
         // heap number -> false iff +0, -0, or NaN.
         Label not_heap_number;
         __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
@@ -2194,7 +2209,7 @@
         __ bind(&not_heap_number);
       }
 
-      if (!expected.IsGeneric()) {
+      if (expected != ToBooleanHint::kAny) {
         // We've seen something for the first time -> deopt.
         // This can only happen if we are not generic already.
         DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject);
@@ -2448,27 +2463,6 @@
   EmitBranch(instr, BranchCondition(instr->hydrogen()));
 }
 
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
-  Register input = ToRegister(instr->value());
-  Register result = ToRegister(instr->result());
-
-  __ AssertString(input);
-
-  __ LoadlW(result, FieldMemOperand(input, String::kHashFieldOffset));
-  __ IndexFromHash(result, result);
-}
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
-    LHasCachedArrayIndexAndBranch* instr) {
-  Register input = ToRegister(instr->value());
-  Register scratch = scratch0();
-
-  __ LoadlW(scratch, FieldMemOperand(input, String::kHashFieldOffset));
-  __ mov(r0, Operand(String::kContainsCachedArrayIndexMask));
-  __ AndP(r0, scratch);
-  EmitBranch(instr, eq);
-}
-
 // Branches to a label or falls through with the answer in flags.  Trashes
 // the temp registers, but not the input.
 void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
@@ -2637,33 +2631,6 @@
   __ Ret();
 }
 
-template <class T>
-void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
-  Register vector_register = ToRegister(instr->temp_vector());
-  Register slot_register = LoadDescriptor::SlotRegister();
-  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
-  DCHECK(slot_register.is(r2));
-
-  AllowDeferredHandleDereference vector_structure_check;
-  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
-  __ Move(vector_register, vector);
-  // No need to allocate this register.
-  FeedbackVectorSlot slot = instr->hydrogen()->slot();
-  int index = vector->GetIndex(slot);
-  __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
-}
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->result()).is(r2));
-
-  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  Handle<Code> ic =
-      CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
-          .code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
@@ -2753,18 +2720,6 @@
                         r0);
 }
 
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->result()).is(r2));
-
-  // Name is always in r4.
-  __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
-  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
   Register scratch = scratch0();
   Register function = ToRegister(instr->function());
@@ -3054,11 +3009,11 @@
     __ bne(&done);
     if (info()->IsStub()) {
       // A stub can safely convert the hole to undefined only if the array
-      // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
+      // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
       // it needs to bail out.
       __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
       __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
-      __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kArrayProtectorValid), r0);
+      __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kProtectorValid), r0);
       DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
     }
     __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
@@ -3112,16 +3067,6 @@
   return MemOperand(scratch, base, base_offset);
 }
 
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
-  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-
-  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   Register scratch = scratch0();
   Register result = ToRegister(instr->result());
@@ -3135,7 +3080,8 @@
     __ LoadP(
         result,
         MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
-    __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+    __ LoadSmiLiteral(r0, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+    __ CmpP(result, r0);
 
     // Result is the frame pointer for the frame if not adapted and for the real
     // frame below the adaptor frame if adapted.
@@ -3977,13 +3923,14 @@
   Representation representation = instr->hydrogen()->length()->representation();
   DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
   DCHECK(representation.IsSmiOrInteger32());
+  Register temp = scratch0();
 
   Condition cc = instr->hydrogen()->allow_equality() ? lt : le;
   if (instr->length()->IsConstantOperand()) {
     int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
     Register index = ToRegister(instr->index());
     if (representation.IsSmi()) {
-      __ CmpLogicalP(index, Operand(Smi::FromInt(length)));
+      __ CmpLogicalSmiLiteral(index, Smi::FromInt(length), temp);
     } else {
       __ CmpLogical32(index, Operand(length));
     }
@@ -3992,7 +3939,7 @@
     int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
     Register length = ToRegister(instr->length());
     if (representation.IsSmi()) {
-      __ CmpLogicalP(length, Operand(Smi::FromInt(index)));
+      __ CmpLogicalSmiLiteral(length, Smi::FromInt(index), temp);
     } else {
       __ CmpLogical32(length, Operand(index));
     }
@@ -4728,8 +4675,7 @@
 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                                 DoubleRegister result_reg,
                                 NumberUntagDMode mode) {
-  bool can_convert_undefined_to_nan =
-      instr->hydrogen()->can_convert_undefined_to_nan();
+  bool can_convert_undefined_to_nan = instr->truncating();
   bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
 
   Register scratch = scratch0();
@@ -4796,32 +4742,13 @@
   __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
 
   if (instr->truncating()) {
-    // Performs a truncating conversion of a floating point number as used by
-    // the JS bitwise operations.
-    Label no_heap_number, check_bools, check_false;
-    __ bne(&no_heap_number, Label::kNear);
+    Label truncate;
+    __ beq(&truncate);
+    __ CompareInstanceType(scratch1, scratch1, ODDBALL_TYPE);
+    DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball);
+    __ bind(&truncate);
     __ LoadRR(scratch2, input_reg);
     __ TruncateHeapNumberToI(input_reg, scratch2);
-    __ b(&done, Label::kNear);
-
-    // Check for Oddballs. Undefined/False is converted to zero and True to one
-    // for truncating conversions.
-    __ bind(&no_heap_number);
-    __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
-    __ bne(&check_bools);
-    __ LoadImmP(input_reg, Operand::Zero());
-    __ b(&done, Label::kNear);
-
-    __ bind(&check_bools);
-    __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
-    __ bne(&check_false, Label::kNear);
-    __ LoadImmP(input_reg, Operand(1));
-    __ b(&done, Label::kNear);
-
-    __ bind(&check_false);
-    __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
-    DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
-    __ LoadImmP(input_reg, Operand::Zero());
   } else {
     // Deoptimize if we don't have a heap number.
     DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
@@ -5228,7 +5155,7 @@
   // TODO(3095996): Get rid of this. For now, we need to make the
   // result register contain a valid pointer because it is already
   // contained in the register pointer map.
-  __ LoadSmiLiteral(result, Smi::FromInt(0));
+  __ LoadSmiLiteral(result, Smi::kZero);
 
   PushSafepointRegistersScope scope(this);
   if (instr->size()->IsRegister()) {
@@ -5318,8 +5245,8 @@
   __ mov(r2, Operand(isolate()->factory()->number_string()));
   __ b(&end);
   __ bind(&do_call);
-  TypeofStub stub(isolate());
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  Callable callable = CodeFactory::Typeof(isolate());
+  CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
   __ bind(&end);
 }
 
@@ -5546,7 +5473,7 @@
   Register result = ToRegister(instr->result());
   Label load_cache, done;
   __ EnumLength(result, map);
-  __ CmpSmiLiteral(result, Smi::FromInt(0), r0);
+  __ CmpSmiLiteral(result, Smi::kZero, r0);
   __ bne(&load_cache, Label::kNear);
   __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
   __ b(&done, Label::kNear);
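
Both the ppc and s390 hunks rename the sentinel from kArrayProtectorValid to kProtectorValid without changing the logic: the array protector is a cell holding a Smi that prototype-chain mutations invalidate, and a stub may convert the hole to undefined only while the cell still reads valid, deoptimizing with kHole otherwise. A toy model, assuming the conventional 1/0 encoding:

    // Illustrative only; this is not V8's Cell layout, and the constant
    // values are assumptions.
    struct Cell { int smi_value; };
    constexpr int kProtectorValid = 1;    // assumed encoding
    constexpr int kProtectorInvalid = 0;  // assumed encoding

    // Mirrors the guarded hole-to-undefined conversion in the hunks above.
    bool CanConvertHoleToUndefined(const Cell& array_protector) {
      return array_protector.smi_value == kProtectorValid;
    }
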
diff --git a/src/crankshaft/s390/lithium-codegen-s390.h b/src/crankshaft/s390/lithium-codegen-s390.h
index 30e9d2b..a8d59ff 100644
--- a/src/crankshaft/s390/lithium-codegen-s390.h
+++ b/src/crankshaft/s390/lithium-codegen-s390.h
@@ -293,21 +293,9 @@
 
   class PushSafepointRegistersScope final BASE_EMBEDDED {
    public:
-    explicit PushSafepointRegistersScope(LCodeGen* codegen)
-        : codegen_(codegen) {
-      DCHECK(codegen_->info()->is_calling());
-      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
-      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
-      StoreRegistersStateStub stub(codegen_->isolate());
-      codegen_->masm_->CallStub(&stub);
-    }
+    explicit PushSafepointRegistersScope(LCodeGen* codegen);
 
-    ~PushSafepointRegistersScope() {
-      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
-      RestoreRegistersStateStub stub(codegen_->isolate());
-      codegen_->masm_->CallStub(&stub);
-      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
-    }
+    ~PushSafepointRegistersScope();
 
    private:
     LCodeGen* codegen_;
diff --git a/src/crankshaft/s390/lithium-s390.cc b/src/crankshaft/s390/lithium-s390.cc
index bf9dfd5..3d14764 100644
--- a/src/crankshaft/s390/lithium-s390.cc
+++ b/src/crankshaft/s390/lithium-s390.cc
@@ -203,12 +203,6 @@
   stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
 }
 
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if has_cached_array_index(");
-  value()->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
 void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if class_of_test(");
   value()->PrintTo(stream);
@@ -813,15 +807,15 @@
   HValue* value = instr->value();
   Representation r = value->representation();
   HType type = value->type();
-  ToBooleanICStub::Types expected = instr->expected_input_types();
-  if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+  ToBooleanHints expected = instr->expected_input_types();
+  if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
 
   bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
                    type.IsJSArray() || type.IsHeapNumber() || type.IsString();
   LInstruction* branch = new (zone()) LBranch(UseRegister(value));
-  if (!easy_case &&
-      ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
-       !expected.IsGeneric())) {
+  if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) &&
+                      (expected & ToBooleanHint::kNeedsMap)) ||
+                     expected != ToBooleanHint::kAny)) {
     branch = AssignEnvironment(branch);
   }
   return branch;
@@ -1552,21 +1546,6 @@
   return new (zone()) LHasInstanceTypeAndBranch(value);
 }
 
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
-    HGetCachedArrayIndex* instr) {
-  DCHECK(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
-
-  return DefineAsRegister(new (zone()) LGetCachedArrayIndex(value));
-}
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
-    HHasCachedArrayIndexAndBranch* instr) {
-  DCHECK(instr->value()->representation().IsTagged());
-  return new (zone())
-      LHasCachedArrayIndexAndBranch(UseRegisterAtStart(instr->value()));
-}
-
 LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
     HClassOfTestAndBranch* instr) {
   DCHECK(instr->value()->representation().IsTagged());
@@ -1804,14 +1783,6 @@
   }
 }
 
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
-  LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
-  return MarkAsCall(DefineFixed(result, r2), instr);
-}
-
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
   LInstruction* result =
@@ -1844,17 +1815,6 @@
   return DefineAsRegister(new (zone()) LLoadNamedField(obj));
 }
 
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* object =
-      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
-  LInstruction* result =
-      DefineFixed(new (zone()) LLoadNamedGeneric(context, object, vector), r2);
-  return MarkAsCall(result, instr);
-}
-
 LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
     HLoadFunctionPrototype* instr) {
   return AssignEnvironment(DefineAsRegister(
@@ -1909,18 +1869,6 @@
   return result;
 }
 
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* object =
-      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
-  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
-  LInstruction* result = DefineFixed(
-      new (zone()) LLoadKeyedGeneric(context, object, key, vector), r2);
-  return MarkAsCall(result, instr);
-}
-
 LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
   if (!instr->is_fixed_typed_array()) {
     DCHECK(instr->elements()->representation().IsTagged());
@@ -2175,7 +2123,6 @@
   inner->BindContext(instr->closure_context());
   inner->set_entry(instr);
   current_block_->UpdateEnvironment(inner);
-  chunk_->AddInlinedFunction(instr->shared());
   return NULL;
 }
 
diff --git a/src/crankshaft/s390/lithium-s390.h b/src/crankshaft/s390/lithium-s390.h
index 70670ac..b946d4f 100644
--- a/src/crankshaft/s390/lithium-s390.h
+++ b/src/crankshaft/s390/lithium-s390.h
@@ -71,9 +71,7 @@
   V(FlooringDivI)                            \
   V(ForInCacheArray)                         \
   V(ForInPrepareMap)                         \
-  V(GetCachedArrayIndex)                     \
   V(Goto)                                    \
-  V(HasCachedArrayIndexAndBranch)            \
   V(HasInPrototypeChainAndBranch)            \
   V(HasInstanceTypeAndBranch)                \
   V(InnerAllocatedObject)                    \
@@ -89,11 +87,8 @@
   V(LoadRoot)                                \
   V(LoadFieldByIndex)                        \
   V(LoadFunctionPrototype)                   \
-  V(LoadGlobalGeneric)                       \
   V(LoadKeyed)                               \
-  V(LoadKeyedGeneric)                        \
   V(LoadNamedField)                          \
-  V(LoadNamedGeneric)                        \
   V(MathAbs)                                 \
   V(MathClz32)                               \
   V(MathCos)                                 \
@@ -982,31 +977,6 @@
   void PrintDataTo(StringStream* stream) override;
 };
 
-class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LGetCachedArrayIndex(LOperand* value) { inputs_[0] = value; }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
-  DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
- public:
-  explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
-                               "has-cached-array-index-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
-
-  void PrintDataTo(StringStream* stream) override;
-};
-
 class LClassOfTestAndBranch final : public LControlInstruction<1, 1> {
  public:
   LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
@@ -1358,24 +1328,6 @@
   DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
 };
 
-class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
- public:
-  LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    temps_[0] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-};
-
 class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadFunctionPrototype(LOperand* function) { inputs_[0] = function; }
@@ -1417,42 +1369,6 @@
   uint32_t base_offset() const { return hydrogen()->base_offset(); }
 };
 
-class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
- public:
-  LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
-                    LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    inputs_[2] = key;
-    temps_[0] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* key() { return inputs_[2]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
-};
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
- public:
-  LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
-    inputs_[0] = context;
-    temps_[0] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-  TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
-};
-
 class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadContextSlot(LOperand* context) { inputs_[0] = context; }
@@ -1781,6 +1697,8 @@
 
   DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
   DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToNumber(); }
 };
 
 class LSmiUntag final : public LTemplateInstruction<1, 1, 0> {
diff --git a/src/crankshaft/typing.cc b/src/crankshaft/typing.cc
index d2b56e2..f21d235 100644
--- a/src/crankshaft/typing.cc
+++ b/src/crankshaft/typing.cc
@@ -85,10 +85,8 @@
                     store_.LookupBounds(parameter_index(i)).lower);
     }
 
-    ZoneList<Variable*>* local_vars = scope_->locals();
     int local_index = 0;
-    for (int i = 0; i < local_vars->length(); i++) {
-      Variable* var = local_vars->at(i);
+    for (Variable* var : *scope_->locals()) {
       if (var->IsStackLocal()) {
         PrintObserved(
             var, frame->GetExpression(local_index),
@@ -517,16 +515,12 @@
 void AstTyper::VisitCall(Call* expr) {
   // Collect type feedback.
   RECURSE(Visit(expr->expression()));
-  bool is_uninitialized = true;
-  if (expr->IsUsingCallFeedbackICSlot()) {
-    FeedbackVectorSlot slot = expr->CallFeedbackICSlot();
-    is_uninitialized = oracle()->CallIsUninitialized(slot);
-    if (!expr->expression()->IsProperty() &&
-        oracle()->CallIsMonomorphic(slot)) {
-      expr->set_target(oracle()->GetCallTarget(slot));
-      Handle<AllocationSite> site = oracle()->GetCallAllocationSite(slot);
-      expr->set_allocation_site(site);
-    }
+  FeedbackVectorSlot slot = expr->CallFeedbackICSlot();
+  bool is_uninitialized = oracle()->CallIsUninitialized(slot);
+  if (!expr->expression()->IsProperty() && oracle()->CallIsMonomorphic(slot)) {
+    expr->set_target(oracle()->GetCallTarget(slot));
+    Handle<AllocationSite> site = oracle()->GetCallAllocationSite(slot);
+    expr->set_allocation_site(site);
   }
 
   expr->set_is_uninitialized(is_uninitialized);
@@ -785,9 +779,8 @@
              : var->IsParameter() ? parameter_index(var->index()) : kNoVar;
 }
 
-void AstTyper::VisitDeclarations(ZoneList<Declaration*>* decls) {
-  for (int i = 0; i < decls->length(); ++i) {
-    Declaration* decl = decls->at(i);
+void AstTyper::VisitDeclarations(Declaration::List* decls) {
+  for (Declaration* decl : *decls) {
     RECURSE(Visit(decl));
   }
 }
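
typing.cc replaces its indexed ZoneList walks with range-based for over the scope's locals and over Declaration::List, and VisitCall drops the IsUsingCallFeedbackICSlot() guard, so every call expression is now assumed to carry a feedback slot. For the range-for to compile, Declaration::List only needs begin()/end() over an intrusive chain; a minimal sketch of that shape (layout and names here are assumptions, not V8's actual list class):

    #include <cassert>

    struct Declaration {
      Declaration* next = nullptr;  // intrusive link: the node owns its pointer
    };

    class DeclarationList {
     public:
      struct Iterator {
        Declaration* node;
        Declaration* operator*() const { return node; }
        Iterator& operator++() { node = node->next; return *this; }
        bool operator!=(const Iterator& o) const { return node != o.node; }
      };
      void Add(Declaration* d) { *tail_ = d; tail_ = &d->next; }
      Iterator begin() { return {head_}; }
      Iterator end() { return {nullptr}; }

     private:
      Declaration* head_ = nullptr;
      Declaration** tail_ = &head_;
    };

    int main() {
      Declaration a, b;
      DeclarationList decls;
      decls.Add(&a);
      decls.Add(&b);
      int seen = 0;
      for (Declaration* d : decls) { (void)d; ++seen; }  // the new-style walk
      assert(seen == 2);
    }
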
diff --git a/src/crankshaft/typing.h b/src/crankshaft/typing.h
index eb88634..add457b 100644
--- a/src/crankshaft/typing.h
+++ b/src/crankshaft/typing.h
@@ -69,7 +69,7 @@
 
   int variable_index(Variable* var);
 
-  void VisitDeclarations(ZoneList<Declaration*>* declarations);
+  void VisitDeclarations(Declaration::List* declarations);
   void VisitStatements(ZoneList<Statement*>* statements);
 
 #define DECLARE_VISIT(type) void Visit##type(type* node);
diff --git a/src/crankshaft/x64/lithium-codegen-x64.cc b/src/crankshaft/x64/lithium-codegen-x64.cc
index 50e2aa0..6889040 100644
--- a/src/crankshaft/x64/lithium-codegen-x64.cc
+++ b/src/crankshaft/x64/lithium-codegen-x64.cc
@@ -356,8 +356,7 @@
 
       HValue* value =
           instructions_->at(code->instruction_index())->hydrogen_value();
-      RecordAndWritePosition(
-          chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+      RecordAndWritePosition(value->position());
 
       Comment(";;; <@%d,#%d> "
               "-------------------- Deferred %s --------------------",
@@ -1977,7 +1976,7 @@
       EmitBranch(instr, equal);
     } else if (type.IsSmi()) {
       DCHECK(!info()->IsStub());
-      __ SmiCompare(reg, Smi::FromInt(0));
+      __ SmiCompare(reg, Smi::kZero);
       EmitBranch(instr, not_equal);
     } else if (type.IsJSArray()) {
       DCHECK(!info()->IsStub());
@@ -1993,17 +1992,16 @@
       __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
       EmitBranch(instr, not_equal);
     } else {
-      ToBooleanICStub::Types expected =
-          instr->hydrogen()->expected_input_types();
+      ToBooleanHints expected = instr->hydrogen()->expected_input_types();
       // Avoid deopts in the case where we've never executed this path before.
-      if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+      if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
 
-      if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
+      if (expected & ToBooleanHint::kUndefined) {
         // undefined -> false.
         __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
         __ j(equal, instr->FalseLabel(chunk_));
       }
-      if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
+      if (expected & ToBooleanHint::kBoolean) {
         // true -> true.
         __ CompareRoot(reg, Heap::kTrueValueRootIndex);
         __ j(equal, instr->TrueLabel(chunk_));
@@ -2011,28 +2009,28 @@
         __ CompareRoot(reg, Heap::kFalseValueRootIndex);
         __ j(equal, instr->FalseLabel(chunk_));
       }
-      if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
+      if (expected & ToBooleanHint::kNull) {
         // 'null' -> false.
         __ CompareRoot(reg, Heap::kNullValueRootIndex);
         __ j(equal, instr->FalseLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanICStub::SMI)) {
+      if (expected & ToBooleanHint::kSmallInteger) {
         // Smis: 0 -> false, all others -> true.
-        __ Cmp(reg, Smi::FromInt(0));
+        __ Cmp(reg, Smi::kZero);
         __ j(equal, instr->FalseLabel(chunk_));
         __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
-      } else if (expected.NeedsMap()) {
+      } else if (expected & ToBooleanHint::kNeedsMap) {
         // If we need a map later and have a Smi -> deopt.
         __ testb(reg, Immediate(kSmiTagMask));
         DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
       }
 
       const Register map = kScratchRegister;
-      if (expected.NeedsMap()) {
+      if (expected & ToBooleanHint::kNeedsMap) {
         __ movp(map, FieldOperand(reg, HeapObject::kMapOffset));
 
-        if (expected.CanBeUndetectable()) {
+        if (expected & ToBooleanHint::kCanBeUndetectable) {
           // Undetectable -> false.
           __ testb(FieldOperand(map, Map::kBitFieldOffset),
                    Immediate(1 << Map::kIsUndetectable));
@@ -2040,13 +2038,13 @@
         }
       }
 
-      if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
+      if (expected & ToBooleanHint::kReceiver) {
         // spec object -> true.
         __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
         __ j(above_equal, instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanICStub::STRING)) {
+      if (expected & ToBooleanHint::kString) {
         // String value -> false iff empty.
         Label not_string;
         __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
@@ -2057,19 +2055,19 @@
         __ bind(&not_string);
       }
 
-      if (expected.Contains(ToBooleanICStub::SYMBOL)) {
+      if (expected & ToBooleanHint::kSymbol) {
         // Symbol value -> true.
         __ CmpInstanceType(map, SYMBOL_TYPE);
         __ j(equal, instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
+      if (expected & ToBooleanHint::kSimdValue) {
         // SIMD value -> true.
         __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
         __ j(equal, instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
+      if (expected & ToBooleanHint::kHeapNumber) {
         // heap number -> false iff +0, -0, or NaN.
         Label not_heap_number;
         __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
@@ -2082,7 +2080,7 @@
         __ bind(&not_heap_number);
       }
 
-      if (!expected.IsGeneric()) {
+      if (expected != ToBooleanHint::kAny) {
         // We've seen something for the first time -> deopt.
         // This can only happen if we are not generic already.
         DeoptimizeIf(no_condition, instr, DeoptimizeReason::kUnexpectedObject);
@@ -2337,29 +2335,6 @@
   EmitBranch(instr, BranchCondition(instr->hydrogen()));
 }
 
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
-  Register input = ToRegister(instr->value());
-  Register result = ToRegister(instr->result());
-
-  __ AssertString(input);
-
-  __ movl(result, FieldOperand(input, String::kHashFieldOffset));
-  DCHECK(String::kHashShift >= kSmiTagSize);
-  __ IndexFromHash(result, result);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
-    LHasCachedArrayIndexAndBranch* instr) {
-  Register input = ToRegister(instr->value());
-
-  __ testl(FieldOperand(input, String::kHashFieldOffset),
-           Immediate(String::kContainsCachedArrayIndexMask));
-  EmitBranch(instr, equal);
-}
-
-
 // Branches to a label or falls through with the answer in the z flag.
 // Trashes the temp register.
 void LCodeGen::EmitClassOfTest(Label* is_true,
@@ -2522,35 +2497,6 @@
 }
 
 
-template <class T>
-void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
-  Register vector_register = ToRegister(instr->temp_vector());
-  Register slot_register = LoadWithVectorDescriptor::SlotRegister();
-  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
-  DCHECK(slot_register.is(rax));
-
-  AllowDeferredHandleDereference vector_structure_check;
-  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
-  __ Move(vector_register, vector);
-  // No need to allocate this register.
-  FeedbackVectorSlot slot = instr->hydrogen()->slot();
-  int index = vector->GetIndex(slot);
-  __ Move(slot_register, Smi::FromInt(index));
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(rsi));
-  DCHECK(ToRegister(instr->result()).is(rax));
-
-  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  Handle<Code> ic =
-      CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
-          .code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
@@ -2654,18 +2600,6 @@
 }
 
 
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(rsi));
-  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->result()).is(rax));
-
-  __ Move(LoadDescriptor::NameRegister(), instr->name());
-  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
   Register function = ToRegister(instr->function());
   Register result = ToRegister(instr->result());
@@ -2888,11 +2822,11 @@
     __ j(not_equal, &done);
     if (info()->IsStub()) {
       // A stub can safely convert the hole to undefined only if the array
-      // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
+      // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
       // it needs to bail out.
       __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
       __ Cmp(FieldOperand(result, Cell::kValueOffset),
-             Smi::FromInt(Isolate::kArrayProtectorValid));
+             Smi::FromInt(Isolate::kProtectorValid));
       DeoptimizeIf(not_equal, instr, DeoptimizeReason::kHole);
     }
     __ Move(result, isolate()->factory()->undefined_value());
@@ -2940,18 +2874,6 @@
 }
 
 
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(rsi));
-  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
-
-  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-
-  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   Register result = ToRegister(instr->result());
 
@@ -4190,7 +4112,7 @@
   // result register contain a valid pointer because it is already
   // contained in the register pointer map.
   Register result = rax;
-  __ Move(result, Smi::FromInt(0));
+  __ Move(result, Smi::kZero);
 
   // We have to call a stub.
   {
@@ -4559,7 +4481,7 @@
   // result register contain a valid pointer because it is already
   // contained in the register pointer map.
   Register reg = ToRegister(instr->result());
-  __ Move(reg, Smi::FromInt(0));
+  __ Move(reg, Smi::kZero);
 
   {
     PushSafepointRegistersScope scope(this);
@@ -4608,8 +4530,7 @@
 
 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                                 XMMRegister result_reg, NumberUntagDMode mode) {
-  bool can_convert_undefined_to_nan =
-      instr->hydrogen()->can_convert_undefined_to_nan();
+  bool can_convert_undefined_to_nan = instr->truncating();
   bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
 
   Label convert, load_smi, done;
@@ -4671,34 +4592,17 @@
   Register input_reg = ToRegister(instr->value());
 
   if (instr->truncating()) {
-    Label no_heap_number, check_bools, check_false;
-
-    // Heap number map check.
-    __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
-                   Heap::kHeapNumberMapRootIndex);
-    __ j(not_equal, &no_heap_number, Label::kNear);
+    Register input_map_reg = kScratchRegister;
+    Label truncate;
+    Label::Distance truncate_distance =
+        DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+    __ movp(input_map_reg, FieldOperand(input_reg, HeapObject::kMapOffset));
+    __ JumpIfRoot(input_map_reg, Heap::kHeapNumberMapRootIndex, &truncate,
+                  truncate_distance);
+    __ CmpInstanceType(input_map_reg, ODDBALL_TYPE);
+    DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotANumberOrOddball);
+    __ bind(&truncate);
     __ TruncateHeapNumberToI(input_reg, input_reg);
-    __ jmp(done);
-
-    __ bind(&no_heap_number);
-    // Check for Oddballs. Undefined/False is converted to zero and True to one
-    // for truncating conversions.
-    __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
-    __ j(not_equal, &check_bools, Label::kNear);
-    __ Set(input_reg, 0);
-    __ jmp(done);
-
-    __ bind(&check_bools);
-    __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
-    __ j(not_equal, &check_false, Label::kNear);
-    __ Set(input_reg, 1);
-    __ jmp(done);
-
-    __ bind(&check_false);
-    __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
-    DeoptimizeIf(not_equal, instr,
-                 DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
-    __ Set(input_reg, 0);
   } else {
     XMMRegister scratch = ToDoubleRegister(instr->temp());
     DCHECK(!scratch.is(double_scratch0()));
@@ -5123,7 +5027,7 @@
   // TODO(3095996): Get rid of this. For now, we need to make the
   // result register contain a valid pointer because it is already
   // contained in the register pointer map.
-  __ Move(result, Smi::FromInt(0));
+  __ Move(result, Smi::kZero);
 
   PushSafepointRegistersScope scope(this);
   if (instr->size()->IsRegister()) {
@@ -5431,7 +5335,7 @@
   Register result = ToRegister(instr->result());
   Label load_cache, done;
   __ EnumLength(result, map);
-  __ Cmp(result, Smi::FromInt(0));
+  __ Cmp(result, Smi::kZero);
   __ j(not_equal, &load_cache, Label::kNear);
   __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
   __ jmp(&done, Label::kNear);
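
The x64 truncating path also chooses its jump width dynamically: JumpIfRoot normally reaches &truncate with Label::kNear, but under --deopt-every-n-times each DeoptimizeIf emits extra counter code that can push the bound label beyond a rel8 jump's range, so DeoptEveryNTimes() forces Label::kFar. A toy illustration of the overflow arithmetic (all byte counts invented):

    #include <cstdio>
    #include <initializer_list>

    int main() {
      const int kBaseBytes = 38;        // code between the jump and bind()
      const int kDeoptCheckBytes = 24;  // extra bytes per instrumented deopt
      for (int checks : {0, 4, 8}) {
        int gap = kBaseBytes + checks * kDeoptCheckBytes;
        // A rel8 jump reaches at most 127 bytes forward.
        std::printf("checks=%d gap=%d -> Label::%s\n", checks, gap,
                    gap <= 127 ? "kNear" : "kFar");
      }
    }
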
diff --git a/src/crankshaft/x64/lithium-x64.cc b/src/crankshaft/x64/lithium-x64.cc
index 18fb5d4..bc9040b 100644
--- a/src/crankshaft/x64/lithium-x64.cc
+++ b/src/crankshaft/x64/lithium-x64.cc
@@ -219,13 +219,6 @@
 }
 
 
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if has_cached_array_index(");
-  value()->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
 void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if class_of_test(");
   value()->PrintTo(stream);
@@ -905,15 +898,15 @@
   HValue* value = instr->value();
   Representation r = value->representation();
   HType type = value->type();
-  ToBooleanICStub::Types expected = instr->expected_input_types();
-  if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+  ToBooleanHints expected = instr->expected_input_types();
+  if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
 
   bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
       type.IsJSArray() || type.IsHeapNumber() || type.IsString();
   LInstruction* branch = new(zone()) LBranch(UseRegister(value));
-  if (!easy_case &&
-      ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
-       !expected.IsGeneric())) {
+  if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) &&
+                      (expected & ToBooleanHint::kNeedsMap)) ||
+                     expected != ToBooleanHint::kAny)) {
     branch = AssignEnvironment(branch);
   }
   return branch;
@@ -1704,24 +1697,6 @@
   return new(zone()) LHasInstanceTypeAndBranch(value);
 }
 
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
-    HGetCachedArrayIndex* instr)  {
-  DCHECK(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
-
-  return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
-    HHasCachedArrayIndexAndBranch* instr) {
-  DCHECK(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
-  return new(zone()) LHasCachedArrayIndexAndBranch(value);
-}
-
-
 LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
     HClassOfTestAndBranch* instr) {
   LOperand* value = UseRegister(instr->value());
@@ -1984,15 +1959,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), rsi);
-  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
-  LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
-  return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
   LInstruction* result =
@@ -2041,17 +2007,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), rsi);
-  LOperand* object =
-      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
-      context, object, vector);
-  return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
     HLoadFunctionPrototype* instr) {
   return AssignEnvironment(DefineAsRegister(
@@ -2138,19 +2093,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), rsi);
-  LOperand* object =
-      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
-  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
-  LLoadKeyedGeneric* result =
-      new(zone()) LLoadKeyedGeneric(context, object, key, vector);
-  return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
   ElementsKind elements_kind = instr->elements_kind();
 
@@ -2474,7 +2416,6 @@
   inner->BindContext(instr->closure_context());
   inner->set_entry(instr);
   current_block_->UpdateEnvironment(inner);
-  chunk_->AddInlinedFunction(instr->shared());
   return NULL;
 }
 
diff --git a/src/crankshaft/x64/lithium-x64.h b/src/crankshaft/x64/lithium-x64.h
index e7eaa01..3c953ff 100644
--- a/src/crankshaft/x64/lithium-x64.h
+++ b/src/crankshaft/x64/lithium-x64.h
@@ -71,9 +71,7 @@
   V(FlooringDivI)                            \
   V(ForInCacheArray)                         \
   V(ForInPrepareMap)                         \
-  V(GetCachedArrayIndex)                     \
   V(Goto)                                    \
-  V(HasCachedArrayIndexAndBranch)            \
   V(HasInPrototypeChainAndBranch)            \
   V(HasInstanceTypeAndBranch)                \
   V(InnerAllocatedObject)                    \
@@ -89,11 +87,8 @@
   V(LoadRoot)                                \
   V(LoadFieldByIndex)                        \
   V(LoadFunctionPrototype)                   \
-  V(LoadGlobalGeneric)                       \
   V(LoadKeyed)                               \
-  V(LoadKeyedGeneric)                        \
   V(LoadNamedField)                          \
-  V(LoadNamedGeneric)                        \
   V(MathAbs)                                 \
   V(MathClz32)                               \
   V(MathCos)                                 \
@@ -1074,36 +1069,6 @@
   void PrintDataTo(StringStream* stream) override;
 };
 
-
-class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LGetCachedArrayIndex(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
-  DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
- public:
-  explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
-                               "has-cached-array-index-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
-
-  void PrintDataTo(StringStream* stream) override;
-};
-
-
 class LClassOfTestAndBranch final : public LControlInstruction<1, 2> {
  public:
   LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
@@ -1490,26 +1455,6 @@
 };
 
 
-class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
- public:
-  explicit LLoadNamedGeneric(LOperand* context, LOperand* object,
-                             LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    temps_[0] = vector;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
 class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadFunctionPrototype(LOperand* function) {
@@ -1569,43 +1514,6 @@
 };
 
 
-class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
- public:
-  LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key,
-                    LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = obj;
-    inputs_[2] = key;
-    temps_[0] = vector;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* key() { return inputs_[2]; }
-  LOperand* temp_vector() { return temps_[0]; }
-};
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
- public:
-  explicit LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
-    inputs_[0] = context;
-    temps_[0] = vector;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-  TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
-};
-
-
 class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadContextSlot(LOperand* context) {
@@ -1968,6 +1876,8 @@
 
   DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
   DECLARE_HYDROGEN_ACCESSOR(Change);
+
+  bool truncating() { return hydrogen()->CanTruncateToNumber(); }
 };
 
 
diff --git a/src/crankshaft/x87/lithium-codegen-x87.cc b/src/crankshaft/x87/lithium-codegen-x87.cc
index 2d597d4..b83d97f 100644
--- a/src/crankshaft/x87/lithium-codegen-x87.cc
+++ b/src/crankshaft/x87/lithium-codegen-x87.cc
@@ -330,8 +330,7 @@
 
       HValue* value =
           instructions_->at(code->instruction_index())->hydrogen_value();
-      RecordAndWritePosition(
-          chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+      RecordAndWritePosition(value->position());
 
       Comment(";;; <@%d,#%d> "
               "-------------------- Deferred %s --------------------",
@@ -2125,16 +2124,15 @@
       __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
       EmitBranch(instr, not_equal);
     } else {
-      ToBooleanICStub::Types expected =
-          instr->hydrogen()->expected_input_types();
-      if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+      ToBooleanHints expected = instr->hydrogen()->expected_input_types();
+      if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
 
-      if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
+      if (expected & ToBooleanHint::kUndefined) {
         // undefined -> false.
         __ cmp(reg, factory()->undefined_value());
         __ j(equal, instr->FalseLabel(chunk_));
       }
-      if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
+      if (expected & ToBooleanHint::kBoolean) {
         // true -> true.
         __ cmp(reg, factory()->true_value());
         __ j(equal, instr->TrueLabel(chunk_));
@@ -2142,30 +2140,30 @@
         __ cmp(reg, factory()->false_value());
         __ j(equal, instr->FalseLabel(chunk_));
       }
-      if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
+      if (expected & ToBooleanHint::kNull) {
         // 'null' -> false.
         __ cmp(reg, factory()->null_value());
         __ j(equal, instr->FalseLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanICStub::SMI)) {
+      if (expected & ToBooleanHint::kSmallInteger) {
         // Smis: 0 -> false, all other -> true.
         __ test(reg, Operand(reg));
         __ j(equal, instr->FalseLabel(chunk_));
         __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
-      } else if (expected.NeedsMap()) {
+      } else if (expected & ToBooleanHint::kNeedsMap) {
         // If we need a map later and have a Smi -> deopt.
         __ test(reg, Immediate(kSmiTagMask));
         DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
       }
 
       Register map = no_reg;  // Keep the compiler happy.
-      if (expected.NeedsMap()) {
+      if (expected & ToBooleanHint::kNeedsMap) {
         map = ToRegister(instr->temp());
         DCHECK(!map.is(reg));
         __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
 
-        if (expected.CanBeUndetectable()) {
+        if (expected & ToBooleanHint::kCanBeUndetectable) {
           // Undetectable -> false.
           __ test_b(FieldOperand(map, Map::kBitFieldOffset),
                     Immediate(1 << Map::kIsUndetectable));
@@ -2173,13 +2171,13 @@
         }
       }
 
-      if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
+      if (expected & ToBooleanHint::kReceiver) {
         // spec object -> true.
         __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
         __ j(above_equal, instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanICStub::STRING)) {
+      if (expected & ToBooleanHint::kString) {
         // String value -> false iff empty.
         Label not_string;
         __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
@@ -2190,19 +2188,19 @@
         __ bind(&not_string);
       }
 
-      if (expected.Contains(ToBooleanICStub::SYMBOL)) {
+      if (expected & ToBooleanHint::kSymbol) {
         // Symbol value -> true.
         __ CmpInstanceType(map, SYMBOL_TYPE);
         __ j(equal, instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
+      if (expected & ToBooleanHint::kSimdValue) {
         // SIMD value -> true.
         __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
         __ j(equal, instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
+      if (expected & ToBooleanHint::kHeapNumber) {
         // heap number -> false iff +0, -0, or NaN.
         Label not_heap_number;
         __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
@@ -2216,7 +2214,7 @@
         __ bind(&not_heap_number);
       }
 
-      if (!expected.IsGeneric()) {
+      if (expected != ToBooleanHint::kAny) {
         // We've seen something for the first time -> deopt.
         // This can only happen if we are not generic already.
         DeoptimizeIf(no_condition, instr, DeoptimizeReason::kUnexpectedObject);
@@ -2477,28 +2475,6 @@
   EmitBranch(instr, BranchCondition(instr->hydrogen()));
 }
 
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
-  Register input = ToRegister(instr->value());
-  Register result = ToRegister(instr->result());
-
-  __ AssertString(input);
-
-  __ mov(result, FieldOperand(input, String::kHashFieldOffset));
-  __ IndexFromHash(result, result);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
-    LHasCachedArrayIndexAndBranch* instr) {
-  Register input = ToRegister(instr->value());
-
-  __ test(FieldOperand(input, String::kHashFieldOffset),
-          Immediate(String::kContainsCachedArrayIndexMask));
-  EmitBranch(instr, equal);
-}
-
-
 // Branches to a label or falls through with the answer in the z flag.  Trashes
 // the temp registers, but not the input.
 void LCodeGen::EmitClassOfTest(Label* is_true,
@@ -2664,35 +2640,6 @@
 }
 
 
-template <class T>
-void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
-  Register vector_register = ToRegister(instr->temp_vector());
-  Register slot_register = LoadWithVectorDescriptor::SlotRegister();
-  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
-  DCHECK(slot_register.is(eax));
-
-  AllowDeferredHandleDereference vector_structure_check;
-  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
-  __ mov(vector_register, vector);
-  // No need to allocate this register.
-  FeedbackVectorSlot slot = instr->hydrogen()->slot();
-  int index = vector->GetIndex(slot);
-  __ mov(slot_register, Immediate(Smi::FromInt(index)));
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->result()).is(eax));
-
-  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  Handle<Code> ic =
-      CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
-          .code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
@@ -2790,18 +2737,6 @@
 }
 
 
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->result()).is(eax));
-
-  __ mov(LoadDescriptor::NameRegister(), instr->name());
-  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
   Register function = ToRegister(instr->function());
   Register temp = ToRegister(instr->temp());
@@ -2965,11 +2900,11 @@
     __ j(not_equal, &done);
     if (info()->IsStub()) {
       // A stub can safely convert the hole to undefined only if the array
-      // protector cell contains (Smi) Isolate::kArrayProtectorValid.
+      // protector cell contains (Smi) Isolate::kProtectorValid.
       // Otherwise it needs to bail out.
       __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
       __ cmp(FieldOperand(result, PropertyCell::kValueOffset),
-             Immediate(Smi::FromInt(Isolate::kArrayProtectorValid)));
+             Immediate(Smi::FromInt(Isolate::kProtectorValid)));
       DeoptimizeIf(not_equal, instr, DeoptimizeReason::kHole);
     }
     __ mov(result, isolate()->factory()->undefined_value());
@@ -3020,18 +2955,6 @@
 }
 
 
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
-
-  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-
-  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   Register result = ToRegister(instr->result());
 
@@ -4671,8 +4594,7 @@
 void LCodeGen::EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input_reg,
                                       Register temp_reg, X87Register res_reg,
                                       NumberUntagDMode mode) {
-  bool can_convert_undefined_to_nan =
-      instr->hydrogen()->can_convert_undefined_to_nan();
+  bool can_convert_undefined_to_nan = instr->truncating();
   bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
 
   Label load_smi, done;
@@ -4748,34 +4670,18 @@
   __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));
 
   if (instr->truncating()) {
-    Label no_heap_number, check_bools, check_false;
-
-    // Heap number map check.
+    Label truncate;
+    Label::Distance truncate_distance =
+        DeoptEveryNTimes() ? Label::kFar : Label::kNear;
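+    // DeoptEveryNTimes() makes each DeoptimizeIf emit extra code, which can
+    // push &truncate out of near-jump range; use a far jump in that case.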
     __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
            factory()->heap_number_map());
-    __ j(not_equal, &no_heap_number, Label::kNear);
+    __ j(equal, &truncate, truncate_distance);
+    __ push(input_reg);
+    __ CmpObjectType(input_reg, ODDBALL_TYPE, input_reg);
+    __ pop(input_reg);
+    DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotANumberOrOddball);
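+    // Oddballs fall through to the truncation below: we assume, as upstream
+    // V8 asserts, that an Oddball's to-number value sits at the same offset
+    // as a HeapNumber's value, so TruncateHeapNumberToI handles both.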
+    __ bind(&truncate);
     __ TruncateHeapNumberToI(input_reg, input_reg);
-    __ jmp(done);
-
-    __ bind(&no_heap_number);
-    // Check for Oddballs. Undefined/False is converted to zero and True to one
-    // for truncating conversions.
-    __ cmp(input_reg, factory()->undefined_value());
-    __ j(not_equal, &check_bools, Label::kNear);
-    __ Move(input_reg, Immediate(0));
-    __ jmp(done);
-
-    __ bind(&check_bools);
-    __ cmp(input_reg, factory()->true_value());
-    __ j(not_equal, &check_false, Label::kNear);
-    __ Move(input_reg, Immediate(1));
-    __ jmp(done);
-
-    __ bind(&check_false);
-    __ cmp(input_reg, factory()->false_value());
-    DeoptimizeIf(not_equal, instr,
-                 DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
-    __ Move(input_reg, Immediate(0));
   } else {
     // TODO(olivf) Converting a number on the fpu is actually quite slow. We
     // should first try a fast conversion and then bailout to this slow case.
@@ -5332,7 +5238,7 @@
   // TODO(3095996): Get rid of this. For now, we need to make the
   // result register contain a valid pointer because it is already
   // contained in the register pointer map.
-  __ Move(result, Immediate(Smi::FromInt(0)));
+  __ Move(result, Immediate(Smi::kZero));
 
   PushSafepointRegistersScope scope(this);
   if (instr->size()->IsRegister()) {
@@ -5638,7 +5544,7 @@
   Register result = ToRegister(instr->result());
   Label load_cache, done;
   __ EnumLength(result, map);
-  __ cmp(result, Immediate(Smi::FromInt(0)));
+  __ cmp(result, Immediate(Smi::kZero));
   __ j(not_equal, &load_cache, Label::kNear);
   __ mov(result, isolate()->factory()->empty_fixed_array());
   __ jmp(&done, Label::kNear);
diff --git a/src/crankshaft/x87/lithium-x87.cc b/src/crankshaft/x87/lithium-x87.cc
index a319c0c..1844d24 100644
--- a/src/crankshaft/x87/lithium-x87.cc
+++ b/src/crankshaft/x87/lithium-x87.cc
@@ -235,14 +235,6 @@
   stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
 }
 
-
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if has_cached_array_index(");
-  value()->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
 void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if class_of_test(");
   value()->PrintTo(stream);
@@ -923,18 +915,20 @@
   HValue* value = instr->value();
   Representation r = value->representation();
   HType type = value->type();
-  ToBooleanICStub::Types expected = instr->expected_input_types();
-  if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+  ToBooleanHints expected = instr->expected_input_types();
+  if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
 
   bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
       type.IsJSArray() || type.IsHeapNumber() || type.IsString();
-  LOperand* temp = !easy_case && expected.NeedsMap() ? TempRegister() : NULL;
+  LOperand* temp = !easy_case && (expected & ToBooleanHint::kNeedsMap)
+                       ? TempRegister()
+                       : NULL;
   LInstruction* branch =
       temp != NULL ? new (zone()) LBranch(UseRegister(value), temp)
                    : new (zone()) LBranch(UseRegisterAtStart(value), temp);
-  if (!easy_case &&
-      ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
-       !expected.IsGeneric())) {
+  if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) &&
+                      (expected & ToBooleanHint::kNeedsMap)) ||
+                     expected != ToBooleanHint::kAny)) {
     branch = AssignEnvironment(branch);
   }
   return branch;
@@ -1703,24 +1697,6 @@
       TempRegister());
 }
 
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
-    HGetCachedArrayIndex* instr)  {
-  DCHECK(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
-
-  return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
-    HHasCachedArrayIndexAndBranch* instr) {
-  DCHECK(instr->value()->representation().IsTagged());
-  return new(zone()) LHasCachedArrayIndexAndBranch(
-      UseRegisterAtStart(instr->value()));
-}
-
-
 LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
     HClassOfTestAndBranch* instr) {
   DCHECK(instr->value()->representation().IsTagged());
@@ -2003,15 +1979,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
-  LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
-  return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
   LInstruction* result =
@@ -2051,17 +2018,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* object =
-      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
-      context, object, vector);
-  return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
     HLoadFunctionPrototype* instr) {
   return AssignEnvironment(DefineAsRegister(
@@ -2120,18 +2076,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* object =
-      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
-  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  LLoadKeyedGeneric* result =
-      new(zone()) LLoadKeyedGeneric(context, object, key, vector);
-  return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
 LOperand* LChunkBuilder::GetStoreKeyedValueOperand(HStoreKeyed* instr) {
   ElementsKind elements_kind = instr->elements_kind();
 
@@ -2472,7 +2416,6 @@
   inner->BindContext(instr->closure_context());
   inner->set_entry(instr);
   current_block_->UpdateEnvironment(inner);
-  chunk_->AddInlinedFunction(instr->shared());
   return NULL;
 }
 
diff --git a/src/crankshaft/x87/lithium-x87.h b/src/crankshaft/x87/lithium-x87.h
index e2b8043..3653a2d 100644
--- a/src/crankshaft/x87/lithium-x87.h
+++ b/src/crankshaft/x87/lithium-x87.h
@@ -76,9 +76,7 @@
   V(FlooringDivI)                            \
   V(ForInCacheArray)                         \
   V(ForInPrepareMap)                         \
-  V(GetCachedArrayIndex)                     \
   V(Goto)                                    \
-  V(HasCachedArrayIndexAndBranch)            \
   V(HasInPrototypeChainAndBranch)            \
   V(HasInstanceTypeAndBranch)                \
   V(InnerAllocatedObject)                    \
@@ -93,11 +91,8 @@
   V(LoadContextSlot)                         \
   V(LoadFieldByIndex)                        \
   V(LoadFunctionPrototype)                   \
-  V(LoadGlobalGeneric)                       \
   V(LoadKeyed)                               \
-  V(LoadKeyedGeneric)                        \
   V(LoadNamedField)                          \
-  V(LoadNamedGeneric)                        \
   V(LoadRoot)                                \
   V(MathAbs)                                 \
   V(MathClz32)                               \
@@ -1074,35 +1069,6 @@
   void PrintDataTo(StringStream* stream) override;
 };
 
-
-class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LGetCachedArrayIndex(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
-  DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
- public:
-  explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
-                               "has-cached-array-index-and-branch")
-
-  void PrintDataTo(StringStream* stream) override;
-};
-
-
 class LClassOfTestAndBranch final : public LControlInstruction<1, 2> {
  public:
   LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
@@ -1483,25 +1449,6 @@
 };
 
 
-class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
- public:
-  LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    temps_[0] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
 class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 1> {
  public:
   LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
@@ -1566,43 +1513,6 @@
 }
 
 
-class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
- public:
-  LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key,
-                    LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = obj;
-    inputs_[2] = key;
-    temps_[0] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* key() { return inputs_[2]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
-};
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
- public:
-  LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
-    inputs_[0] = context;
-    temps_[0] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-  TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
-};
-
-
 class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadContextSlot(LOperand* context) {
@@ -1961,6 +1871,8 @@
 
   DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
   DECLARE_HYDROGEN_ACCESSOR(Change);
+
+  bool truncating() { return hydrogen()->CanTruncateToNumber(); }
 };
 
 
diff --git a/src/d8-posix.cc b/src/d8-posix.cc
index d2cf573..cdfc39c 100644
--- a/src/d8-posix.cc
+++ b/src/d8-posix.cc
@@ -772,4 +772,12 @@
                 FunctionTemplate::New(isolate, RemoveDirectory));
 }
 
+void Shell::Exit(int exit_code) {
+  // Use _exit instead of exit to avoid races between isolate
+  // threads and static destructors.
+  fflush(stdout);
+  fflush(stderr);
+  _exit(exit_code);
+}
+
 }  // namespace v8
diff --git a/src/d8-windows.cc b/src/d8-windows.cc
index ba89c41..e7ddca6 100644
--- a/src/d8-windows.cc
+++ b/src/d8-windows.cc
@@ -10,5 +10,12 @@
 
 void Shell::AddOSMethods(Isolate* isolate, Local<ObjectTemplate> os_templ) {}
 
+void Shell::Exit(int exit_code) {
+  // Use TerminateProcess to avoid races between isolate threads and
+  // static destructors.
+  fflush(stdout);
+  fflush(stderr);
+  TerminateProcess(GetCurrentProcess(), exit_code);
+}
 
 }  // namespace v8
diff --git a/src/d8.cc b/src/d8.cc
index 01801f8..fd9afee 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -9,7 +9,7 @@
 
 #include <algorithm>
 #include <fstream>
-#include <map>
+#include <unordered_map>
 #include <utility>
 #include <vector>
 
@@ -34,6 +34,10 @@
 #include "src/utils.h"
 #include "src/v8.h"
 
+#ifdef V8_INSPECTOR_ENABLED
+#include "include/v8-inspector.h"
+#endif  // V8_INSPECTOR_ENABLED
+
 #if !defined(_WIN32) && !defined(_WIN64)
 #include <unistd.h>  // NOLINT
 #else
@@ -149,7 +153,6 @@
 
 v8::Platform* g_platform = NULL;
 
-
 static Local<Value> Throw(Isolate* isolate, const char* message) {
   return isolate->ThrowException(
       String::NewFromUtf8(isolate, message, NewStringType::kNormal)
@@ -196,11 +199,9 @@
 const char kRecordAsMuchAsPossible[] = "record-as-much-as-possible";
 
 const char kRecordModeParam[] = "record_mode";
-const char kEnableSamplingParam[] = "enable_sampling";
 const char kEnableSystraceParam[] = "enable_systrace";
 const char kEnableArgumentFilterParam[] = "enable_argument_filter";
 const char kIncludedCategoriesParam[] = "included_categories";
-const char kExcludedCategoriesParam[] = "excluded_categories";
 
 class TraceConfigParser {
  public:
@@ -221,10 +222,6 @@
     trace_config->SetTraceRecordMode(
         GetTraceRecordMode(isolate, context, trace_config_object));
     if (GetBoolean(isolate, context, trace_config_object,
-                   kEnableSamplingParam)) {
-      trace_config->EnableSampling();
-    }
-    if (GetBoolean(isolate, context, trace_config_object,
                    kEnableSystraceParam)) {
       trace_config->EnableSystrace();
     }
@@ -232,10 +229,8 @@
                    kEnableArgumentFilterParam)) {
       trace_config->EnableArgumentFilter();
     }
-    UpdateCategoriesList(isolate, context, trace_config_object,
-                         kIncludedCategoriesParam, trace_config);
-    UpdateCategoriesList(isolate, context, trace_config_object,
-                         kExcludedCategoriesParam, trace_config);
+    UpdateIncludedCategoriesList(isolate, context, trace_config_object,
+                                 trace_config);
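+    // Sampling and the excluded-categories list are no longer supported;
+    // only the included_categories array of the config object is applied.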
   }
 
  private:
@@ -249,10 +244,11 @@
     return false;
   }
 
-  static int UpdateCategoriesList(
+  static int UpdateIncludedCategoriesList(
       v8::Isolate* isolate, Local<Context> context, Local<v8::Object> object,
-      const char* property, platform::tracing::TraceConfig* trace_config) {
-    Local<Value> value = GetValue(isolate, context, object, property);
+      platform::tracing::TraceConfig* trace_config) {
+    Local<Value> value =
+        GetValue(isolate, context, object, kIncludedCategoriesParam);
     if (value->IsArray()) {
       Local<Array> v8_array = Local<Array>::Cast(value);
       for (int i = 0, length = v8_array->Length(); i < length; ++i) {
@@ -261,11 +257,7 @@
                              ->ToString(context)
                              .ToLocalChecked();
         String::Utf8Value str(v->ToString(context).ToLocalChecked());
-        if (kIncludedCategoriesParam == property) {
-          trace_config->AddIncludedCategory(*str);
-        } else {
-          trace_config->AddExcludedCategory(*str);
-        }
+        trace_config->AddIncludedCategory(*str);
       }
       return v8_array->Length();
     }
@@ -553,34 +545,94 @@
   return path.substr(0, last_slash);
 }
 
-std::string EnsureAbsolutePath(const std::string& path,
-                               const std::string& dir_name) {
-  return IsAbsolutePath(path) ? path : dir_name + '/' + path;
+// Resolves path to an absolute path if necessary, and does some
+// normalization (eliding references to the current directory
+// and replacing backslashes with slashes).
+std::string NormalizePath(const std::string& path,
+                          const std::string& dir_name) {
+  std::string result;
+  if (IsAbsolutePath(path)) {
+    result = path;
+  } else {
+    result = dir_name + '/' + path;
+  }
+  std::replace(result.begin(), result.end(), '\\', '/');
+  size_t i;
+  while ((i = result.find("/./")) != std::string::npos) {
+    result.erase(i, 2);
+  }
+  return result;
+}
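+// Example (hypothetical inputs): NormalizePath("./m.js", "/a/b") returns
+// "/a/b/m.js"; NormalizePath("lib\\util.js", "/a") returns "/a/lib/util.js".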
+
+// Per-context Module data, allowing sharing of module maps
+// across top-level module loads.
+class ModuleEmbedderData {
+ private:
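+  // Hash functor so that Global<Module> can key an unordered_map; a
+  // module's identity hash is stable for its lifetime.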
+  class ModuleGlobalHash {
+   public:
+    explicit ModuleGlobalHash(Isolate* isolate) : isolate_(isolate) {}
+    size_t operator()(const Global<Module>& module) const {
+      return module.Get(isolate_)->GetIdentityHash();
+    }
+
+   private:
+    Isolate* isolate_;
+  };
+
+ public:
+  explicit ModuleEmbedderData(Isolate* isolate)
+      : module_to_directory_map(10, ModuleGlobalHash(isolate)) {}
+
+  // Map from normalized module specifier to Module.
+  std::unordered_map<std::string, Global<Module>> specifier_to_module_map;
+  // Map from Module to the directory that Module was loaded from.
+  std::unordered_map<Global<Module>, std::string, ModuleGlobalHash>
+      module_to_directory_map;
+};
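+// The embedder data is allocated when a context (realm) is created and must
+// be disposed explicitly before the context goes away; see
+// InitializeModuleEmbedderData and DisposeModuleEmbedderData below.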
+
+enum {
+  // The debugger reserves the first slot in the Context embedder data.
+  kDebugIdIndex = Context::kDebugIdIndex,
+  kModuleEmbedderDataIndex,
+  kInspectorClientIndex
+};
+
+void InitializeModuleEmbedderData(Local<Context> context) {
+  context->SetAlignedPointerInEmbedderData(
+      kModuleEmbedderDataIndex, new ModuleEmbedderData(context->GetIsolate()));
+}
+
+ModuleEmbedderData* GetModuleDataFromContext(Local<Context> context) {
+  return static_cast<ModuleEmbedderData*>(
+      context->GetAlignedPointerFromEmbedderData(kModuleEmbedderDataIndex));
+}
+
+void DisposeModuleEmbedderData(Local<Context> context) {
+  delete GetModuleDataFromContext(context);
+  context->SetAlignedPointerInEmbedderData(kModuleEmbedderDataIndex, nullptr);
 }
 
 MaybeLocal<Module> ResolveModuleCallback(Local<Context> context,
                                          Local<String> specifier,
-                                         Local<Module> referrer,
-                                         Local<Value> data) {
+                                         Local<Module> referrer) {
   Isolate* isolate = context->GetIsolate();
-  auto module_map = static_cast<std::map<std::string, Global<Module>>*>(
-      External::Cast(*data)->Value());
-  Local<String> dir_name = Local<String>::Cast(referrer->GetEmbedderData());
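+  // Resolution cannot fail here: FetchModuleTree registered every
+  // transitive dependency before instantiation, so a missing map entry is a
+  // bug (hence the CHECKs).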
+  ModuleEmbedderData* d = GetModuleDataFromContext(context);
+  auto dir_name_it =
+      d->module_to_directory_map.find(Global<Module>(isolate, referrer));
+  CHECK(dir_name_it != d->module_to_directory_map.end());
   std::string absolute_path =
-      EnsureAbsolutePath(ToSTLString(specifier), ToSTLString(dir_name));
-  auto it = module_map->find(absolute_path);
-  if (it != module_map->end()) {
-    return it->second.Get(isolate);
-  }
-  return MaybeLocal<Module>();
+      NormalizePath(ToSTLString(specifier), dir_name_it->second);
+  auto module_it = d->specifier_to_module_map.find(absolute_path);
+  CHECK(module_it != d->specifier_to_module_map.end());
+  return module_it->second.Get(isolate);
 }
 
 }  // anonymous namespace
 
-MaybeLocal<Module> Shell::FetchModuleTree(
-    Isolate* isolate, const std::string& file_name,
-    std::map<std::string, Global<Module>>* module_map) {
+MaybeLocal<Module> Shell::FetchModuleTree(Local<Context> context,
+                                          const std::string& file_name) {
   DCHECK(IsAbsolutePath(file_name));
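+  // Compile the module at |file_name|, record it in the per-context maps,
+  // then recursively fetch every module it requests, deduplicating through
+  // the specifier map.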
+  Isolate* isolate = context->GetIsolate();
   TryCatch try_catch(isolate);
   try_catch.SetVerbose(true);
   Local<String> source_text = ReadFile(isolate, file_name.c_str());
@@ -597,19 +649,22 @@
     ReportException(isolate, &try_catch);
     return MaybeLocal<Module>();
   }
-  module_map->insert(
-      std::make_pair(file_name, Global<Module>(isolate, module)));
+
+  ModuleEmbedderData* d = GetModuleDataFromContext(context);
+  CHECK(d->specifier_to_module_map
+            .insert(std::make_pair(file_name, Global<Module>(isolate, module)))
+            .second);
 
   std::string dir_name = DirName(file_name);
-  module->SetEmbedderData(
-      String::NewFromUtf8(isolate, dir_name.c_str(), NewStringType::kNormal)
-          .ToLocalChecked());
+  CHECK(d->module_to_directory_map
+            .insert(std::make_pair(Global<Module>(isolate, module), dir_name))
+            .second);
 
   for (int i = 0, length = module->GetModuleRequestsLength(); i < length; ++i) {
     Local<String> name = module->GetModuleRequest(i);
-    std::string absolute_path = EnsureAbsolutePath(ToSTLString(name), dir_name);
-    if (!module_map->count(absolute_path)) {
-      if (FetchModuleTree(isolate, absolute_path, module_map).IsEmpty()) {
+    std::string absolute_path = NormalizePath(ToSTLString(name), dir_name);
+    if (!d->specifier_to_module_map.count(absolute_path)) {
+      if (FetchModuleTree(context, absolute_path).IsEmpty()) {
         return MaybeLocal<Module>();
       }
     }
@@ -621,14 +676,14 @@
 bool Shell::ExecuteModule(Isolate* isolate, const char* file_name) {
   HandleScope handle_scope(isolate);
 
-  std::string absolute_path =
-      EnsureAbsolutePath(file_name, GetWorkingDirectory());
-  std::replace(absolute_path.begin(), absolute_path.end(), '\\', '/');
+  PerIsolateData* data = PerIsolateData::Get(isolate);
+  Local<Context> realm = data->realms_[data->realm_current_].Get(isolate);
+  Context::Scope context_scope(realm);
+
+  std::string absolute_path = NormalizePath(file_name, GetWorkingDirectory());
 
   Local<Module> root_module;
-  std::map<std::string, Global<Module>> module_map;
-  if (!FetchModuleTree(isolate, absolute_path, &module_map)
-           .ToLocal(&root_module)) {
+  if (!FetchModuleTree(realm, absolute_path).ToLocal(&root_module)) {
     return false;
   }
 
@@ -636,16 +691,9 @@
   try_catch.SetVerbose(true);
 
   MaybeLocal<Value> maybe_result;
-  {
-    PerIsolateData* data = PerIsolateData::Get(isolate);
-    Local<Context> realm = data->realms_[data->realm_current_].Get(isolate);
-    Context::Scope context_scope(realm);
-
-    if (root_module->Instantiate(realm, ResolveModuleCallback,
-                                 External::New(isolate, &module_map))) {
-      maybe_result = root_module->Evaluate(realm);
-      EmptyMessageQueues(isolate);
-    }
+  if (root_module->Instantiate(realm, ResolveModuleCallback)) {
+    maybe_result = root_module->Evaluate(realm);
+    EmptyMessageQueues(isolate);
   }
   Local<Value> result;
   if (!maybe_result.ToLocal(&result)) {
@@ -670,9 +718,15 @@
 
 PerIsolateData::RealmScope::~RealmScope() {
   // Drop realms to avoid keeping them alive.
-  for (int i = 0; i < data_->realm_count_; ++i)
-    data_->realms_[i].Reset();
+  for (int i = 0; i < data_->realm_count_; ++i) {
+    Global<Context>& realm = data_->realms_[i];
+    if (realm.IsEmpty()) continue;
+    DisposeModuleEmbedderData(realm.Get(data_->isolate_));
+    // TODO(adamk): No need to reset manually, Globals reset when destructed.
+    realm.Reset();
+  }
   delete[] data_->realms_;
+  // TODO(adamk): No need to reset manually, Globals reset when destructed.
   if (!data_->realm_shared_.IsEmpty())
     data_->realm_shared_.Reset();
 }
@@ -775,6 +829,7 @@
     try_catch.ReThrow();
     return MaybeLocal<Context>();
   }
+  InitializeModuleEmbedderData(context);
   data->realms_[index].Reset(isolate, context);
   args.GetReturnValue().Set(index);
   return context;
@@ -808,6 +863,7 @@
     Throw(args.GetIsolate(), "Invalid realm index");
     return;
   }
+  DisposeModuleEmbedderData(data->realms_[index].Get(isolate));
   data->realms_[index].Reset();
   isolate->ContextDisposedNotification();
   isolate->IdleNotificationDeadline(g_platform->MonotonicallyIncreasingTime());
@@ -870,19 +926,11 @@
   data->realm_shared_.Reset(isolate, value);
 }
 
-
-void Shell::Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
-  Write(args);
-  printf("\n");
-  fflush(stdout);
-}
-
-
-void Shell::Write(const v8::FunctionCallbackInfo<v8::Value>& args) {
+void WriteToFile(FILE* file, const v8::FunctionCallbackInfo<v8::Value>& args) {
   for (int i = 0; i < args.Length(); i++) {
     HandleScope handle_scope(args.GetIsolate());
     if (i != 0) {
-      printf(" ");
+      fprintf(file, " ");
     }
 
     // Explicitly catch potential exceptions in toString().
@@ -900,14 +948,32 @@
     }
 
     v8::String::Utf8Value str(str_obj);
-    int n = static_cast<int>(fwrite(*str, sizeof(**str), str.length(), stdout));
+    int n = static_cast<int>(fwrite(*str, sizeof(**str), str.length(), file));
     if (n != str.length()) {
       printf("Error in fwrite\n");
-      Exit(1);
+      Shell::Exit(1);
     }
   }
 }
 
+void WriteAndFlush(FILE* file,
+                   const v8::FunctionCallbackInfo<v8::Value>& args) {
+  WriteToFile(file, args);
+  fprintf(file, "\n");
+  fflush(file);
+}
+
+void Shell::Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  WriteAndFlush(stdout, args);
+}
+
+void Shell::PrintErr(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  WriteAndFlush(stderr, args);
+}
+
+void Shell::Write(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  WriteToFile(stdout, args);
+}
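+// d8 semantics: print() and printErr() append a newline and flush their
+// stream; write() emits the raw arguments to stdout without a newline.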
 
 void Shell::Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
   String::Utf8Value file(args[0]);
@@ -1324,6 +1390,10 @@
           .ToLocalChecked(),
       FunctionTemplate::New(isolate, Print));
   global_template->Set(
+      String::NewFromUtf8(isolate, "printErr", NewStringType::kNormal)
+          .ToLocalChecked(),
+      FunctionTemplate::New(isolate, PrintErr));
+  global_template->Set(
       String::NewFromUtf8(isolate, "write", NewStringType::kNormal)
           .ToLocalChecked(),
       FunctionTemplate::New(isolate, Write));
@@ -1476,6 +1546,7 @@
   EscapableHandleScope handle_scope(isolate);
   Local<Context> context = Context::New(isolate, NULL, global_template);
   DCHECK(!context.IsEmpty());
+  InitializeModuleEmbedderData(context);
   Context::Scope scope(context);
 
   i::Factory* factory = reinterpret_cast<i::Isolate*>(isolate)->factory();
@@ -1498,16 +1569,6 @@
   return handle_scope.Escape(context);
 }
 
-
-void Shell::Exit(int exit_code) {
-  // Use _exit instead of exit to avoid races between isolate
-  // threads and static destructors.
-  fflush(stdout);
-  fflush(stderr);
-  _exit(exit_code);
-}
-
-
 struct CounterAndKey {
   Counter* counter;
   const char* key;
@@ -1700,6 +1761,128 @@
   printf("\n");
 }
 
+#ifdef V8_INSPECTOR_ENABLED
+class InspectorFrontend final : public v8_inspector::V8Inspector::Channel {
+ public:
+  explicit InspectorFrontend(Local<Context> context) {
+    isolate_ = context->GetIsolate();
+    context_.Reset(isolate_, context);
+  }
+  virtual ~InspectorFrontend() = default;
+
+ private:
+  void sendProtocolResponse(int callId,
+                            const v8_inspector::StringView& message) override {
+    Send(message);
+  }
+  void sendProtocolNotification(
+      const v8_inspector::StringView& message) override {
+    Send(message);
+  }
+  void flushProtocolNotifications() override {}
+
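+  // Forwards an inspector protocol message to a global JS function named
+  // "receive", converting the 8- or 16-bit StringView to a v8::String.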
+  void Send(const v8_inspector::StringView& string) {
+    int length = static_cast<int>(string.length());
+    DCHECK(length < v8::String::kMaxLength);
+    Local<String> message =
+        (string.is8Bit()
+             ? v8::String::NewFromOneByte(
+                   isolate_,
+                   reinterpret_cast<const uint8_t*>(string.characters8()),
+                   v8::NewStringType::kNormal, length)
+             : v8::String::NewFromTwoByte(
+                   isolate_,
+                   reinterpret_cast<const uint16_t*>(string.characters16()),
+                   v8::NewStringType::kNormal, length))
+            .ToLocalChecked();
+    Local<String> callback_name =
+        v8::String::NewFromUtf8(isolate_, "receive", v8::NewStringType::kNormal)
+            .ToLocalChecked();
+    Local<Context> context = context_.Get(isolate_);
+    Local<Value> callback =
+        context->Global()->Get(context, callback_name).ToLocalChecked();
+    if (callback->IsFunction()) {
+      v8::TryCatch try_catch(isolate_);
+      Local<Value> args[] = {message};
+      MaybeLocal<Value> result = Local<Function>::Cast(callback)->Call(
+          context, Undefined(isolate_), 1, args);
+      CHECK(!result.IsEmpty());  // Listeners may not throw.
+    }
+  }
+
+  Isolate* isolate_;
+  Global<Context> context_;
+};
+
+class InspectorClient : public v8_inspector::V8InspectorClient {
+ public:
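+  // When |connect| is set, wires a single inspector session to |context| and
+  // installs a global send() function through which scripts drive the
+  // protocol; the script-side "receive" callback gets the responses.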
+  InspectorClient(Local<Context> context, bool connect) {
+    if (!connect) return;
+    isolate_ = context->GetIsolate();
+    channel_.reset(new InspectorFrontend(context));
+    inspector_ = v8_inspector::V8Inspector::create(isolate_, this);
+    session_ =
+        inspector_->connect(1, channel_.get(), v8_inspector::StringView());
+    context->SetAlignedPointerInEmbedderData(kInspectorClientIndex, this);
+    inspector_->contextCreated(v8_inspector::V8ContextInfo(
+        context, kContextGroupId, v8_inspector::StringView()));
+
+    Local<Value> function =
+        FunctionTemplate::New(isolate_, SendInspectorMessage)
+            ->GetFunction(context)
+            .ToLocalChecked();
+    Local<String> function_name =
+        String::NewFromUtf8(isolate_, "send", NewStringType::kNormal)
+            .ToLocalChecked();
+    CHECK(context->Global()->Set(context, function_name, function).FromJust());
+
+    context_.Reset(isolate_, context);
+  }
+
+ private:
+  static v8_inspector::V8InspectorSession* GetSession(Local<Context> context) {
+    InspectorClient* inspector_client = static_cast<InspectorClient*>(
+        context->GetAlignedPointerFromEmbedderData(kInspectorClientIndex));
+    return inspector_client->session_.get();
+  }
+
+  Local<Context> ensureDefaultContextInGroup(int group_id) override {
+    DCHECK(isolate_);
+    DCHECK_EQ(kContextGroupId, group_id);
+    return context_.Get(isolate_);
+  }
+
+  static void SendInspectorMessage(
+      const v8::FunctionCallbackInfo<v8::Value>& args) {
+    Isolate* isolate = args.GetIsolate();
+    v8::HandleScope handle_scope(isolate);
+    Local<Context> context = isolate->GetCurrentContext();
+    args.GetReturnValue().Set(Undefined(isolate));
+    Local<String> message = args[0]->ToString(context).ToLocalChecked();
+    v8_inspector::V8InspectorSession* session =
+        InspectorClient::GetSession(context);
+    int length = message->Length();
+    std::unique_ptr<uint16_t[]> buffer(new uint16_t[length]);
+    message->Write(buffer.get(), 0, length);
+    v8_inspector::StringView message_view(buffer.get(), length);
+    session->dispatchProtocolMessage(message_view);
+    args.GetReturnValue().Set(True(isolate));
+  }
+
+  static const int kContextGroupId = 1;
+
+  std::unique_ptr<v8_inspector::V8Inspector> inspector_;
+  std::unique_ptr<v8_inspector::V8InspectorSession> session_;
+  std::unique_ptr<v8_inspector::V8Inspector::Channel> channel_;
+  Global<Context> context_;
+  Isolate* isolate_;
+};
+#else   // V8_INSPECTOR_ENABLED
+class InspectorClient {
+ public:
+  InspectorClient(Local<Context> context, bool connect) { CHECK(!connect); }
+};
+#endif  // V8_INSPECTOR_ENABLED
 
 SourceGroup::~SourceGroup() {
   delete thread_;
@@ -1783,7 +1966,6 @@
   return base::Thread::Options("IsolateThread", 2 * MB);
 }
 
-
 void SourceGroup::ExecuteInThread() {
   Isolate::CreateParams create_params;
   create_params.array_buffer_allocator = Shell::array_buffer_allocator;
@@ -1798,9 +1980,12 @@
         Local<Context> context = Shell::CreateEvaluationContext(isolate);
         {
           Context::Scope cscope(context);
+          InspectorClient inspector_client(context,
+                                           Shell::options.enable_inspector);
           PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
           Execute(isolate);
         }
+        DisposeModuleEmbedderData(context);
       }
       Shell::CollectGarbage(isolate);
     }
@@ -2060,6 +2245,7 @@
           }
         }
       }
+      DisposeModuleEmbedderData(context);
     }
     Shell::CollectGarbage(isolate);
   }
@@ -2191,6 +2377,9 @@
     } else if (strncmp(argv[i], "--trace-config=", 15) == 0) {
       options.trace_config = argv[i] + 15;
       argv[i] = NULL;
+    } else if (strcmp(argv[i], "--enable-inspector") == 0) {
+      options.enable_inspector = true;
+      argv[i] = NULL;
     }
   }
 
@@ -2240,9 +2429,11 @@
     }
     {
       Context::Scope cscope(context);
+      InspectorClient inspector_client(context, options.enable_inspector);
       PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
       options.isolate_sources[0].Execute(isolate);
     }
+    DisposeModuleEmbedderData(context);
   }
   CollectGarbage(isolate);
   for (int i = 1; i < options.num_isolates; ++i) {
@@ -2620,6 +2811,20 @@
                    ? new PredictablePlatform()
                    : v8::platform::CreateDefaultPlatform();
 
+  platform::tracing::TracingController* tracing_controller = nullptr;
+  if (options.trace_enabled) {
+    trace_file.open("v8_trace.json");
+    tracing_controller = new platform::tracing::TracingController();
+    platform::tracing::TraceBuffer* trace_buffer =
+        platform::tracing::TraceBuffer::CreateTraceBufferRingBuffer(
+            platform::tracing::TraceBuffer::kRingBufferChunks,
+            platform::tracing::TraceWriter::CreateJSONTraceWriter(trace_file));
+    tracing_controller->Initialize(trace_buffer);
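+    // We assume the platform takes ownership of the controller once it is
+    // attached; under --verify-predictable it stays with d8 and is deleted
+    // explicitly at shutdown.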
+    if (!i::FLAG_verify_predictable) {
+      platform::SetTracingController(g_platform, tracing_controller);
+    }
+  }
+
   v8::V8::InitializePlatform(g_platform);
   v8::V8::Initialize();
   if (options.natives_blob || options.snapshot_blob) {
@@ -2649,11 +2854,12 @@
       base::SysInfo::AmountOfVirtualMemory());
 
   Shell::counter_map_ = new CounterMap();
-  if (i::FLAG_dump_counters || i::FLAG_track_gc_object_stats) {
+  if (i::FLAG_dump_counters || i::FLAG_gc_stats) {
     create_params.counter_lookup_callback = LookupCounter;
     create_params.create_histogram_callback = CreateHistogram;
     create_params.add_histogram_sample_callback = AddHistogramSample;
   }
+
   Isolate* isolate = Isolate::New(create_params);
   {
     Isolate::Scope scope(isolate);
@@ -2661,14 +2867,6 @@
     PerIsolateData data(isolate);
 
     if (options.trace_enabled) {
-      trace_file.open("v8_trace.json");
-      platform::tracing::TracingController* tracing_controller =
-          new platform::tracing::TracingController();
-      platform::tracing::TraceBuffer* trace_buffer =
-          platform::tracing::TraceBuffer::CreateTraceBufferRingBuffer(
-              platform::tracing::TraceBuffer::kRingBufferChunks,
-              platform::tracing::TraceWriter::CreateJSONTraceWriter(
-                  trace_file));
       platform::tracing::TraceConfig* trace_config;
       if (options.trace_config) {
         int size = 0;
@@ -2681,11 +2879,7 @@
         trace_config =
             platform::tracing::TraceConfig::CreateDefaultTraceConfig();
       }
-      tracing_controller->Initialize(trace_buffer);
       tracing_controller->StartTracing(trace_config);
-      if (!i::FLAG_verify_predictable) {
-        platform::SetTracingController(g_platform, tracing_controller);
-      }
     }
 
     if (options.dump_heap_constants) {
@@ -2726,7 +2920,7 @@
       RunShell(isolate);
     }
 
-    if (i::FLAG_ignition && i::FLAG_trace_ignition_dispatches &&
+    if (i::FLAG_trace_ignition_dispatches &&
         i::FLAG_trace_ignition_dispatches_output_file != nullptr) {
       WriteIgnitionDispatchCountersFile(isolate);
     }
@@ -2747,6 +2941,9 @@
   V8::Dispose();
   V8::ShutdownPlatform();
   delete g_platform;
+  if (i::FLAG_verify_predictable) {
+    delete tracing_controller;
+  }
 
   return result;
 }
diff --git a/src/d8.gyp b/src/d8.gyp
index e0270f5..f6ceeaa 100644
--- a/src/d8.gyp
+++ b/src/d8.gyp
@@ -39,6 +39,7 @@
       'type': 'executable',
       'dependencies': [
         'v8.gyp:v8',
+        'v8.gyp:v8_libbase',
         'v8.gyp:v8_libplatform',
       ],
       # Generated source files need this explicitly:
diff --git a/src/d8.h b/src/d8.h
index 32a7d25..5e7abaf 100644
--- a/src/d8.h
+++ b/src/d8.h
@@ -5,7 +5,6 @@
 #ifndef V8_D8_H_
 #define V8_D8_H_
 
-#include <map>
 #include <string>
 
 #include "src/allocation.h"
@@ -275,6 +274,7 @@
         dump_heap_constants(false),
         expected_to_throw(false),
         mock_arraybuffer_allocator(false),
+        enable_inspector(false),
         num_isolates(1),
         compile_options(v8::ScriptCompiler::kNoCompileOptions),
         isolate_sources(NULL),
@@ -304,6 +304,7 @@
   bool dump_heap_constants;
   bool expected_to_throw;
   bool mock_arraybuffer_allocator;
+  bool enable_inspector;
   int num_isolates;
   v8::ScriptCompiler::CompileOptions compile_options;
   SourceGroup* isolate_sources;
@@ -371,6 +372,7 @@
                              const  PropertyCallbackInfo<void>& info);
 
   static void Print(const v8::FunctionCallbackInfo<v8::Value>& args);
+  static void PrintErr(const v8::FunctionCallbackInfo<v8::Value>& args);
   static void Write(const v8::FunctionCallbackInfo<v8::Value>& args);
   static void QuitOnce(v8::FunctionCallbackInfo<v8::Value>* args);
   static void Quit(const v8::FunctionCallbackInfo<v8::Value>& args);
@@ -455,9 +457,8 @@
   static Local<ObjectTemplate> CreateGlobalTemplate(Isolate* isolate);
   static MaybeLocal<Context> CreateRealm(
       const v8::FunctionCallbackInfo<v8::Value>& args);
-  static MaybeLocal<Module> FetchModuleTree(
-      Isolate* isolate, const std::string& file_name,
-      std::map<std::string, Global<Module>>* module_map);
+  static MaybeLocal<Module> FetchModuleTree(v8::Local<v8::Context> context,
+                                            const std::string& file_name);
 };
 
 
diff --git a/src/date.cc b/src/date.cc
index f98ad64..cc76033 100644
--- a/src/date.cc
+++ b/src/date.cc
@@ -25,7 +25,7 @@
 void DateCache::ResetDateCache() {
   static const int kMaxStamp = Smi::kMaxValue;
   if (stamp_->value() >= kMaxStamp) {
-    stamp_ = Smi::FromInt(0);
+    stamp_ = Smi::kZero;
   } else {
     stamp_ = Smi::FromInt(stamp_->value() + 1);
   }
diff --git a/src/debug/debug-frames.cc b/src/debug/debug-frames.cc
index c98f911..5da1656 100644
--- a/src/debug/debug-frames.cc
+++ b/src/debug/debug-frames.cc
@@ -77,23 +77,11 @@
                        : handle(frame_->GetExpression(index), isolate_);
 }
 
-
 int FrameInspector::GetSourcePosition() {
-  if (is_optimized_) return deoptimized_frame_->GetSourcePosition();
-  AbstractCode* code;
-  int code_offset;
-  if (is_interpreted_) {
-    InterpretedFrame* frame = reinterpret_cast<InterpretedFrame*>(frame_);
-    code = AbstractCode::cast(frame->GetBytecodeArray());
-    code_offset = frame->GetBytecodeOffset();
-  } else {
-    code = AbstractCode::cast(frame_->LookupCode());
-    code_offset = static_cast<int>(frame_->pc() - code->instruction_start());
-  }
-  return code->SourcePosition(code_offset);
+  return is_optimized_ ? deoptimized_frame_->GetSourcePosition()
+                       : frame_->position();
 }
 
-
 bool FrameInspector::IsConstructor() {
   return is_optimized_ && !is_bottommost_
              ? deoptimized_frame_->HasConstructStub()
diff --git a/src/debug/debug-interface.h b/src/debug/debug-interface.h
new file mode 100644
index 0000000..443ed42
--- /dev/null
+++ b/src/debug/debug-interface.h
@@ -0,0 +1,209 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEBUG_DEBUG_INTERFACE_H_
+#define V8_DEBUG_DEBUG_INTERFACE_H_
+
+#include "include/v8-debug.h"
+#include "include/v8-util.h"
+#include "include/v8.h"
+
+namespace v8 {
+
+class DebugInterface {
+ public:
+  /**
+   * An event details object passed to the debug event listener.
+   */
+  class EventDetails : public v8::Debug::EventDetails {
+   public:
+    /**
+     * Event type.
+     */
+    virtual v8::DebugEvent GetEvent() const = 0;
+
+    /**
+     * Access to execution state and event data of the debug event. Don't
+     * store these across callbacks, as their content becomes invalid.
+     */
+    virtual Local<Object> GetExecutionState() const = 0;
+    virtual Local<Object> GetEventData() const = 0;
+
+    /**
+     * Get the context active when the debug event happened. Note that this is
+     * not the currently active context, as the JavaScript part of the debugger
+     * runs in its own context, which is entered at this point.
+     */
+    virtual Local<Context> GetEventContext() const = 0;
+
+    /**
+     * Client data passed with the corresponding callback when it was
+     * registered.
+     */
+    virtual Local<Value> GetCallbackData() const = 0;
+
+    virtual ~EventDetails() {}
+  };
+
+  /**
+   * Debug event callback function.
+   *
+   * \param event_details object providing information about the debug event
+   *
+   * An EventCallback does not take possession of the event data,
+   * and must not rely on the data persisting after the handler returns.
+   */
+  typedef void (*EventCallback)(const EventDetails& event_details);
+
+  static bool SetDebugEventListener(Isolate* isolate, EventCallback that,
+                                    Local<Value> data = Local<Value>());
+
+  /**
+   * The debugger runs in its own context, which is entered while debugger
+   * messages are being dispatched. This is an explicit getter for that
+   * debugger context. Note that the content of the debugger context is
+   * subject to change. The context exists only while the debugger is active,
+   * i.e. at least one DebugEventListener or MessageHandler is set.
+   */
+  static Local<Context> GetDebugContext(Isolate* isolate);
+
+  /**
+   * Run a JavaScript function in the debugger.
+   * \param fun the function to call
+   * \param data passed as second argument to the function
+   * With this call the debugger is entered and the specified function is
+   * called with the execution state as the first argument. This makes it
+   * possible to get access to information otherwise not available during
+   * normal JavaScript execution, e.g. details on stack frames. The receiver of
+   * the function call will be the debugger context global object; however,
+   * this is subject to change.
+   * The following example shows a JavaScript function which, when passed to
+   * v8::Debug::Call, will return the current line of JavaScript execution.
+   *
+   * \code
+   *   function frame_source_line(exec_state) {
+   *     return exec_state.frame(0).sourceLine();
+   *   }
+   * \endcode
+   */
+  // TODO(dcarney): data arg should be a MaybeLocal
+  static MaybeLocal<Value> Call(Local<Context> context,
+                                v8::Local<v8::Function> fun,
+                                Local<Value> data = Local<Value>());
+
+  /**
+   * Enable/disable LiveEdit functionality for the given Isolate. V8 will
+   * abort if LiveEdit is unexpectedly used. LiveEdit is enabled by default.
+   */
+  static void SetLiveEditEnabled(Isolate* isolate, bool enable);
+
+  // Schedule a debugger break to happen when JavaScript code is run
+  // in the given isolate.
+  static void DebugBreak(Isolate* isolate);
+
+  // Remove scheduled debugger break in given isolate if it has not
+  // happened yet.
+  static void CancelDebugBreak(Isolate* isolate);
+
+  /**
+   * Returns an array of internal properties specific to the value type. The
+   * result has the following format: [<name>, <value>,...,<name>, <value>].
+   * The result array will be allocated in the current context.
+   */
+  static MaybeLocal<Array> GetInternalProperties(Isolate* isolate,
+                                                 Local<Value> value);
+
+  enum ExceptionBreakState {
+    NoBreakOnException = 0,
+    BreakOnUncaughtException = 1,
+    BreakOnAnyException = 2
+  };
+
+  /**
+   * Defines whether the VM will pause on exceptions.
+   * If BreakOnAnyException is set, the VM will pause on both caught and
+   * uncaught exceptions; if BreakOnUncaughtException is set, the VM will
+   * pause only on uncaught exceptions; otherwise the VM won't stop on any
+   * exception.
+   */
+  static void ChangeBreakOnException(Isolate* isolate,
+                                     ExceptionBreakState state);
+
+  enum StepAction {
+    StepOut = 0,   // Step out of the current function.
+    StepNext = 1,  // Step to the next statement in the current function.
+    StepIn = 2,    // Step into new functions invoked or the next statement
+                   // in the current function.
+    StepFrame = 3  // Step into a new frame or return to previous frame.
+  };
+
+  static void PrepareStep(Isolate* isolate, StepAction action);
+  static void ClearStepping(Isolate* isolate);
+
+  /**
+   * Defines a location inside a script.
+   * Lines and columns are 0-based.
+   */
+  class Location {
+   public:
+    Location(int lineNumber, int columnNumber);
+    /**
+     * Create empty location.
+     */
+    Location();
+
+    int GetLineNumber() const;
+    int GetColumnNumber() const;
+    bool IsEmpty() const;
+
+   private:
+    int lineNumber_;
+    int columnNumber_;
+  };
+
+  /**
+   * Native wrapper around v8::internal::Script object.
+   */
+  class Script {
+   public:
+    v8::Isolate* GetIsolate() const;
+
+    ScriptOriginOptions OriginOptions() const;
+    bool WasCompiled() const;
+    int Id() const;
+    int LineOffset() const;
+    int ColumnOffset() const;
+    std::vector<int> LineEnds() const;
+    MaybeLocal<String> Name() const;
+    MaybeLocal<String> SourceURL() const;
+    MaybeLocal<String> SourceMappingURL() const;
+    MaybeLocal<String> ContextData() const;
+    MaybeLocal<String> Source() const;
+    bool GetPossibleBreakpoints(const Location& start, const Location& end,
+                                std::vector<Location>* locations) const;
+
+    /**
+     * The script parameter is a v8::internal::JSObject wrapper around a
+     * v8::internal::Script.
+     * This function extracts the v8::internal::Script from that JSObject and
+     * wraps it in a DebugInterface::Script.
+     * Returns an empty local if not called with a valid wrapper of a
+     * v8::internal::Script.
+     */
+    static MaybeLocal<Script> Wrap(Isolate* isolate,
+                                   v8::Local<v8::Object> script);
+
+   private:
+    int GetSourcePosition(const Location& location) const;
+  };
+
+  /**
+   * Returns an array of compiled scripts.
+   */
+  static void GetLoadedScripts(Isolate* isolate,
+                               PersistentValueVector<Script>& scripts);
+};
+
+}  // namespace v8
+
+#endif  // V8_DEBUG_DEBUG_INTERFACE_H_
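
A minimal embedder-side sketch of how the new interface might be used to
enumerate breakable positions (illustrative only, not part of this patch;
ListBreakpoints, script_wrapper, and the line range are assumed names and
values, and <cstdio>/<vector> plus this header are assumed included):

    void ListBreakpoints(v8::Isolate* isolate,
                         v8::Local<v8::Object> script_wrapper) {
      v8::Local<v8::DebugInterface::Script> script;
      if (!v8::DebugInterface::Script::Wrap(isolate, script_wrapper)
               .ToLocal(&script)) {
        return;  // Not a valid wrapper of a v8::internal::Script.
      }
      // Collect all breakable locations in lines [0, 100) of the script.
      std::vector<v8::DebugInterface::Location> locations;
      v8::DebugInterface::Location start(0, 0);
      v8::DebugInterface::Location end(100, 0);
      if (script->GetPossibleBreakpoints(start, end, &locations)) {
        for (const v8::DebugInterface::Location& loc : locations) {
          printf("breakable at %d:%d\n", loc.GetLineNumber(),
                 loc.GetColumnNumber());
        }
      }
    }
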
diff --git a/src/debug/debug-scopes.cc b/src/debug/debug-scopes.cc
index c7eb0f7..c84d32a 100644
--- a/src/debug/debug-scopes.cc
+++ b/src/debug/debug-scopes.cc
@@ -87,17 +87,13 @@
 
   // Reparse the code and analyze the scopes.
   // Check whether we are in global, eval or function code.
-  Zone zone(isolate->allocator());
+  Zone zone(isolate->allocator(), ZONE_NAME);
   std::unique_ptr<ParseInfo> info;
   if (scope_info->scope_type() != FUNCTION_SCOPE) {
     // Global or eval code.
     Handle<Script> script(Script::cast(shared_info->script()));
     info.reset(new ParseInfo(&zone, script));
-    info->set_toplevel();
-    if (scope_info->scope_type() == SCRIPT_SCOPE) {
-      info->set_global();
-    } else {
-      DCHECK(scope_info->scope_type() == EVAL_SCOPE);
+    if (scope_info->scope_type() == EVAL_SCOPE) {
       info->set_eval();
       if (!function->context()->IsNativeContext()) {
         info->set_outer_scope_info(handle(function->context()->scope_info()));
@@ -105,10 +101,14 @@
       // Language mode may be inherited from the eval caller.
       // Retrieve it from shared function info.
       info->set_language_mode(shared_info->language_mode());
+    } else if (scope_info->scope_type() == MODULE_SCOPE) {
+      info->set_module();
+    } else {
+      DCHECK(scope_info->scope_type() == SCRIPT_SCOPE);
     }
   } else {
     // Inner function.
-    info.reset(new ParseInfo(&zone, function));
+    info.reset(new ParseInfo(&zone, shared_info));
   }
   if (Parser::ParseStatic(info.get()) && Rewriter::Rewrite(info.get())) {
     DeclarationScope* scope = info->literal()->scope();
@@ -610,17 +610,10 @@
   Handle<Context> context = CurrentContext();
   DCHECK(context->IsModuleContext());
   Handle<ScopeInfo> scope_info(context->scope_info());
-
-  // Allocate and initialize a JSObject with all the members of the debugged
-  // module.
   Handle<JSObject> module_scope =
       isolate_->factory()->NewJSObjectWithNullProto();
-
-  // Fill all context locals.
   CopyContextLocalsToScopeObject(scope_info, context, module_scope);
-
-  // TODO(neis): Also collect stack locals as well as imports and exports.
-
+  CopyModuleVarsToScopeObject(scope_info, context, module_scope);
   return module_scope;
 }
 
@@ -791,6 +784,36 @@
   }
 }
 
+void ScopeIterator::CopyModuleVarsToScopeObject(Handle<ScopeInfo> scope_info,
+                                                Handle<Context> context,
+                                                Handle<JSObject> scope_object) {
+  Isolate* isolate = scope_info->GetIsolate();
+
+  int module_variable_count =
+      Smi::cast(scope_info->get(scope_info->ModuleVariableCountIndex()))
+          ->value();
+  for (int i = 0; i < module_variable_count; ++i) {
+    Handle<String> local_name;
+    Handle<Object> value;
+    {
+      String* name;
+      int index;
+      scope_info->ModuleVariable(i, &name, &index);
+      CHECK(!ScopeInfo::VariableIsSynthetic(name));
+      local_name = handle(name, isolate);
+      value = Module::LoadVariable(handle(context->module(), isolate), index);
+    }
+
+    // Reflect variables under TDZ as undefined in scope object.
+    if (value->IsTheHole(isolate)) continue;
+    // This should always succeed.
+    // TODO(verwaest): Use AddDataProperty instead.
+    JSObject::SetOwnPropertyIgnoreAttributes(scope_object, local_name, value,
+                                             NONE)
+        .Check();
+  }
+}
+
 void ScopeIterator::CopyContextExtensionToScopeObject(
     Handle<Context> context, Handle<JSObject> scope_object,
     KeyCollectionMode mode) {
diff --git a/src/debug/debug-scopes.h b/src/debug/debug-scopes.h
index 026a1da..87c85b8 100644
--- a/src/debug/debug-scopes.h
+++ b/src/debug/debug-scopes.h
@@ -153,6 +153,9 @@
   void CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info,
                                       Handle<Context> context,
                                       Handle<JSObject> scope_object);
+  void CopyModuleVarsToScopeObject(Handle<ScopeInfo> scope_info,
+                                   Handle<Context> context,
+                                   Handle<JSObject> scope_object);
   void CopyContextExtensionToScopeObject(Handle<Context> context,
                                          Handle<JSObject> scope_object,
                                          KeyCollectionMode mode);
diff --git a/src/debug/debug.cc b/src/debug/debug.cc
index 5323c13..960327b 100644
--- a/src/debug/debug.cc
+++ b/src/debug/debug.cc
@@ -27,7 +27,6 @@
 #include "src/log.h"
 #include "src/messages.h"
 #include "src/snapshot/natives.h"
-#include "src/wasm/wasm-debug.h"
 #include "src/wasm/wasm-module.h"
 
 #include "include/v8-debug.h"
@@ -212,7 +211,7 @@
   int offset = code_offset();
   while (!source_position_iterator_.done() &&
          source_position_iterator_.code_offset() <= offset) {
-    position_ = source_position_iterator_.source_position();
+    position_ = source_position_iterator_.source_position().ScriptOffset();
     if (source_position_iterator_.is_statement()) {
       statement_position_ = position_;
     }
@@ -296,7 +295,7 @@
     if (!first) source_position_iterator_.Advance();
     first = false;
     if (Done()) return;
-    position_ = source_position_iterator_.source_position();
+    position_ = source_position_iterator_.source_position().ScriptOffset();
     if (source_position_iterator_.is_statement()) {
       statement_position_ = position_;
     }
@@ -936,7 +935,7 @@
     it.Advance();
   }
 
-  if (last_step_action() == StepNext) {
+  if (last_step_action() == StepNext || last_step_action() == StepOut) {
     while (!it.done()) {
       Address current_fp = it.frame()->UnpaddedFP();
       if (current_fp >= thread_local_.target_fp_) break;
@@ -1264,7 +1263,8 @@
   DCHECK(shared->is_compiled());
 
   if (isolate_->concurrent_recompilation_enabled()) {
-    isolate_->optimizing_compile_dispatcher()->Flush();
+    isolate_->optimizing_compile_dispatcher()->Flush(
+        OptimizingCompileDispatcher::BlockingBehavior::kBlock);
   }
 
   List<Handle<JSFunction> > functions;
@@ -1329,8 +1329,7 @@
 
   // We do not need to recompile to debug bytecode.
   if (baseline_exists && !shared->code()->has_debug_break_slots()) {
-    DCHECK(functions.length() > 0);
-    if (!Compiler::CompileDebugCode(functions.first())) return false;
+    if (!Compiler::CompileDebugCode(shared)) return false;
   }
 
   for (Handle<JSFunction> const function : functions) {
@@ -1352,6 +1351,87 @@
   return true;
 }
 
+namespace {
+template <typename Iterator>
+void GetBreakablePositions(Iterator* it, int start_position, int end_position,
+                           BreakPositionAlignment alignment,
+                           std::set<int>* positions) {
+  it->SkipToPosition(start_position, alignment);
+  while (!it->Done() && it->position() < end_position &&
+         it->position() >= start_position) {
+    positions->insert(alignment == STATEMENT_ALIGNED ? it->statement_position()
+                                                     : it->position());
+    it->Next();
+  }
+}
+
+void FindBreakablePositions(Handle<DebugInfo> debug_info, int start_position,
+                            int end_position, BreakPositionAlignment alignment,
+                            std::set<int>* positions) {
+  if (debug_info->HasDebugCode()) {
+    CodeBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+    GetBreakablePositions(&it, start_position, end_position, alignment,
+                          positions);
+  } else {
+    DCHECK(debug_info->HasDebugBytecodeArray());
+    BytecodeArrayBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+    GetBreakablePositions(&it, start_position, end_position, alignment,
+                          positions);
+  }
+}
+}  // namespace
+
+bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
+                                   int end_position, std::set<int>* positions) {
+  while (true) {
+    if (!script->shared_function_infos()->IsWeakFixedArray()) return false;
+
+    WeakFixedArray* infos =
+        WeakFixedArray::cast(script->shared_function_infos());
+    HandleScope scope(isolate_);
+    List<Handle<SharedFunctionInfo>> candidates;
+    {
+      WeakFixedArray::Iterator iterator(infos);
+      SharedFunctionInfo* info;
+      while ((info = iterator.Next<SharedFunctionInfo>())) {
+        if (info->end_position() < start_position ||
+            info->start_position() >= end_position) {
+          continue;
+        }
+        if (!info->IsSubjectToDebugging()) continue;
+        if (!info->HasDebugCode() && !info->allows_lazy_compilation()) continue;
+        candidates.Add(i::handle(info));
+      }
+    }
+
+    bool was_compiled = false;
+    for (int i = 0; i < candidates.length(); ++i) {
+      // Code that cannot be compiled lazily is internal and not debuggable.
+      DCHECK(candidates[i]->allows_lazy_compilation());
+      if (!candidates[i]->HasDebugCode()) {
+        if (!Compiler::CompileDebugCode(candidates[i])) {
+          return false;
+        } else {
+          was_compiled = true;
+        }
+      }
+      if (!EnsureDebugInfo(candidates[i], Handle<JSFunction>::null()))
+        return false;
+    }
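+    // Compiling debug code above may have created SharedFunctionInfos for
+    // inner functions that were not yet in the script's list, so rescan the
+    // script from the top to pick up their breakable positions as well.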
+    if (was_compiled) continue;
+
+    for (int i = 0; i < candidates.length(); ++i) {
+      CHECK(candidates[i]->HasDebugInfo());
+      Handle<DebugInfo> debug_info(candidates[i]->GetDebugInfo());
+      FindBreakablePositions(debug_info, start_position, end_position,
+                             STATEMENT_ALIGNED, positions);
+    }
+    return true;
+  }
+  UNREACHABLE();
+  return false;
+}
+
 void Debug::RecordAsyncFunction(Handle<JSGeneratorObject> generator_object) {
   if (last_step_action() <= StepOut) return;
   if (!IsAsyncFunction(generator_object->function()->shared()->kind())) return;
@@ -1450,44 +1530,11 @@
         return shared_handle;
       }
     }
-    // If not, compile to reveal inner functions, if possible.
-    if (shared->allows_lazy_compilation_without_context()) {
-      HandleScope scope(isolate_);
-      if (!Compiler::CompileDebugCode(handle(shared))) break;
-      continue;
-    }
-
-    // If not possible, comb the heap for the best suitable compile target.
-    JSFunction* closure;
-    {
-      HeapIterator it(isolate_->heap());
-      SharedFunctionInfoFinder finder(position);
-      while (HeapObject* object = it.next()) {
-        JSFunction* candidate_closure = NULL;
-        SharedFunctionInfo* candidate = NULL;
-        if (object->IsJSFunction()) {
-          candidate_closure = JSFunction::cast(object);
-          candidate = candidate_closure->shared();
-        } else if (object->IsSharedFunctionInfo()) {
-          candidate = SharedFunctionInfo::cast(object);
-          if (!candidate->allows_lazy_compilation_without_context()) continue;
-        } else {
-          continue;
-        }
-        if (candidate->script() == *script) {
-          finder.NewCandidate(candidate, candidate_closure);
-        }
-      }
-      closure = finder.ResultClosure();
-      shared = finder.Result();
-    }
-    if (shared == NULL) break;
+    // If not, compile to reveal inner functions.
     HandleScope scope(isolate_);
-    if (closure == NULL) {
-      if (!Compiler::CompileDebugCode(handle(shared))) break;
-    } else {
-      if (!Compiler::CompileDebugCode(handle(closure))) break;
-    }
+    // Code that cannot be compiled lazily is internal and not debuggable.
+    DCHECK(shared->allows_lazy_compilation());
+    if (!Compiler::CompileDebugCode(handle(shared))) break;
   }
   return isolate_->factory()->undefined_value();
 }
@@ -1658,10 +1705,12 @@
   return CallFunction("MakeCompileEvent", arraysize(argv), argv);
 }
 
-
-MaybeHandle<Object> Debug::MakeAsyncTaskEvent(Handle<JSObject> task_event) {
+MaybeHandle<Object> Debug::MakeAsyncTaskEvent(Handle<String> type,
+                                              Handle<Object> id,
+                                              Handle<String> name) {
+  DCHECK(id->IsNumber());
   // Create the async task event object.
-  Handle<Object> argv[] = { task_event };
+  Handle<Object> argv[] = {type, id, name};
   return CallFunction("MakeAsyncTaskEvent", arraysize(argv), argv);
 }
 
@@ -1786,8 +1835,9 @@
   ProcessCompileEvent(v8::AfterCompile, script);
 }
 
-
-void Debug::OnAsyncTaskEvent(Handle<JSObject> data) {
+void Debug::OnAsyncTaskEvent(Handle<String> type, Handle<Object> id,
+                             Handle<String> name) {
+  DCHECK(id->IsNumber());
   if (in_debug_scope() || ignore_events()) return;
 
   HandleScope scope(isolate_);
@@ -1797,7 +1847,7 @@
   // Create the script collected state object.
   Handle<Object> event_data;
   // Bail out and don't call debugger if exception.
-  if (!MakeAsyncTaskEvent(data).ToHandle(&event_data)) return;
+  if (!MakeAsyncTaskEvent(type, id, name).ToHandle(&event_data)) return;
 
   // Process debug event.
   ProcessDebugEvent(v8::AsyncTaskEvent,
@@ -1843,8 +1893,8 @@
   in_debug_event_listener_ = true;
   if (event_listener_->IsForeign()) {
     // Invoke the C debug event listener.
-    v8::Debug::EventCallback callback =
-        FUNCTION_CAST<v8::Debug::EventCallback>(
+    v8::DebugInterface::EventCallback callback =
+        FUNCTION_CAST<v8::DebugInterface::EventCallback>(
             Handle<Foreign>::cast(event_listener_)->foreign_address());
     EventDetailsImpl event_details(event,
                                    Handle<JSObject>::cast(exec_state),
@@ -1852,7 +1902,7 @@
                                    event_listener_data_,
                                    client_data);
     callback(event_details);
-    DCHECK(!isolate_->has_scheduled_exception());
+    CHECK(!isolate_->has_scheduled_exception());
   } else {
     // Invoke the JavaScript debug event listener.
     DCHECK(event_listener_->IsJSFunction());
@@ -1861,8 +1911,10 @@
                               event_data,
                               event_listener_data_ };
     Handle<JSReceiver> global = isolate_->global_proxy();
-    Execution::TryCall(isolate_, Handle<JSFunction>::cast(event_listener_),
-                       global, arraysize(argv), argv);
+    MaybeHandle<Object> result =
+        Execution::Call(isolate_, Handle<JSFunction>::cast(event_listener_),
+                        global, arraysize(argv), argv);
+    CHECK(!result.is_null());  // Listeners must not throw.
   }
   in_debug_event_listener_ = previous;
 }
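
A hypothetical sketch of a foreign listener registered through the new
DebugInterface type (illustrative, not part of this patch; OnDebugEvent is
an assumed name). Note that listeners on both paths must now not throw:
above, the foreign path's DCHECK became a CHECK, and the JavaScript path
switched from Execution::TryCall to Execution::Call followed by a CHECK, so
an exception is no longer silently swallowed:

    static void OnDebugEvent(
        const v8::DebugInterface::EventDetails& details) {
      if (details.GetEvent() == v8::Break) {
        // Per the header comment, don't store the execution state across
        // callbacks, and don't throw from here.
        v8::Local<v8::Object> exec_state = details.GetExecutionState();
        (void)exec_state;  // Inspect frames, locals, etc. here.
      }
    }

    // Registration, e.g. during embedder setup:
    //   v8::DebugInterface::SetDebugEventListener(isolate, OnDebugEvent);
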
diff --git a/src/debug/debug.h b/src/debug/debug.h
index c4e8c17..6e49db6 100644
--- a/src/debug/debug.h
+++ b/src/debug/debug.h
@@ -11,6 +11,7 @@
 #include "src/base/atomicops.h"
 #include "src/base/hashmap.h"
 #include "src/base/platform/platform.h"
+#include "src/debug/debug-interface.h"
 #include "src/execution.h"
 #include "src/factory.h"
 #include "src/flags.h"
@@ -290,7 +291,7 @@
 
 
 // Details of the debug event delivered to the debug event listener.
-class EventDetailsImpl : public v8::Debug::EventDetails {
+class EventDetailsImpl : public v8::DebugInterface::EventDetails {
  public:
   EventDetailsImpl(DebugEvent event,
                    Handle<JSObject> exec_state,
@@ -417,7 +418,8 @@
   void OnCompileError(Handle<Script> script);
   void OnBeforeCompile(Handle<Script> script);
   void OnAfterCompile(Handle<Script> script);
-  void OnAsyncTaskEvent(Handle<JSObject> data);
+  void OnAsyncTaskEvent(Handle<String> type, Handle<Object> id,
+                        Handle<String> name);
 
   // API facing.
   void SetEventListener(Handle<Object> callback, Handle<Object> data);
@@ -459,6 +461,8 @@
   void ClearStepOut();
 
   bool PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared);
+  bool GetPossibleBreakpoints(Handle<Script> script, int start_position,
+                              int end_position, std::set<int>* positions);
 
   void RecordAsyncFunction(Handle<JSGeneratorObject> generator_object);
 
@@ -499,8 +503,11 @@
   void Iterate(ObjectVisitor* v);
 
   bool CheckExecutionState(int id) {
-    return is_active() && !debug_context().is_null() && break_id() != 0 &&
-           break_id() == id;
+    return CheckExecutionState() && break_id() == id;
+  }
+
+  bool CheckExecutionState() {
+    return is_active() && !debug_context().is_null() && break_id() != 0;
   }
 
   // Flags and states.
@@ -569,11 +576,11 @@
   }
 
   void clear_suspended_generator() {
-    thread_local_.suspended_generator_ = Smi::FromInt(0);
+    thread_local_.suspended_generator_ = Smi::kZero;
   }
 
   bool has_suspended_generator() const {
-    return thread_local_.suspended_generator_ != Smi::FromInt(0);
+    return thread_local_.suspended_generator_ != Smi::kZero;
   }
 
   void OnException(Handle<Object> exception, Handle<Object> promise);
@@ -588,8 +595,9 @@
       Handle<Object> promise);
   MUST_USE_RESULT MaybeHandle<Object> MakeCompileEvent(
       Handle<Script> script, v8::DebugEvent type);
-  MUST_USE_RESULT MaybeHandle<Object> MakeAsyncTaskEvent(
-      Handle<JSObject> task_event);
+  MUST_USE_RESULT MaybeHandle<Object> MakeAsyncTaskEvent(Handle<String> type,
+                                                         Handle<Object> id,
+                                                         Handle<String> name);
 
   // Mirror cache handling.
   void ClearMirrorCache();
diff --git a/src/debug/debug.js b/src/debug/debug.js
index b2111eb..8031763 100644
--- a/src/debug/debug.js
+++ b/src/debug/debug.js
@@ -858,16 +858,6 @@
   return debugger_flags;
 };
 
-Debug.getWasmFunctionOffsetTable = function(scriptId) {
-  var script = scriptById(scriptId);
-  return script ? %GetWasmFunctionOffsetTable(script) : UNDEFINED;
-}
-
-Debug.disassembleWasmFunction = function(scriptId) {
-  var script = scriptById(scriptId);
-  return script ? %DisassembleWasmFunction(script) : UNDEFINED;
-}
-
 Debug.MakeMirror = MakeMirror;
 
 function MakeExecutionState(break_id) {
@@ -1142,15 +1132,15 @@
 }
 
 
-function MakeAsyncTaskEvent(event_data) {
-  return new AsyncTaskEvent(event_data);
+function MakeAsyncTaskEvent(type, id, name) {
+  return new AsyncTaskEvent(type, id, name);
 }
 
 
-function AsyncTaskEvent(event_data) {
-  this.type_ = event_data.type;
-  this.name_ = event_data.name;
-  this.id_ = event_data.id;
+function AsyncTaskEvent(type, id, name) {
+  this.type_ = type;
+  this.id_ = id;
+  this.name_ = name;
 }
 
 
@@ -2196,6 +2186,7 @@
 };
 
 
+// TODO(5510): remove this.
 DebugCommandProcessor.prototype.versionRequest_ = function(request, response) {
   response.body = {
     V8Version: %GetV8Version()
diff --git a/src/debug/liveedit.cc b/src/debug/liveedit.cc
index b451842..ace8297 100644
--- a/src/debug/liveedit.cc
+++ b/src/debug/liveedit.cc
@@ -1105,15 +1105,16 @@
 void TranslateSourcePositionTable(Handle<AbstractCode> code,
                                   Handle<JSArray> position_change_array) {
   Isolate* isolate = code->GetIsolate();
-  Zone zone(isolate->allocator());
+  Zone zone(isolate->allocator(), ZONE_NAME);
   SourcePositionTableBuilder builder(&zone);
 
   Handle<ByteArray> source_position_table(code->source_position_table());
   for (SourcePositionTableIterator iterator(*source_position_table);
        !iterator.done(); iterator.Advance()) {
-    int position = iterator.source_position();
-    int new_position = TranslatePosition(position, position_change_array);
-    builder.AddPosition(iterator.code_offset(), new_position,
+    SourcePosition position = iterator.source_position();
+    position.SetScriptOffset(
+        TranslatePosition(position.ScriptOffset(), position_change_array));
+    builder.AddPosition(iterator.code_offset(), position,
                         iterator.is_statement());
   }
 
@@ -1426,7 +1427,7 @@
   for (Address a = unused_stack_top;
       a < unused_stack_bottom;
       a += kPointerSize) {
-    Memory::Object_at(a) = Smi::FromInt(0);
+    Memory::Object_at(a) = Smi::kZero;
   }
 
   return NULL;
@@ -1517,7 +1518,7 @@
                                                      TARGET& target,  // NOLINT
                                                      bool do_drop) {
   Debug* debug = isolate->debug();
-  Zone zone(isolate->allocator());
+  Zone zone(isolate->allocator(), ZONE_NAME);
   Vector<StackFrame*> frames = CreateStackMap(isolate, &zone);
 
 
@@ -1900,9 +1901,7 @@
   Scope* current_scope = scope;
   while (current_scope != NULL) {
     HandleScope handle_scope(isolate_);
-    ZoneList<Variable*>* locals = current_scope->locals();
-    for (int i = 0; i < locals->length(); i++) {
-      Variable* var = locals->at(i);
+    for (Variable* var : *current_scope->locals()) {
       if (!var->IsContextSlot()) continue;
       int context_index = var->index() - Context::MIN_CONTEXT_SLOTS;
       int location = scope_info_length + context_index * 2;
diff --git a/src/debug/mirrors.js b/src/debug/mirrors.js
index 165e172..4bc86da 100644
--- a/src/debug/mirrors.js
+++ b/src/debug/mirrors.js
@@ -257,6 +257,7 @@
                   Block:   5,
                   Script:  6,
                   Eval:    7,
+                  Module:  8,
                 };
 
 /**
@@ -1539,7 +1540,7 @@
 
 /**
  * Returns whether this property value is an exception.
- * @return {booolean} True if this property value is an exception
+ * @return {boolean} True if this property value is an exception
  */
 PropertyMirror.prototype.isException = function() {
   return this.exception_ ? true : false;
@@ -1558,7 +1559,7 @@
 
 /**
  * Returns whether this property has a getter defined through __defineGetter__.
- * @return {booolean} True if this property has a getter
+ * @return {boolean} True if this property has a getter
  */
 PropertyMirror.prototype.hasGetter = function() {
   return this.getter_ ? true : false;
@@ -1567,7 +1568,7 @@
 
 /**
  * Returns whether this property has a setter defined through __defineSetter__.
- * @return {booolean} True if this property has a setter
+ * @return {boolean} True if this property has a setter
  */
 PropertyMirror.prototype.hasSetter = function() {
   return this.setter_ ? true : false;
@@ -1878,6 +1879,15 @@
 };
 
 
+FrameMirror.prototype.script = function() {
+  if (!this.script_) {
+    this.script_ = MakeMirror(this.details_.script());
+  }
+
+  return this.script_;
+};
+
+
 FrameMirror.prototype.receiver = function() {
   return MakeMirror(this.details_.receiver());
 };
@@ -1954,12 +1964,9 @@
 
 
 FrameMirror.prototype.sourceLocation = function() {
-  var func = this.func();
-  if (func.resolved()) {
-    var script = func.script();
-    if (script) {
-      return script.locationFromPosition(this.sourcePosition(), true);
-    }
+  var script = this.script();
+  if (script) {
+    return script.locationFromPosition(this.sourcePosition(), true);
   }
 };
 
diff --git a/src/deoptimize-reason.h b/src/deoptimize-reason.h
index d28ec47..8b93839 100644
--- a/src/deoptimize-reason.h
+++ b/src/deoptimize-reason.h
@@ -10,74 +10,70 @@
 namespace v8 {
 namespace internal {
 
-#define DEOPTIMIZE_REASON_LIST(V)                                             \
-  V(AccessCheck, "Access check needed")                                       \
-  V(NoReason, "no reason")                                                    \
-  V(ConstantGlobalVariableAssignment, "Constant global variable assignment")  \
-  V(ConversionOverflow, "conversion overflow")                                \
-  V(DivisionByZero, "division by zero")                                       \
-  V(ElementsKindUnhandledInKeyedLoadGenericStub,                              \
-    "ElementsKind unhandled in KeyedLoadGenericStub")                         \
-  V(ExpectedHeapNumber, "Expected heap number")                               \
-  V(ExpectedSmi, "Expected smi")                                              \
-  V(ForcedDeoptToRuntime, "Forced deopt to runtime")                          \
-  V(Hole, "hole")                                                             \
-  V(InstanceMigrationFailed, "instance migration failed")                     \
-  V(InsufficientTypeFeedbackForCall, "Insufficient type feedback for call")   \
-  V(InsufficientTypeFeedbackForCallWithArguments,                             \
-    "Insufficient type feedback for call with arguments")                     \
-  V(FastPathFailed, "Falling off the fast path")                              \
-  V(InsufficientTypeFeedbackForCombinedTypeOfBinaryOperation,                 \
-    "Insufficient type feedback for combined type of binary operation")       \
-  V(InsufficientTypeFeedbackForGenericNamedAccess,                            \
-    "Insufficient type feedback for generic named access")                    \
-  V(InsufficientTypeFeedbackForGenericKeyedAccess,                            \
-    "Insufficient type feedback for generic keyed access")                    \
-  V(InsufficientTypeFeedbackForLHSOfBinaryOperation,                          \
-    "Insufficient type feedback for LHS of binary operation")                 \
-  V(InsufficientTypeFeedbackForRHSOfBinaryOperation,                          \
-    "Insufficient type feedback for RHS of binary operation")                 \
-  V(KeyIsNegative, "key is negative")                                         \
-  V(LostPrecision, "lost precision")                                          \
-  V(LostPrecisionOrNaN, "lost precision or NaN")                              \
-  V(MementoFound, "memento found")                                            \
-  V(MinusZero, "minus zero")                                                  \
-  V(NaN, "NaN")                                                               \
-  V(NegativeKeyEncountered, "Negative key encountered")                       \
-  V(NegativeValue, "negative value")                                          \
-  V(NoCache, "no cache")                                                      \
-  V(NonStrictElementsInKeyedLoadGenericStub,                                  \
-    "non-strict elements in KeyedLoadGenericStub")                            \
-  V(NotAHeapNumber, "not a heap number")                                      \
-  V(NotAHeapNumberUndefinedBoolean, "not a heap number/undefined/true/false") \
-  V(NotAHeapNumberUndefined, "not a heap number/undefined")                   \
-  V(NotAJavaScriptObject, "not a JavaScript object")                          \
-  V(NotASmi, "not a Smi")                                                     \
-  V(OutOfBounds, "out of bounds")                                             \
-  V(OutsideOfRange, "Outside of range")                                       \
-  V(Overflow, "overflow")                                                     \
-  V(Proxy, "proxy")                                                           \
-  V(ReceiverWasAGlobalObject, "receiver was a global object")                 \
-  V(Smi, "Smi")                                                               \
-  V(TooManyArguments, "too many arguments")                                   \
-  V(TracingElementsTransitions, "Tracing elements transitions")               \
-  V(TypeMismatchBetweenFeedbackAndConstant,                                   \
-    "Type mismatch between feedback and constant")                            \
-  V(UnexpectedCellContentsInConstantGlobalStore,                              \
-    "Unexpected cell contents in constant global store")                      \
-  V(UnexpectedCellContentsInGlobalStore,                                      \
-    "Unexpected cell contents in global store")                               \
-  V(UnexpectedObject, "unexpected object")                                    \
-  V(UnexpectedRHSOfBinaryOperation, "Unexpected RHS of binary operation")     \
-  V(UnknownMapInPolymorphicAccess, "Unknown map in polymorphic access")       \
-  V(UnknownMapInPolymorphicCall, "Unknown map in polymorphic call")           \
-  V(UnknownMapInPolymorphicElementAccess,                                     \
-    "Unknown map in polymorphic element access")                              \
-  V(UnknownMap, "Unknown map")                                                \
-  V(ValueMismatch, "value mismatch")                                          \
-  V(WrongInstanceType, "wrong instance type")                                 \
-  V(WrongMap, "wrong map")                                                    \
-  V(UndefinedOrNullInForIn, "null or undefined in for-in")                    \
+#define DEOPTIMIZE_REASON_LIST(V)                                            \
+  V(AccessCheck, "Access check needed")                                      \
+  V(NoReason, "no reason")                                                   \
+  V(ConstantGlobalVariableAssignment, "Constant global variable assignment") \
+  V(ConversionOverflow, "conversion overflow")                               \
+  V(DivisionByZero, "division by zero")                                      \
+  V(ExpectedHeapNumber, "Expected heap number")                              \
+  V(ExpectedSmi, "Expected smi")                                             \
+  V(ForcedDeoptToRuntime, "Forced deopt to runtime")                         \
+  V(Hole, "hole")                                                            \
+  V(InstanceMigrationFailed, "instance migration failed")                    \
+  V(InsufficientTypeFeedbackForCall, "Insufficient type feedback for call")  \
+  V(InsufficientTypeFeedbackForCallWithArguments,                            \
+    "Insufficient type feedback for call with arguments")                    \
+  V(FastPathFailed, "Falling off the fast path")                             \
+  V(InsufficientTypeFeedbackForCombinedTypeOfBinaryOperation,                \
+    "Insufficient type feedback for combined type of binary operation")      \
+  V(InsufficientTypeFeedbackForGenericNamedAccess,                           \
+    "Insufficient type feedback for generic named access")                   \
+  V(InsufficientTypeFeedbackForGenericKeyedAccess,                           \
+    "Insufficient type feedback for generic keyed access")                   \
+  V(InsufficientTypeFeedbackForLHSOfBinaryOperation,                         \
+    "Insufficient type feedback for LHS of binary operation")                \
+  V(InsufficientTypeFeedbackForRHSOfBinaryOperation,                         \
+    "Insufficient type feedback for RHS of binary operation")                \
+  V(KeyIsNegative, "key is negative")                                        \
+  V(LostPrecision, "lost precision")                                         \
+  V(LostPrecisionOrNaN, "lost precision or NaN")                             \
+  V(MementoFound, "memento found")                                           \
+  V(MinusZero, "minus zero")                                                 \
+  V(NaN, "NaN")                                                              \
+  V(NegativeKeyEncountered, "Negative key encountered")                      \
+  V(NegativeValue, "negative value")                                         \
+  V(NoCache, "no cache")                                                     \
+  V(NotAHeapNumber, "not a heap number")                                     \
+  V(NotAHeapNumberUndefined, "not a heap number/undefined")                  \
+  V(NotAJavaScriptObject, "not a JavaScript object")                         \
+  V(NotANumberOrOddball, "not a Number or Oddball")                          \
+  V(NotASmi, "not a Smi")                                                    \
+  V(OutOfBounds, "out of bounds")                                            \
+  V(OutsideOfRange, "Outside of range")                                      \
+  V(Overflow, "overflow")                                                    \
+  V(Proxy, "proxy")                                                          \
+  V(ReceiverWasAGlobalObject, "receiver was a global object")                \
+  V(Smi, "Smi")                                                              \
+  V(TooManyArguments, "too many arguments")                                  \
+  V(TracingElementsTransitions, "Tracing elements transitions")              \
+  V(TypeMismatchBetweenFeedbackAndConstant,                                  \
+    "Type mismatch between feedback and constant")                           \
+  V(UnexpectedCellContentsInConstantGlobalStore,                             \
+    "Unexpected cell contents in constant global store")                     \
+  V(UnexpectedCellContentsInGlobalStore,                                     \
+    "Unexpected cell contents in global store")                              \
+  V(UnexpectedObject, "unexpected object")                                   \
+  V(UnexpectedRHSOfBinaryOperation, "Unexpected RHS of binary operation")    \
+  V(UnknownMapInPolymorphicAccess, "Unknown map in polymorphic access")      \
+  V(UnknownMapInPolymorphicCall, "Unknown map in polymorphic call")          \
+  V(UnknownMapInPolymorphicElementAccess,                                    \
+    "Unknown map in polymorphic element access")                             \
+  V(UnknownMap, "Unknown map")                                               \
+  V(ValueMismatch, "value mismatch")                                         \
+  V(WrongInstanceType, "wrong instance type")                                \
+  V(WrongMap, "wrong map")                                                   \
+  V(UndefinedOrNullInForIn, "null or undefined in for-in")                   \
   V(UndefinedOrNullInToObject, "null or undefined in ToObject")
 
 enum class DeoptimizeReason : uint8_t {
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index 971de9e..dddf62e 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -24,9 +24,8 @@
 
 static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
   return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(),
-                                  base::OS::CommitPageSize(),
-                                  EXECUTABLE,
-                                  NULL);
+                                  MemoryAllocator::GetCommitPageSize(),
+                                  EXECUTABLE, NULL);
 }
 
 
@@ -88,7 +87,7 @@
 size_t Deoptimizer::GetMaxDeoptTableSize() {
   int entries_size =
       Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_;
-  int commit_page_size = static_cast<int>(base::OS::CommitPageSize());
+  int commit_page_size = static_cast<int>(MemoryAllocator::GetCommitPageSize());
   int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) /
                     commit_page_size) + 1;
   return static_cast<size_t>(commit_page_size * page_count);
@@ -276,7 +275,7 @@
 
   // Move marked code from the optimized code list to the deoptimized
   // code list, collecting them into a ZoneList.
-  Zone zone(isolate->allocator());
+  Zone zone(isolate->allocator(), ZONE_NAME);
   ZoneList<Code*> codes(10, &zone);
 
   // Walk over all optimized code objects in this native context.
@@ -1001,7 +1000,7 @@
     }
   }
 
-  // Compute this frame's PC, state, and continuation.
+  // Compute this frame's PC and state.
   FixedArray* raw_data = non_optimized_code->deoptimization_data();
   DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
   Address start = non_optimized_code->instruction_start();
@@ -1024,7 +1023,7 @@
   // and will be materialized by {Runtime_NotifyDeoptimized}. For additional
   // safety we use Smi(0) instead of the potential {arguments_marker} here.
   if (is_topmost) {
-    intptr_t context_value = reinterpret_cast<intptr_t>(Smi::FromInt(0));
+    intptr_t context_value = reinterpret_cast<intptr_t>(Smi::kZero);
     Register context_reg = JavaScriptFrame::context_register();
     output_frame->SetRegister(context_reg.code(), context_value);
   }
@@ -1243,7 +1242,7 @@
 
   // Translate the accumulator register (depending on frame position).
   if (is_topmost) {
-    // For topmost frmae, p ut the accumulator on the stack. The bailout state
+    // For topmost frame, put the accumulator on the stack. The bailout state
     // for interpreted frames is always set to {BailoutState::TOS_REGISTER} and
     // the {NotifyDeoptimized} builtin pops it off the topmost frame (possibly
     // after materialization).
@@ -1268,9 +1267,15 @@
   }
   CHECK_EQ(0u, output_offset);
 
+  // Compute this frame's PC and state. The PC will be a special builtin that
+  // continues the bytecode dispatch. Note that non-topmost and lazy-style
+  // bailout handlers also advance the bytecode offset before dispatch, hence
+  // simulating what normal handlers do upon completion of the operation.
   Builtins* builtins = isolate_->builtins();
   Code* dispatch_builtin =
-      builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
+      (!is_topmost || (bailout_type_ == LAZY)) && !goto_catch_handler
+          ? builtins->builtin(Builtins::kInterpreterEnterBytecodeAdvance)
+          : builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
   output_frame->SetPc(reinterpret_cast<intptr_t>(dispatch_builtin->entry()));
   // Restore accumulator (TOS) register.
   output_frame->SetState(
@@ -1292,7 +1297,7 @@
   // and will be materialized by {Runtime_NotifyDeoptimized}. For additional
   // safety we use Smi(0) instead of the potential {arguments_marker} here.
   if (is_topmost) {
-    intptr_t context_value = reinterpret_cast<intptr_t>(Smi::FromInt(0));
+    intptr_t context_value = reinterpret_cast<intptr_t>(Smi::kZero);
     Register context_reg = JavaScriptFrame::context_register();
     output_frame->SetRegister(context_reg.code(), context_value);
   }
@@ -1602,12 +1607,6 @@
   output_frame->SetFrameSlot(output_offset, value);
   DebugPrintOutputSlot(value, frame_index, output_offset, "context\n");
 
-  // The allocation site.
-  output_offset -= kPointerSize;
-  value = reinterpret_cast<intptr_t>(isolate_->heap()->undefined_value());
-  output_frame->SetFrameSlot(output_offset, value);
-  DebugPrintOutputSlot(value, frame_index, output_offset, "allocation site\n");
-
   // Number of incoming arguments.
   output_offset -= kPointerSize;
   value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
@@ -1659,7 +1658,7 @@
   // and will be materialized by {Runtime_NotifyDeoptimized}. For additional
   // safety we use Smi(0) instead of the potential {arguments_marker} here.
   if (is_topmost) {
-    intptr_t context_value = reinterpret_cast<intptr_t>(Smi::FromInt(0));
+    intptr_t context_value = reinterpret_cast<intptr_t>(Smi::kZero);
     Register context_reg = JavaScriptFrame::context_register();
     output_frame->SetRegister(context_reg.code(), context_value);
   }
@@ -1843,7 +1842,7 @@
   // and will be materialized by {Runtime_NotifyDeoptimized}. For additional
   // safety we use Smi(0) instead of the potential {arguments_marker} here.
   if (is_topmost) {
-    intptr_t context_value = reinterpret_cast<intptr_t>(Smi::FromInt(0));
+    intptr_t context_value = reinterpret_cast<intptr_t>(Smi::kZero);
     Register context_reg = JavaScriptFrame::context_register();
     output_frame->SetRegister(context_reg.code(), context_value);
   }
@@ -2287,7 +2286,7 @@
   }
 }
 
-void TranslationBuffer::Add(int32_t value, Zone* zone) {
+void TranslationBuffer::Add(int32_t value) {
   // This wouldn't handle kMinInt correctly if it ever encountered it.
   DCHECK(value != kMinInt);
   // Encode the sign bit in the least significant bit.
@@ -2298,7 +2297,7 @@
   // each byte to indicate whether or not more bytes follow.
   do {
     uint32_t next = bits >> 7;
-    contents_.Add(((bits << 1) & 0xFF) | (next != 0), zone);
+    contents_.push_back(((bits << 1) & 0xFF) | (next != 0));
     bits = next;
   } while (bits != 0);
 }
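
To make the encoding concrete: the sign is folded into bit 0 of the shifted
value, which is then emitted low 7-bit group first, with bit 0 of every
output byte acting as a continuation flag. A standalone sketch of the same
scheme (illustrative only; EncodeTranslationValue is an assumed name):

    #include <cstdint>
    #include <vector>

    std::vector<uint8_t> EncodeTranslationValue(int32_t value) {
      // Fold the sign into the least significant bit, as Add() does.
      // kMinInt is not handled, mirroring the DCHECK in the real code.
      uint32_t bits = (value < 0)
                          ? ((static_cast<uint32_t>(-value) << 1) | 1)
                          : static_cast<uint32_t>(value) << 1;
      std::vector<uint8_t> out;
      do {
        uint32_t next = bits >> 7;
        // Low 7 bits land in bits 1..7; bit 0 flags that more bytes follow.
        out.push_back(
            static_cast<uint8_t>(((bits << 1) & 0xFF) | (next != 0)));
        bits = next;
      } while (bits != 0);
      return out;  // EncodeTranslationValue(300) yields {0xB1, 0x08}.
    }
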
@@ -2322,167 +2321,166 @@
 
 
 Handle<ByteArray> TranslationBuffer::CreateByteArray(Factory* factory) {
-  int length = contents_.length();
-  Handle<ByteArray> result = factory->NewByteArray(length, TENURED);
-  MemCopy(result->GetDataStartAddress(), contents_.ToVector().start(), length);
+  Handle<ByteArray> result = factory->NewByteArray(CurrentIndex(), TENURED);
+  contents_.CopyTo(result->GetDataStartAddress());
   return result;
 }
 
 
 void Translation::BeginConstructStubFrame(int literal_id, unsigned height) {
-  buffer_->Add(CONSTRUCT_STUB_FRAME, zone());
-  buffer_->Add(literal_id, zone());
-  buffer_->Add(height, zone());
+  buffer_->Add(CONSTRUCT_STUB_FRAME);
+  buffer_->Add(literal_id);
+  buffer_->Add(height);
 }
 
 
 void Translation::BeginGetterStubFrame(int literal_id) {
-  buffer_->Add(GETTER_STUB_FRAME, zone());
-  buffer_->Add(literal_id, zone());
+  buffer_->Add(GETTER_STUB_FRAME);
+  buffer_->Add(literal_id);
 }
 
 
 void Translation::BeginSetterStubFrame(int literal_id) {
-  buffer_->Add(SETTER_STUB_FRAME, zone());
-  buffer_->Add(literal_id, zone());
+  buffer_->Add(SETTER_STUB_FRAME);
+  buffer_->Add(literal_id);
 }
 
 
 void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
-  buffer_->Add(ARGUMENTS_ADAPTOR_FRAME, zone());
-  buffer_->Add(literal_id, zone());
-  buffer_->Add(height, zone());
+  buffer_->Add(ARGUMENTS_ADAPTOR_FRAME);
+  buffer_->Add(literal_id);
+  buffer_->Add(height);
 }
 
 void Translation::BeginTailCallerFrame(int literal_id) {
-  buffer_->Add(TAIL_CALLER_FRAME, zone());
-  buffer_->Add(literal_id, zone());
+  buffer_->Add(TAIL_CALLER_FRAME);
+  buffer_->Add(literal_id);
 }
 
 void Translation::BeginJSFrame(BailoutId node_id,
                                int literal_id,
                                unsigned height) {
-  buffer_->Add(JS_FRAME, zone());
-  buffer_->Add(node_id.ToInt(), zone());
-  buffer_->Add(literal_id, zone());
-  buffer_->Add(height, zone());
+  buffer_->Add(JS_FRAME);
+  buffer_->Add(node_id.ToInt());
+  buffer_->Add(literal_id);
+  buffer_->Add(height);
 }
 
 
 void Translation::BeginInterpretedFrame(BailoutId bytecode_offset,
                                         int literal_id, unsigned height) {
-  buffer_->Add(INTERPRETED_FRAME, zone());
-  buffer_->Add(bytecode_offset.ToInt(), zone());
-  buffer_->Add(literal_id, zone());
-  buffer_->Add(height, zone());
+  buffer_->Add(INTERPRETED_FRAME);
+  buffer_->Add(bytecode_offset.ToInt());
+  buffer_->Add(literal_id);
+  buffer_->Add(height);
 }
 
 
 void Translation::BeginCompiledStubFrame(int height) {
-  buffer_->Add(COMPILED_STUB_FRAME, zone());
-  buffer_->Add(height, zone());
+  buffer_->Add(COMPILED_STUB_FRAME);
+  buffer_->Add(height);
 }
 
 
 void Translation::BeginArgumentsObject(int args_length) {
-  buffer_->Add(ARGUMENTS_OBJECT, zone());
-  buffer_->Add(args_length, zone());
+  buffer_->Add(ARGUMENTS_OBJECT);
+  buffer_->Add(args_length);
 }
 
 
 void Translation::BeginCapturedObject(int length) {
-  buffer_->Add(CAPTURED_OBJECT, zone());
-  buffer_->Add(length, zone());
+  buffer_->Add(CAPTURED_OBJECT);
+  buffer_->Add(length);
 }
 
 
 void Translation::DuplicateObject(int object_index) {
-  buffer_->Add(DUPLICATED_OBJECT, zone());
-  buffer_->Add(object_index, zone());
+  buffer_->Add(DUPLICATED_OBJECT);
+  buffer_->Add(object_index);
 }
 
 
 void Translation::StoreRegister(Register reg) {
-  buffer_->Add(REGISTER, zone());
-  buffer_->Add(reg.code(), zone());
+  buffer_->Add(REGISTER);
+  buffer_->Add(reg.code());
 }
 
 
 void Translation::StoreInt32Register(Register reg) {
-  buffer_->Add(INT32_REGISTER, zone());
-  buffer_->Add(reg.code(), zone());
+  buffer_->Add(INT32_REGISTER);
+  buffer_->Add(reg.code());
 }
 
 
 void Translation::StoreUint32Register(Register reg) {
-  buffer_->Add(UINT32_REGISTER, zone());
-  buffer_->Add(reg.code(), zone());
+  buffer_->Add(UINT32_REGISTER);
+  buffer_->Add(reg.code());
 }
 
 
 void Translation::StoreBoolRegister(Register reg) {
-  buffer_->Add(BOOL_REGISTER, zone());
-  buffer_->Add(reg.code(), zone());
+  buffer_->Add(BOOL_REGISTER);
+  buffer_->Add(reg.code());
 }
 
 void Translation::StoreFloatRegister(FloatRegister reg) {
-  buffer_->Add(FLOAT_REGISTER, zone());
-  buffer_->Add(reg.code(), zone());
+  buffer_->Add(FLOAT_REGISTER);
+  buffer_->Add(reg.code());
 }
 
 void Translation::StoreDoubleRegister(DoubleRegister reg) {
-  buffer_->Add(DOUBLE_REGISTER, zone());
-  buffer_->Add(reg.code(), zone());
+  buffer_->Add(DOUBLE_REGISTER);
+  buffer_->Add(reg.code());
 }
 
 
 void Translation::StoreStackSlot(int index) {
-  buffer_->Add(STACK_SLOT, zone());
-  buffer_->Add(index, zone());
+  buffer_->Add(STACK_SLOT);
+  buffer_->Add(index);
 }
 
 
 void Translation::StoreInt32StackSlot(int index) {
-  buffer_->Add(INT32_STACK_SLOT, zone());
-  buffer_->Add(index, zone());
+  buffer_->Add(INT32_STACK_SLOT);
+  buffer_->Add(index);
 }
 
 
 void Translation::StoreUint32StackSlot(int index) {
-  buffer_->Add(UINT32_STACK_SLOT, zone());
-  buffer_->Add(index, zone());
+  buffer_->Add(UINT32_STACK_SLOT);
+  buffer_->Add(index);
 }
 
 
 void Translation::StoreBoolStackSlot(int index) {
-  buffer_->Add(BOOL_STACK_SLOT, zone());
-  buffer_->Add(index, zone());
+  buffer_->Add(BOOL_STACK_SLOT);
+  buffer_->Add(index);
 }
 
 void Translation::StoreFloatStackSlot(int index) {
-  buffer_->Add(FLOAT_STACK_SLOT, zone());
-  buffer_->Add(index, zone());
+  buffer_->Add(FLOAT_STACK_SLOT);
+  buffer_->Add(index);
 }
 
 void Translation::StoreDoubleStackSlot(int index) {
-  buffer_->Add(DOUBLE_STACK_SLOT, zone());
-  buffer_->Add(index, zone());
+  buffer_->Add(DOUBLE_STACK_SLOT);
+  buffer_->Add(index);
 }
 
 
 void Translation::StoreLiteral(int literal_id) {
-  buffer_->Add(LITERAL, zone());
-  buffer_->Add(literal_id, zone());
+  buffer_->Add(LITERAL);
+  buffer_->Add(literal_id);
 }
 
 
 void Translation::StoreArgumentsObject(bool args_known,
                                        int args_index,
                                        int args_length) {
-  buffer_->Add(ARGUMENTS_OBJECT, zone());
-  buffer_->Add(args_known, zone());
-  buffer_->Add(args_index, zone());
-  buffer_->Add(args_length, zone());
+  buffer_->Add(ARGUMENTS_OBJECT);
+  buffer_->Add(args_known);
+  buffer_->Add(args_index);
+  buffer_->Add(args_length);
 }
 
 
@@ -2728,16 +2726,19 @@
   int last_deopt_id = kNoDeoptimizationId;
   int mask = RelocInfo::ModeMask(RelocInfo::DEOPT_REASON) |
              RelocInfo::ModeMask(RelocInfo::DEOPT_ID) |
-             RelocInfo::ModeMask(RelocInfo::DEOPT_POSITION);
+             RelocInfo::ModeMask(RelocInfo::DEOPT_SCRIPT_OFFSET) |
+             RelocInfo::ModeMask(RelocInfo::DEOPT_INLINING_ID);
   for (RelocIterator it(code, mask); !it.done(); it.next()) {
     RelocInfo* info = it.rinfo();
     if (info->pc() >= pc) {
       return DeoptInfo(last_position, last_reason, last_deopt_id);
     }
-    if (info->rmode() == RelocInfo::DEOPT_POSITION) {
-      int raw_position = static_cast<int>(info->data());
-      last_position = raw_position ? SourcePosition::FromRaw(raw_position)
-                                   : SourcePosition::Unknown();
+    if (info->rmode() == RelocInfo::DEOPT_SCRIPT_OFFSET) {
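+      // DEOPT_SCRIPT_OFFSET and DEOPT_INLINING_ID are emitted as an adjacent
+      // pair, so it is safe to consume the next reloc entry here; the DCHECK
+      // below guards that invariant.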
+      int script_offset = static_cast<int>(info->data());
+      it.next();
+      DCHECK(it.rinfo()->rmode() == RelocInfo::DEOPT_INLINING_ID);
+      int inlining_id = static_cast<int>(it.rinfo()->data());
+      last_position = SourcePosition(script_offset, inlining_id);
     } else if (info->rmode() == RelocInfo::DEOPT_ID) {
       last_deopt_id = static_cast<int>(info->data());
     } else if (info->rmode() == RelocInfo::DEOPT_REASON) {
@@ -2765,11 +2766,8 @@
 int Deoptimizer::ComputeSourcePositionFromBytecodeArray(
     SharedFunctionInfo* shared, BailoutId node_id) {
   DCHECK(shared->HasBytecodeArray());
-  // BailoutId points to the next bytecode in the bytecode aray. Subtract
-  // 1 to get the end of current bytecode.
-  int code_offset = node_id.ToInt() - 1;
   return AbstractCode::cast(shared->bytecode_array())
-      ->SourcePosition(code_offset);
+      ->SourcePosition(node_id.ToInt());
 }
 
 // static
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 4fb7851..4d84fb7 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -9,6 +9,7 @@
 #include "src/deoptimize-reason.h"
 #include "src/macro-assembler.h"
 #include "src/source-position.h"
+#include "src/zone/zone-chunk-list.h"
 
 namespace v8 {
 namespace internal {
@@ -844,15 +845,15 @@
 
 class TranslationBuffer BASE_EMBEDDED {
  public:
-  explicit TranslationBuffer(Zone* zone) : contents_(256, zone) { }
+  explicit TranslationBuffer(Zone* zone) : contents_(zone) {}
 
-  int CurrentIndex() const { return contents_.length(); }
-  void Add(int32_t value, Zone* zone);
+  int CurrentIndex() const { return static_cast<int>(contents_.size()); }
+  void Add(int32_t value);
 
   Handle<ByteArray> CreateByteArray(Factory* factory);
 
  private:
-  ZoneList<uint8_t> contents_;
+  ZoneChunkList<uint8_t> contents_;
 };
 
 
@@ -917,9 +918,9 @@
       : buffer_(buffer),
         index_(buffer->CurrentIndex()),
         zone_(zone) {
-    buffer_->Add(BEGIN, zone);
-    buffer_->Add(frame_count, zone);
-    buffer_->Add(jsframe_count, zone);
+    buffer_->Add(BEGIN);
+    buffer_->Add(frame_count);
+    buffer_->Add(jsframe_count);
   }
 
   int index() const { return index_; }
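
The switch from ZoneList<uint8_t> to ZoneChunkList<uint8_t> above is what
lets TranslationBuffer::Add drop its Zone* parameter: the chunk list keeps
a reference to the zone it was constructed with, so call sites no longer
thread it through every append. A before/after sketch of a typical call
site (illustrative):

    // Before: every append had to pass the zone along.
    //   buffer_->Add(BEGIN, zone);
    // After: the zone is captured once in the TranslationBuffer constructor.
    //   buffer_->Add(BEGIN);
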
diff --git a/src/disassembler.cc b/src/disassembler.cc
index 1da9171..7036e1b 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -172,8 +172,11 @@
       }
 
       RelocInfo::Mode rmode = relocinfo.rmode();
-      if (rmode == RelocInfo::DEOPT_POSITION) {
-        out.AddFormatted("    ;; debug: deopt position '%d'",
+      if (rmode == RelocInfo::DEOPT_SCRIPT_OFFSET) {
+        out.AddFormatted("    ;; debug: deopt position, script offset '%d'",
+                         static_cast<int>(relocinfo.data()));
+      } else if (rmode == RelocInfo::DEOPT_INLINING_ID) {
+        out.AddFormatted("    ;; debug: deopt position, inlining id '%d'",
                          static_cast<int>(relocinfo.data()));
       } else if (rmode == RelocInfo::DEOPT_REASON) {
         DeoptimizeReason reason =
diff --git a/src/eh-frame.cc b/src/eh-frame.cc
index 5f0f1c1..ce5552f 100644
--- a/src/eh-frame.cc
+++ b/src/eh-frame.cc
@@ -252,7 +252,7 @@
   DCHECK_GE(pc_offset, last_pc_offset_);
   uint32_t delta = pc_offset - last_pc_offset_;
 
-  DCHECK_EQ(delta % EhFrameConstants::kCodeAlignmentFactor, 0);
+  DCHECK_EQ(delta % EhFrameConstants::kCodeAlignmentFactor, 0u);
   uint32_t factored_delta = delta / EhFrameConstants::kCodeAlignmentFactor;
 
   if (factored_delta <= EhFrameConstants::kLocationMask) {
diff --git a/src/eh-frame.h b/src/eh-frame.h
index 6e703d4..3da4612 100644
--- a/src/eh-frame.h
+++ b/src/eh-frame.h
@@ -5,12 +5,15 @@
 #ifndef V8_EH_FRAME_H_
 #define V8_EH_FRAME_H_
 
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
 #include "src/macro-assembler.h"
 
 namespace v8 {
 namespace internal {
 
-class EhFrameConstants final : public AllStatic {
+class V8_EXPORT_PRIVATE EhFrameConstants final
+    : public NON_EXPORTED_BASE(AllStatic) {
  public:
   enum class DwarfOpcodes : byte {
     kNop = 0x00,
@@ -61,7 +64,7 @@
   static const int kEhFrameHdrSize = 20;
 };
 
-class EhFrameWriter {
+class V8_EXPORT_PRIVATE EhFrameWriter {
  public:
   explicit EhFrameWriter(Zone* zone);
 
@@ -196,7 +199,7 @@
   DISALLOW_COPY_AND_ASSIGN(EhFrameWriter);
 };
 
-class EhFrameIterator {
+class V8_EXPORT_PRIVATE EhFrameIterator {
  public:
   EhFrameIterator(const byte* start, const byte* end)
       : start_(start), next_(start), end_(end) {
diff --git a/src/elements.cc b/src/elements.cc
index fb73d6c..ccbdb40 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -566,8 +566,10 @@
                            Handle<FixedArrayBase> backing_store, uint32_t start,
                            uint32_t end) {
     if (IsFastPackedElementsKind(kind())) return true;
+    Isolate* isolate = backing_store->GetIsolate();
     for (uint32_t i = start; i < end; i++) {
-      if (!Subclass::HasElementImpl(holder, i, backing_store, ALL_PROPERTIES)) {
+      if (!Subclass::HasElementImpl(isolate, holder, i, backing_store,
+                                    ALL_PROPERTIES)) {
         return false;
       }
     }
@@ -594,14 +596,16 @@
   bool HasElement(Handle<JSObject> holder, uint32_t index,
                   Handle<FixedArrayBase> backing_store,
                   PropertyFilter filter) final {
-    return Subclass::HasElementImpl(holder, index, backing_store, filter);
+    return Subclass::HasElementImpl(holder->GetIsolate(), holder, index,
+                                    backing_store, filter);
   }
 
-  static bool HasElementImpl(Handle<JSObject> holder, uint32_t index,
+  static bool HasElementImpl(Isolate* isolate, Handle<JSObject> holder,
+                             uint32_t index,
                              Handle<FixedArrayBase> backing_store,
                              PropertyFilter filter) {
-    return Subclass::GetEntryForIndexImpl(*holder, *backing_store, index,
-                                          filter) != kMaxUInt32;
+    return Subclass::GetEntryForIndexImpl(isolate, *holder, *backing_store,
+                                          index, filter) != kMaxUInt32;
   }
 
   bool HasAccessors(JSObject* holder) final {
@@ -772,6 +776,15 @@
     JSObject::ValidateElements(array);
   }
 
+  uint32_t NumberOfElements(JSObject* receiver) final {
+    return Subclass::NumberOfElementsImpl(receiver, receiver->elements());
+  }
+
+  static uint32_t NumberOfElementsImpl(JSObject* receiver,
+                                       FixedArrayBase* backing_store) {
+    UNREACHABLE();
+  }
+
   static uint32_t GetMaxIndex(JSObject* receiver, FixedArrayBase* elements) {
     if (receiver->IsJSArray()) {
       DCHECK(JSArray::cast(receiver)->length()->IsSmi());
@@ -973,6 +986,12 @@
                                packed_size, copy_size);
   }
 
+  void CopyElements(Handle<FixedArrayBase> source, ElementsKind source_kind,
+                    Handle<FixedArrayBase> destination, int size) {
+    Subclass::CopyElementsImpl(*source, 0, *destination, source_kind, 0,
+                               kPackedSizeNotKnown, size);
+  }
+
   Handle<SeededNumberDictionary> Normalize(Handle<JSObject> object) final {
     return Subclass::NormalizeImpl(object, handle(object->elements()));
   }
@@ -1009,7 +1028,7 @@
       if (!key->ToUint32(&index)) continue;
 
       uint32_t entry = Subclass::GetEntryForIndexImpl(
-          *object, object->elements(), index, filter);
+          isolate, *object, object->elements(), index, filter);
       if (entry == kMaxUInt32) continue;
 
       PropertyDetails details = Subclass::GetDetailsImpl(*object, entry);
@@ -1045,9 +1064,10 @@
     // Non-dictionary elements can't have all-can-read accessors.
     uint32_t length = Subclass::GetMaxIndex(*object, *backing_store);
     PropertyFilter filter = keys->filter();
-    Factory* factory = keys->isolate()->factory();
+    Isolate* isolate = keys->isolate();
+    Factory* factory = isolate->factory();
     for (uint32_t i = 0; i < length; i++) {
-      if (Subclass::HasElementImpl(object, i, backing_store, filter)) {
+      if (Subclass::HasElementImpl(isolate, object, i, backing_store, filter)) {
         keys->AddKey(factory->NewNumberFromUint(i));
       }
     }
@@ -1060,7 +1080,7 @@
       uint32_t insertion_index = 0) {
     uint32_t length = Subclass::GetMaxIndex(*object, *backing_store);
     for (uint32_t i = 0; i < length; i++) {
-      if (Subclass::HasElementImpl(object, i, backing_store, filter)) {
+      if (Subclass::HasElementImpl(isolate, object, i, backing_store, filter)) {
         if (convert == GetKeysConversion::kConvertToString) {
           Handle<String> index_string = isolate->factory()->Uint32ToString(i);
           list->set(insertion_index, *index_string);
@@ -1090,6 +1110,7 @@
     uint32_t nof_property_keys = keys->length();
     uint32_t initial_list_length =
         Subclass::GetMaxNumberOfEntries(*object, *backing_store);
+
     initial_list_length += nof_property_keys;
     if (initial_list_length > FixedArray::kMaxLength ||
         initial_list_length < nof_property_keys) {
@@ -1097,13 +1118,30 @@
           MessageTemplate::kInvalidArrayLength));
     }
 
+    // Collect the element indices into a new list.
+    MaybeHandle<FixedArray> raw_array =
+        isolate->factory()->TryNewFixedArray(initial_list_length);
+    Handle<FixedArray> combined_keys;
+
+    // If we have a holey backing store, try to precisely estimate the
+    // backing store size as a last resort in case we cannot allocate the
+    // big array.
+    if (!raw_array.ToHandle(&combined_keys)) {
+      if (IsHoleyElementsKind(kind())) {
+        // If we overestimate the result list size we might end up in the
+        // large-object space which doesn't free memory on shrinking the list.
+        // Hence we try to estimate the final size for holey backing stores more
+        // precisely here.
+        initial_list_length =
+            Subclass::NumberOfElementsImpl(*object, *backing_store);
+        initial_list_length += nof_property_keys;
+      }
+      combined_keys = isolate->factory()->NewFixedArray(initial_list_length);
+    }
+
+    uint32_t nof_indices = 0;
     bool needs_sorting =
         IsDictionaryElementsKind(kind()) || IsSloppyArgumentsElements(kind());
-
-    // Collect the element indices into a new list.
-    uint32_t nof_indices = 0;
-    Handle<FixedArray> combined_keys =
-        isolate->factory()->NewFixedArray(initial_list_length);
     combined_keys = Subclass::DirectCollectElementIndicesImpl(
         isolate, object, backing_store,
         needs_sorting ? GetKeysConversion::kKeepNumbers : convert, filter,
@@ -1186,13 +1224,14 @@
     return entry;
   }
 
-  static uint32_t GetEntryForIndexImpl(JSObject* holder,
+  static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject* holder,
                                        FixedArrayBase* backing_store,
                                        uint32_t index, PropertyFilter filter) {
     uint32_t length = Subclass::GetMaxIndex(holder, backing_store);
     if (IsHoleyElementsKind(kind())) {
       return index < length &&
-                     !BackingStore::cast(backing_store)->is_the_hole(index)
+                     !BackingStore::cast(backing_store)
+                          ->is_the_hole(isolate, index)
                  ? index
                  : kMaxUInt32;
     } else {
@@ -1200,9 +1239,10 @@
     }
   }
 
-  uint32_t GetEntryForIndex(JSObject* holder, FixedArrayBase* backing_store,
+  uint32_t GetEntryForIndex(Isolate* isolate, JSObject* holder,
+                            FixedArrayBase* backing_store,
                             uint32_t index) final {
-    return Subclass::GetEntryForIndexImpl(holder, backing_store, index,
+    return Subclass::GetEntryForIndexImpl(isolate, holder, backing_store, index,
                                           ALL_PROPERTIES);
   }
 
@@ -1239,6 +1279,11 @@
 
   static uint32_t GetMaxNumberOfEntries(JSObject* receiver,
                                         FixedArrayBase* backing_store) {
+    return NumberOfElementsImpl(receiver, backing_store);
+  }
+
+  static uint32_t NumberOfElementsImpl(JSObject* receiver,
+                                       FixedArrayBase* backing_store) {
     SeededNumberDictionary* dict = SeededNumberDictionary::cast(backing_store);
     return dict->NumberOfElements();
   }
@@ -1387,11 +1432,12 @@
     object->set_elements(*new_dictionary);
   }
 
-  static bool HasEntryImpl(FixedArrayBase* store, uint32_t entry) {
+  static bool HasEntryImpl(Isolate* isolate, FixedArrayBase* store,
+                           uint32_t entry) {
     DisallowHeapAllocation no_gc;
     SeededNumberDictionary* dict = SeededNumberDictionary::cast(store);
     Object* index = dict->KeyAt(entry);
-    return !index->IsTheHole(dict->GetIsolate());
+    return !index->IsTheHole(isolate);
   }
 
   static uint32_t GetIndexForEntryImpl(FixedArrayBase* store, uint32_t entry) {
@@ -1402,11 +1448,12 @@
     return result;
   }
 
-  static uint32_t GetEntryForIndexImpl(JSObject* holder, FixedArrayBase* store,
-                                       uint32_t index, PropertyFilter filter) {
+  static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject* holder,
+                                       FixedArrayBase* store, uint32_t index,
+                                       PropertyFilter filter) {
     DisallowHeapAllocation no_gc;
     SeededNumberDictionary* dictionary = SeededNumberDictionary::cast(store);
-    int entry = dictionary->FindEntry(index);
+    int entry = dictionary->FindEntry(isolate, index);
     if (entry == SeededNumberDictionary::kNotFound) return kMaxUInt32;
     if (filter != ALL_PROPERTIES) {
       PropertyDetails details = dictionary->DetailsAt(entry);
@@ -1502,8 +1549,8 @@
     Isolate* isolate = accumulator->isolate();
     Handle<Object> undefined = isolate->factory()->undefined_value();
     Handle<Object> the_hole = isolate->factory()->the_hole_value();
-    SeededNumberDictionary* dictionary =
-        SeededNumberDictionary::cast(receiver->elements());
+    Handle<SeededNumberDictionary> dictionary(
+        SeededNumberDictionary::cast(receiver->elements()), isolate);
     int capacity = dictionary->Capacity();
     for (int i = 0; i < capacity; i++) {
       Object* k = dictionary->KeyAt(i);
@@ -1723,7 +1770,7 @@
     int j = 0;
     for (int i = 0; j < capacity; i++) {
       if (IsHoleyElementsKind(kind)) {
-        if (BackingStore::cast(*store)->is_the_hole(i)) continue;
+        if (BackingStore::cast(*store)->is_the_hole(isolate, i)) continue;
       }
       Handle<Object> value = Subclass::GetImpl(*store, i);
       dictionary = SeededNumberDictionary::AddNumberEntry(
@@ -1736,12 +1783,12 @@
   static void DeleteAtEnd(Handle<JSObject> obj,
                           Handle<BackingStore> backing_store, uint32_t entry) {
     uint32_t length = static_cast<uint32_t>(backing_store->length());
-    Heap* heap = obj->GetHeap();
+    Isolate* isolate = obj->GetIsolate();
     for (; entry > 0; entry--) {
-      if (!backing_store->is_the_hole(entry - 1)) break;
+      if (!backing_store->is_the_hole(isolate, entry - 1)) break;
     }
     if (entry == 0) {
-      FixedArray* empty = heap->empty_fixed_array();
+      FixedArray* empty = isolate->heap()->empty_fixed_array();
       // Dynamically ask for the elements kind here since we manually redirect
       // the operations for argument backing stores.
       if (obj->GetElementsKind() == FAST_SLOPPY_ARGUMENTS_ELEMENTS) {
@@ -1752,8 +1799,8 @@
       return;
     }
 
-    heap->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(*backing_store,
-                                                           length - entry);
+    isolate->heap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
+        *backing_store, length - entry);
   }
 
   static void DeleteCommon(Handle<JSObject> obj, uint32_t entry,
@@ -1768,6 +1815,7 @@
       return;
     }
 
+    Isolate* isolate = obj->GetIsolate();
     backing_store->set_the_hole(entry);
 
     // TODO(verwaest): Move this out of elements.cc.
@@ -1784,12 +1832,13 @@
     } else {
       length = static_cast<uint32_t>(store->length());
     }
-    if ((entry > 0 && backing_store->is_the_hole(entry - 1)) ||
-        (entry + 1 < length && backing_store->is_the_hole(entry + 1))) {
+    if ((entry > 0 && backing_store->is_the_hole(isolate, entry - 1)) ||
+        (entry + 1 < length &&
+         backing_store->is_the_hole(isolate, entry + 1))) {
       if (!obj->IsJSArray()) {
         uint32_t i;
         for (i = entry + 1; i < length; i++) {
-          if (!backing_store->is_the_hole(i)) break;
+          if (!backing_store->is_the_hole(isolate, i)) break;
         }
         if (i == length) {
           DeleteAtEnd(obj, backing_store, entry);
@@ -1798,7 +1847,7 @@
       }
       int num_used = 0;
       for (int i = 0; i < backing_store->length(); ++i) {
-        if (!backing_store->is_the_hole(i)) {
+        if (!backing_store->is_the_hole(isolate, i)) {
           ++num_used;
           // Bail out if a number dictionary wouldn't be able to save at least
           // 75% space.
@@ -1859,19 +1908,32 @@
     DeleteCommon(obj, entry, handle(obj->elements()));
   }
 
-  static bool HasEntryImpl(FixedArrayBase* backing_store, uint32_t entry) {
-    return !BackingStore::cast(backing_store)->is_the_hole(entry);
+  static bool HasEntryImpl(Isolate* isolate, FixedArrayBase* backing_store,
+                           uint32_t entry) {
+    return !BackingStore::cast(backing_store)->is_the_hole(isolate, entry);
+  }
+
+  static uint32_t NumberOfElementsImpl(JSObject* receiver,
+                                       FixedArrayBase* backing_store) {
+    uint32_t max_index = Subclass::GetMaxIndex(receiver, backing_store);
+    if (IsFastPackedElementsKind(Subclass::kind())) return max_index;
+    Isolate* isolate = receiver->GetIsolate();
+    uint32_t count = 0;
+    for (uint32_t i = 0; i < max_index; i++) {
+      if (Subclass::HasEntryImpl(isolate, backing_store, i)) count++;
+    }
+    return count;
   }
 
   static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
                                               KeyAccumulator* accumulator,
                                               AddKeyConversion convert) {
-    Handle<FixedArrayBase> elements(receiver->elements(),
-                                    accumulator->isolate());
+    Isolate* isolate = accumulator->isolate();
+    Handle<FixedArrayBase> elements(receiver->elements(), isolate);
     uint32_t length = Subclass::GetMaxNumberOfEntries(*receiver, *elements);
     for (uint32_t i = 0; i < length; i++) {
       if (IsFastPackedElementsKind(KindTraits::Kind) ||
-          HasEntryImpl(*elements, i)) {
+          HasEntryImpl(isolate, *elements, i)) {
         accumulator->AddKey(Subclass::GetImpl(*elements, i), convert);
       }
     }
@@ -1900,12 +1962,12 @@
       for (int i = 0; i < length; i++) {
         DCHECK(BackingStore::get(*backing_store, i, isolate)->IsSmi() ||
                (IsFastHoleyElementsKind(KindTraits::Kind) &&
-                backing_store->is_the_hole(i)));
+                backing_store->is_the_hole(isolate, i)));
       }
     } else if (KindTraits::Kind == FAST_ELEMENTS ||
                KindTraits::Kind == FAST_DOUBLE_ELEMENTS) {
       for (int i = 0; i < length; i++) {
-        DCHECK(!backing_store->is_the_hole(i));
+        DCHECK(!backing_store->is_the_hole(isolate, i));
       }
     } else {
       DCHECK(IsFastHoleyElementsKind(KindTraits::Kind));
@@ -1970,7 +2032,7 @@
 
     if (new_length == 0) {
       receiver->set_elements(heap->empty_fixed_array());
-      receiver->set_length(Smi::FromInt(0));
+      receiver->set_length(Smi::kZero);
       return isolate->factory()->NewJSArrayWithElements(
           backing_store, KindTraits::Kind, delete_count);
     }
@@ -2007,11 +2069,13 @@
       Isolate* isolate, Handle<JSObject> object,
       Handle<FixedArray> values_or_entries, bool get_entries, int* nof_items,
       PropertyFilter filter) {
+    Handle<BackingStore> elements(BackingStore::cast(object->elements()),
+                                  isolate);
     int count = 0;
-    uint32_t length = object->elements()->length();
+    uint32_t length = elements->length();
     for (uint32_t index = 0; index < length; ++index) {
-      if (!HasEntryImpl(object->elements(), index)) continue;
-      Handle<Object> value = Subclass::GetImpl(object->elements(), index);
+      if (!HasEntryImpl(isolate, *elements, index)) continue;
+      Handle<Object> value = Subclass::GetImpl(*elements, index);
       if (get_entries) {
         value = MakeEntryPair(isolate, index, value);
       }
@@ -2351,7 +2415,6 @@
     return backing_store->get(index);
   }
 
-
   // NOTE: this method violates the handlified function signature convention:
   // raw pointer parameters in the function that allocates.
   // See ElementsAccessor::CopyElements() for details.
@@ -2650,7 +2713,8 @@
     return PropertyDetails(DONT_DELETE, DATA, 0, PropertyCellType::kNoCell);
   }
 
-  static bool HasElementImpl(Handle<JSObject> holder, uint32_t index,
+  static bool HasElementImpl(Isolate* isolate, Handle<JSObject> holder,
+                             uint32_t index,
                              Handle<FixedArrayBase> backing_store,
                              PropertyFilter filter) {
     return index < AccessorClass::GetCapacityImpl(*holder, *backing_store);
@@ -2677,7 +2741,7 @@
     return entry;
   }
 
-  static uint32_t GetEntryForIndexImpl(JSObject* holder,
+  static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject* holder,
                                        FixedArrayBase* backing_store,
                                        uint32_t index, PropertyFilter filter) {
     return index < AccessorClass::GetCapacityImpl(holder, backing_store)
@@ -2692,6 +2756,11 @@
     return backing_store->length();
   }
 
+  static uint32_t NumberOfElementsImpl(JSObject* receiver,
+                                       FixedArrayBase* backing_store) {
+    return AccessorClass::GetCapacityImpl(receiver, backing_store);
+  }
+
   static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
                                               KeyAccumulator* accumulator,
                                               AddKeyConversion convert) {
@@ -2930,19 +2999,34 @@
            ArgumentsAccessor::GetMaxNumberOfEntries(holder, arguments);
   }
 
+  static uint32_t NumberOfElementsImpl(JSObject* receiver,
+                                       FixedArrayBase* backing_store) {
+    FixedArray* parameter_map = FixedArray::cast(backing_store);
+    FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
+    uint32_t nof_elements = 0;
+    uint32_t length = parameter_map->length() - 2;
+    for (uint32_t entry = 0; entry < length; entry++) {
+      if (HasParameterMapArg(parameter_map, entry)) nof_elements++;
+    }
+    return nof_elements +
+           ArgumentsAccessor::NumberOfElementsImpl(receiver, arguments);
+  }
+
   static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
                                               KeyAccumulator* accumulator,
                                               AddKeyConversion convert) {
-    FixedArrayBase* elements = receiver->elements();
-    uint32_t length = GetCapacityImpl(*receiver, elements);
+    Isolate* isolate = accumulator->isolate();
+    Handle<FixedArrayBase> elements(receiver->elements(), isolate);
+    uint32_t length = GetCapacityImpl(*receiver, *elements);
     for (uint32_t entry = 0; entry < length; entry++) {
-      if (!HasEntryImpl(elements, entry)) continue;
-      Handle<Object> value = GetImpl(elements, entry);
+      if (!HasEntryImpl(isolate, *elements, entry)) continue;
+      Handle<Object> value = GetImpl(*elements, entry);
       accumulator->AddKey(value, convert);
     }
   }
 
-  static bool HasEntryImpl(FixedArrayBase* parameters, uint32_t entry) {
+  static bool HasEntryImpl(Isolate* isolate, FixedArrayBase* parameters,
+                           uint32_t entry) {
     FixedArray* parameter_map = FixedArray::cast(parameters);
     uint32_t length = parameter_map->length() - 2;
     if (entry < length) {
@@ -2950,7 +3034,7 @@
     }
 
     FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
-    return ArgumentsAccessor::HasEntryImpl(arguments, entry - length);
+    return ArgumentsAccessor::HasEntryImpl(isolate, arguments, entry - length);
   }
 
   static bool HasAccessorsImpl(JSObject* holder,
@@ -2970,15 +3054,15 @@
     return ArgumentsAccessor::GetIndexForEntryImpl(arguments, entry - length);
   }
 
-  static uint32_t GetEntryForIndexImpl(JSObject* holder,
+  static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject* holder,
                                        FixedArrayBase* parameters,
                                        uint32_t index, PropertyFilter filter) {
     FixedArray* parameter_map = FixedArray::cast(parameters);
     if (HasParameterMapArg(parameter_map, index)) return index;
 
     FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
-    uint32_t entry = ArgumentsAccessor::GetEntryForIndexImpl(holder, arguments,
-                                                             index, filter);
+    uint32_t entry = ArgumentsAccessor::GetEntryForIndexImpl(
+        isolate, holder, arguments, index, filter);
     if (entry == kMaxUInt32) return kMaxUInt32;
     return (parameter_map->length() - 2) + entry;
   }
@@ -3065,8 +3149,8 @@
     bool search_for_hole = value->IsUndefined(isolate);
 
     for (uint32_t k = start_from; k < length; ++k) {
-      uint32_t entry =
-          GetEntryForIndexImpl(*object, *parameter_map, k, ALL_PROPERTIES);
+      uint32_t entry = GetEntryForIndexImpl(isolate, *object, *parameter_map, k,
+                                            ALL_PROPERTIES);
       if (entry == kMaxUInt32) {
         if (search_for_hole) return Just(true);
         continue;
@@ -3105,8 +3189,8 @@
                                      isolate);
 
     for (uint32_t k = start_from; k < length; ++k) {
-      uint32_t entry =
-          GetEntryForIndexImpl(*object, *parameter_map, k, ALL_PROPERTIES);
+      uint32_t entry = GetEntryForIndexImpl(isolate, *object, *parameter_map, k,
+                                            ALL_PROPERTIES);
       if (entry == kMaxUInt32) {
         continue;
       }
@@ -3253,9 +3337,9 @@
     FixedArray* parameters = FixedArray::cast(receiver->elements());
     uint32_t insertion_index = 0;
     for (uint32_t i = start; i < end; i++) {
-      uint32_t entry =
-          GetEntryForIndexImpl(*receiver, parameters, i, ALL_PROPERTIES);
-      if (entry != kMaxUInt32 && HasEntryImpl(parameters, entry)) {
+      uint32_t entry = GetEntryForIndexImpl(isolate, *receiver, parameters, i,
+                                            ALL_PROPERTIES);
+      if (entry != kMaxUInt32 && HasEntryImpl(isolate, parameters, entry)) {
         elements->set(insertion_index, *GetImpl(parameters, entry));
       } else {
         elements->set_the_hole(insertion_index);
@@ -3378,13 +3462,13 @@
     return BackingStoreAccessor::GetDetailsImpl(holder, entry - length);
   }
 
-  static uint32_t GetEntryForIndexImpl(JSObject* holder,
+  static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject* holder,
                                        FixedArrayBase* backing_store,
                                        uint32_t index, PropertyFilter filter) {
     uint32_t length = static_cast<uint32_t>(GetString(holder)->length());
     if (index < length) return index;
     uint32_t backing_store_entry = BackingStoreAccessor::GetEntryForIndexImpl(
-        holder, backing_store, index, filter);
+        isolate, holder, backing_store, index, filter);
     if (backing_store_entry == kMaxUInt32) return kMaxUInt32;
     DCHECK(backing_store_entry < kMaxUInt32 - length);
     return backing_store_entry + length;
@@ -3491,6 +3575,13 @@
     }
   }
 
+  static uint32_t NumberOfElementsImpl(JSObject* object,
+                                       FixedArrayBase* backing_store) {
+    uint32_t length = GetString(object)->length();
+    return length +
+           BackingStoreAccessor::NumberOfElementsImpl(object, backing_store);
+  }
+
  private:
   static String* GetString(JSObject* holder) {
     DCHECK(holder->IsJSValue());
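
The combined-keys hunk above introduces a try-cheap-first allocation pattern: size the list by the fast overestimate (GetMaxNumberOfEntries), attempt the allocation with TryNewFixedArray, and only on failure pay for the precise NumberOfElementsImpl count before the mandatory retry. A standalone sketch of that control flow, with std::optional/std::vector as stand-ins for MaybeHandle/FixedArray:

    #include <cstddef>
    #include <cstdint>
    #include <new>
    #include <optional>
    #include <vector>

    // Stand-in for Factory::TryNewFixedArray: an empty optional instead of
    // an empty MaybeHandle on allocation failure.
    std::optional<std::vector<uint32_t>> TryAllocate(std::size_t n) {
      std::vector<uint32_t> keys;
      try {
        keys.reserve(n);
      } catch (const std::bad_alloc&) {
        return std::nullopt;
      }
      return keys;
    }

    template <typename PreciseCount>
    std::vector<uint32_t> AllocateKeyList(std::size_t overestimate,
                                          PreciseCount count_precisely) {
      if (auto keys = TryAllocate(overestimate)) return *std::move(keys);
      // An overestimate can land the list in large-object space, which does
      // not give memory back when the list is shrunk, so the precise count
      // is computed only on this slow path.
      std::vector<uint32_t> keys;
      keys.reserve(count_precisely());
      return keys;
    }
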
diff --git a/src/elements.h b/src/elements.h
index 76e1aa6..fc2e6a4 100644
--- a/src/elements.h
+++ b/src/elements.h
@@ -56,6 +56,7 @@
 
   virtual PropertyDetails GetDetails(JSObject* holder, uint32_t entry) = 0;
   virtual bool HasAccessors(JSObject* holder) = 0;
+  virtual uint32_t NumberOfElements(JSObject* holder) = 0;
 
   // Modifies the length data property as specified for JSArrays and resizes the
   // underlying backing store accordingly. The method honors the semantics of
@@ -170,6 +171,10 @@
                                       Handle<Object> value, uint32_t start,
                                       uint32_t length) = 0;
 
+  virtual void CopyElements(Handle<FixedArrayBase> source,
+                            ElementsKind source_kind,
+                            Handle<FixedArrayBase> destination, int size) = 0;
+
  protected:
   friend class LookupIterator;
 
@@ -181,7 +186,7 @@
   // indices are equivalent to entries. In the NumberDictionary
   // ElementsAccessor, entries are mapped to an index using the KeyAt method on
   // the NumberDictionary.
-  virtual uint32_t GetEntryForIndex(JSObject* holder,
+  virtual uint32_t GetEntryForIndex(Isolate* isolate, JSObject* holder,
                                     FixedArrayBase* backing_store,
                                     uint32_t index) = 0;
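
For the entry/index distinction documented in the comment above: fast backing stores use the index itself as the entry, while dictionary stores translate sparse indices into dense entry slots, with kMaxUInt32 as the not-found sentinel. A minimal standalone illustration, using std::map in place of the NumberDictionary:

    #include <cstdint>
    #include <map>

    constexpr uint32_t kNotFound = 0xFFFFFFFFu;  // kMaxUInt32 in V8

    // Fast (flat) stores: the entry is the index.
    uint32_t FlatGetEntryForIndex(uint32_t length, uint32_t index) {
      return index < length ? index : kNotFound;
    }

    // Dictionary stores: sparse index -> dense entry slot.
    uint32_t DictGetEntryForIndex(const std::map<uint32_t, uint32_t>& dictionary,
                                  uint32_t index) {
      auto it = dictionary.find(index);  // analogue of FindEntry(isolate, index)
      return it != dictionary.end() ? it->second : kNotFound;
    }
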
 
diff --git a/src/extensions/statistics-extension.cc b/src/extensions/statistics-extension.cc
index da53336..ad41297 100644
--- a/src/extensions/statistics-extension.cc
+++ b/src/extensions/statistics-extension.cc
@@ -35,14 +35,14 @@
   }
 }
 
-static void AddNumber(v8::Isolate* isolate,
-                      v8::Local<v8::Object> object,
-                      intptr_t value,
-                      const char* name) {
-  object->Set(isolate->GetCurrentContext(),
-              v8::String::NewFromUtf8(isolate, name, NewStringType::kNormal)
-                  .ToLocalChecked(),
-              v8::Number::New(isolate, static_cast<double>(value))).FromJust();
+static void AddNumber(v8::Isolate* isolate, v8::Local<v8::Object> object,
+                      double value, const char* name) {
+  object
+      ->Set(isolate->GetCurrentContext(),
+            v8::String::NewFromUtf8(isolate, name, NewStringType::kNormal)
+                .ToLocalChecked(),
+            v8::Number::New(isolate, value))
+      .FromJust();
 }
 
 
@@ -112,29 +112,24 @@
   }
 
   struct StatisticNumber {
-    intptr_t number;
+    size_t number;
     const char* name;
   };
 
   const StatisticNumber numbers[] = {
-      {static_cast<intptr_t>(heap->memory_allocator()->Size()),
-       "total_committed_bytes"},
+      {heap->memory_allocator()->Size(), "total_committed_bytes"},
       {heap->new_space()->Size(), "new_space_live_bytes"},
       {heap->new_space()->Available(), "new_space_available_bytes"},
-      {static_cast<intptr_t>(heap->new_space()->CommittedMemory()),
-       "new_space_commited_bytes"},
+      {heap->new_space()->CommittedMemory(), "new_space_commited_bytes"},
       {heap->old_space()->Size(), "old_space_live_bytes"},
       {heap->old_space()->Available(), "old_space_available_bytes"},
-      {static_cast<intptr_t>(heap->old_space()->CommittedMemory()),
-       "old_space_commited_bytes"},
+      {heap->old_space()->CommittedMemory(), "old_space_commited_bytes"},
       {heap->code_space()->Size(), "code_space_live_bytes"},
       {heap->code_space()->Available(), "code_space_available_bytes"},
-      {static_cast<intptr_t>(heap->code_space()->CommittedMemory()),
-       "code_space_commited_bytes"},
+      {heap->code_space()->CommittedMemory(), "code_space_commited_bytes"},
       {heap->lo_space()->Size(), "lo_space_live_bytes"},
       {heap->lo_space()->Available(), "lo_space_available_bytes"},
-      {static_cast<intptr_t>(heap->lo_space()->CommittedMemory()),
-       "lo_space_commited_bytes"},
+      {heap->lo_space()->CommittedMemory(), "lo_space_commited_bytes"},
   };
 
   for (size_t i = 0; i < arraysize(numbers); i++) {
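
The AddNumber change above moves the numeric conversion to a single point: the statistics table keeps the spaces' natural size_t values, and the one size_t-to-double conversion happens at the call into the V8 API (JavaScript numbers are doubles), replacing a static_cast<intptr_t> per table entry. A sketch of that boundary, outside the real v8::Number API:

    #include <cstddef>
    #include <cstdio>

    // Stand-in for the new AddNumber(v8::Isolate*, Local<Object>, double, name).
    void AddNumberSketch(double value, const char* name) {
      std::printf("%s = %.0f\n", name, value);
    }

    int main() {
      std::size_t committed_bytes = 64u * 1024u * 1024u;
      // The only conversion: size_t widens to double at the boundary.
      AddNumberSketch(static_cast<double>(committed_bytes),
                      "total_committed_bytes");
    }
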
diff --git a/src/external-reference-table.cc b/src/external-reference-table.cc
index f908be1..2e9fc46 100644
--- a/src/external-reference-table.cc
+++ b/src/external-reference-table.cc
@@ -11,6 +11,11 @@
 #include "src/deoptimizer.h"
 #include "src/ic/stub-cache.h"
 
+#if defined(DEBUG) && defined(V8_OS_LINUX) && !defined(V8_OS_ANDROID)
+#define SYMBOLIZE_FUNCTION
+#include <execinfo.h>
+#endif  // DEBUG && V8_OS_LINUX && !V8_OS_ANDROID
+
 namespace v8 {
 namespace internal {
 
@@ -31,10 +36,11 @@
 }
 
 ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
+  // nullptr is preserved through serialization/deserialization.
+  Add(nullptr, "nullptr");
   AddReferences(isolate);
   AddBuiltins(isolate);
   AddRuntimeFunctions(isolate);
-  AddStatCounters(isolate);
   AddIsolateAddresses(isolate);
   AddAccessors(isolate);
   AddStubCache(isolate);
@@ -42,6 +48,28 @@
   AddApiReferences(isolate);
 }
 
+#ifdef DEBUG
+void ExternalReferenceTable::ResetCount() {
+  for (ExternalReferenceEntry& entry : refs_) entry.count = 0;
+}
+
+void ExternalReferenceTable::PrintCount() {
+  for (int i = 0; i < refs_.length(); i++) {
+    v8::base::OS::Print("index=%5d count=%5d  %-60s\n", i, refs_[i].count,
+                        refs_[i].name);
+  }
+}
+#endif  // DEBUG
+
+// static
+const char* ExternalReferenceTable::ResolveSymbol(void* address) {
+#ifdef SYMBOLIZE_FUNCTION
+  return backtrace_symbols(&address, 1)[0];
+#else
+  return "<unresolved>";
+#endif  // SYMBOLIZE_FUNCTION
+}
+
 void ExternalReferenceTable::AddReferences(Isolate* isolate) {
   // Miscellaneous
   Add(ExternalReference::roots_array_start(isolate).address(),
@@ -56,11 +84,6 @@
       "Heap::NewSpaceAllocationTopAddress()");
   Add(ExternalReference::mod_two_doubles_operation(isolate).address(),
       "mod_two_doubles");
-  // Keyed lookup cache.
-  Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(),
-      "KeyedLookupCache::keys()");
-  Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(),
-      "KeyedLookupCache::field_offsets()");
   Add(ExternalReference::handle_scope_next_address(isolate).address(),
       "HandleScope::next");
   Add(ExternalReference::handle_scope_limit_address(isolate).address(),
@@ -78,8 +101,6 @@
   Add(ExternalReference::isolate_address(isolate).address(), "isolate");
   Add(ExternalReference::interpreter_dispatch_table_address(isolate).address(),
       "Interpreter::dispatch_table_address");
-  Add(ExternalReference::interpreter_dispatch_counters(isolate).address(),
-      "Interpreter::interpreter_dispatch_counters");
   Add(ExternalReference::address_of_negative_infinity().address(),
       "LDoubleConstant::negative_infinity");
   Add(ExternalReference::power_double_double_function(isolate).address(),
@@ -320,32 +341,6 @@
   }
 }
 
-void ExternalReferenceTable::AddStatCounters(Isolate* isolate) {
-  // Stat counters
-  struct StatsRefTableEntry {
-    StatsCounter* (Counters::*counter)();
-    const char* name;
-  };
-
-  static const StatsRefTableEntry stats_ref_table[] = {
-#define COUNTER_ENTRY(name, caption) {&Counters::name, "Counters::" #name},
-      STATS_COUNTER_LIST_1(COUNTER_ENTRY) STATS_COUNTER_LIST_2(COUNTER_ENTRY)
-#undef COUNTER_ENTRY
-  };
-
-  Counters* counters = isolate->counters();
-  for (unsigned i = 0; i < arraysize(stats_ref_table); ++i) {
-    // To make sure the indices are not dependent on whether counters are
-    // enabled, use a dummy address as filler.
-    Address address = NotAvailable();
-    StatsCounter* counter = (counters->*(stats_ref_table[i].counter))();
-    if (counter->Enabled()) {
-      address = reinterpret_cast<Address>(counter->GetInternalPointer());
-    }
-    Add(address, stats_ref_table[i].name);
-  }
-}
-
 void ExternalReferenceTable::AddIsolateAddresses(Isolate* isolate) {
   // Top addresses
   static const char* address_names[] = {
@@ -368,22 +363,24 @@
   };
 
   static const AccessorRefTable getters[] = {
-#define ACCESSOR_INFO_DECLARATION(name) \
-  {FUNCTION_ADDR(&Accessors::name##Getter), "Accessors::" #name "Getter"},
+#define ACCESSOR_INFO_DECLARATION(name)     \
+  { FUNCTION_ADDR(&Accessors::name##Getter), \
+    "Redirect to Accessors::" #name "Getter"},
       ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
 #undef ACCESSOR_INFO_DECLARATION
   };
   static const AccessorRefTable setters[] = {
 #define ACCESSOR_SETTER_DECLARATION(name) \
-  {FUNCTION_ADDR(&Accessors::name), "Accessors::" #name},
+  { FUNCTION_ADDR(&Accessors::name), "Accessors::" #name},
       ACCESSOR_SETTER_LIST(ACCESSOR_SETTER_DECLARATION)
 #undef ACCESSOR_INFO_DECLARATION
   };
 
   for (unsigned i = 0; i < arraysize(getters); ++i) {
-    Add(getters[i].address, getters[i].name);
+    const char* name = getters[i].name + 12;  // Skip "Redirect to " prefix.
+    Add(getters[i].address, name);
     Add(AccessorInfo::redirect(isolate, getters[i].address, ACCESSOR_GETTER),
-        "");
+        getters[i].name);
   }
 
   for (unsigned i = 0; i < arraysize(setters); ++i) {
@@ -444,7 +441,8 @@
   intptr_t* api_external_references = isolate->api_external_references();
   if (api_external_references != nullptr) {
     while (*api_external_references != 0) {
-      Add(reinterpret_cast<Address>(*api_external_references), "<embedder>");
+      Address address = reinterpret_cast<Address>(*api_external_references);
+      Add(address, ResolveSymbol(address));
       api_external_references++;
     }
   }
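
ResolveSymbol above uses the glibc backtrace facility, which is why SYMBOLIZE_FUNCTION is gated to DEBUG Linux non-Android builds. A standalone sketch of the same call (Linux/glibc only); note that backtrace_symbols() returns a single malloc'd block, which the DEBUG-only V8 helper never frees since the returned name must outlive the call:

    #include <execinfo.h>

    #include <cstdio>

    const char* Resolve(void* address) {
      // Symbolize a single address; a one-element "backtrace".
      char** symbols = backtrace_symbols(&address, 1);
      return symbols != nullptr ? symbols[0] : "<unresolved>";
    }

    int main() {
      // Best results for exported symbols; link with -rdynamic.
      std::printf("%s\n", Resolve(reinterpret_cast<void*>(&Resolve)));
    }
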
diff --git a/src/external-reference-table.h b/src/external-reference-table.h
index dc30dab..e1b97f9 100644
--- a/src/external-reference-table.h
+++ b/src/external-reference-table.h
@@ -19,11 +19,18 @@
  public:
   static ExternalReferenceTable* instance(Isolate* isolate);
 
-  int size() const { return refs_.length(); }
-  Address address(int i) { return refs_[i].address; }
-  const char* name(int i) { return refs_[i].name; }
+  uint32_t size() const { return static_cast<uint32_t>(refs_.length()); }
+  Address address(uint32_t i) { return refs_[i].address; }
+  const char* name(uint32_t i) { return refs_[i].name; }
 
-  inline static Address NotAvailable() { return NULL; }
+#ifdef DEBUG
+  void increment_count(uint32_t i) { refs_[i].count++; }
+  int count(uint32_t i) { return refs_[i].count; }
+  void ResetCount();
+  void PrintCount();
+#endif  // DEBUG
+
+  static const char* ResolveSymbol(void* address);
 
   static const int kDeoptTableSerializeEntryCount = 64;
 
@@ -31,19 +38,25 @@
   struct ExternalReferenceEntry {
     Address address;
     const char* name;
+#ifdef DEBUG
+    int count;
+#endif  // DEBUG
   };
 
   explicit ExternalReferenceTable(Isolate* isolate);
 
   void Add(Address address, const char* name) {
+#ifdef DEBUG
+    ExternalReferenceEntry entry = {address, name, 0};
+#else
     ExternalReferenceEntry entry = {address, name};
+#endif  // DEBUG
     refs_.Add(entry);
   }
 
   void AddReferences(Isolate* isolate);
   void AddBuiltins(Isolate* isolate);
   void AddRuntimeFunctions(Isolate* isolate);
-  void AddStatCounters(Isolate* isolate);
   void AddIsolateAddresses(Isolate* isolate);
   void AddAccessors(Isolate* isolate);
   void AddStubCache(Isolate* isolate);
diff --git a/src/factory.cc b/src/factory.cc
index 163e864..3e812d5 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -97,11 +97,20 @@
       Handle<PrototypeInfo>::cast(NewStruct(PROTOTYPE_INFO_TYPE));
   result->set_prototype_users(WeakFixedArray::Empty());
   result->set_registry_slot(PrototypeInfo::UNREGISTERED);
-  result->set_validity_cell(Smi::FromInt(0));
+  result->set_validity_cell(Smi::kZero);
   result->set_bit_field(0);
   return result;
 }
 
+Handle<Tuple3> Factory::NewTuple3(Handle<Object> value1, Handle<Object> value2,
+                                  Handle<Object> value3) {
+  Handle<Tuple3> result = Handle<Tuple3>::cast(NewStruct(TUPLE3_TYPE));
+  result->set_value1(*value1);
+  result->set_value2(*value2);
+  result->set_value3(*value3);
+  return result;
+}
+
 Handle<ContextExtension> Factory::NewContextExtension(
     Handle<ScopeInfo> scope_info, Handle<Object> extension) {
   Handle<ContextExtension> result =
@@ -128,6 +137,15 @@
       FixedArray);
 }
 
+MaybeHandle<FixedArray> Factory::TryNewFixedArray(int size,
+                                                  PretenureFlag pretenure) {
+  DCHECK(0 <= size);
+  AllocationResult allocation =
+      isolate()->heap()->AllocateFixedArray(size, pretenure);
+  Object* array = NULL;
+  if (!allocation.To(&array)) return MaybeHandle<FixedArray>();
+  return Handle<FixedArray>(FixedArray::cast(array), isolate());
+}
 
 Handle<FixedArray> Factory::NewFixedArrayWithHoles(int size,
                                                    PretenureFlag pretenure) {
@@ -179,7 +197,7 @@
   DCHECK_LE(0, number_of_frames);
   Handle<FixedArray> result =
       NewFixedArrayWithHoles(FrameArray::LengthFor(number_of_frames));
-  result->set(FrameArray::kFrameCountIndex, Smi::FromInt(0));
+  result->set(FrameArray::kFrameCountIndex, Smi::kZero);
   return Handle<FrameArray>::cast(result);
 }
 
@@ -297,6 +315,44 @@
   return result;
 }
 
+MaybeHandle<String> Factory::NewStringFromUtf8SubString(
+    Handle<SeqOneByteString> str, int begin, int length,
+    PretenureFlag pretenure) {
+  // Check for ASCII first since this is the common case.
+  const char* start = reinterpret_cast<const char*>(str->GetChars() + begin);
+  int non_ascii_start = String::NonAsciiStart(start, length);
+  if (non_ascii_start >= length) {
+    // If the string is ASCII, we can just make a substring.
+    // TODO(v8): the pretenure flag is ignored in this case.
+    return NewSubString(str, begin, begin + length);
+  }
+
+  // Non-ASCII and we need to decode.
+  Access<UnicodeCache::Utf8Decoder> decoder(
+      isolate()->unicode_cache()->utf8_decoder());
+  decoder->Reset(start + non_ascii_start, length - non_ascii_start);
+  int utf16_length = static_cast<int>(decoder->Utf16Length());
+  DCHECK(utf16_length > 0);
+  // Allocate string.
+  Handle<SeqTwoByteString> result;
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate(), result,
+      NewRawTwoByteString(non_ascii_start + utf16_length, pretenure), String);
+
+  // Reset the decoder, because the original {str} may have moved.
+  const char* ascii_data =
+      reinterpret_cast<const char*>(str->GetChars() + begin);
+  decoder->Reset(ascii_data + non_ascii_start, length - non_ascii_start);
+  // Copy ASCII portion.
+  uint16_t* data = result->GetChars();
+  for (int i = 0; i < non_ascii_start; i++) {
+    *data++ = *ascii_data++;
+  }
+  // Now write the remainder.
+  decoder->WriteUtf16(data, utf16_length);
+  return result;
+}
+
 MaybeHandle<String> Factory::NewStringFromTwoByte(const uc16* string,
                                                   int length,
                                                   PretenureFlag pretenure) {
@@ -779,7 +835,8 @@
   array->set_map_no_write_barrier(*native_context_map());
   Handle<Context> context = Handle<Context>::cast(array);
   context->set_native_context(*context);
-  context->set_errors_thrown(Smi::FromInt(0));
+  context->set_errors_thrown(Smi::kZero);
+  context->set_math_random_index(Smi::kZero);
   Handle<WeakCell> weak_cell = NewWeakCell(context);
   context->set_self_weak_cell(*weak_cell);
   DCHECK(context->IsNativeContext());
@@ -914,6 +971,14 @@
   return context;
 }
 
+Handle<Context> Factory::NewPromiseResolvingFunctionContext(int length) {
+  DCHECK_GE(length, Context::MIN_CONTEXT_SLOTS);
+  Handle<FixedArray> array = NewFixedArray(length);
+  array->set_map_no_write_barrier(*function_context_map());
+  Handle<Context> context = Handle<Context>::cast(array);
+  context->set_extension(*the_hole_value());
+  return context;
+}
 
 Handle<Struct> Factory::NewStruct(InstanceType type) {
   CALL_HEAP_FUNCTION(
@@ -922,18 +987,36 @@
       Struct);
 }
 
-Handle<PromiseContainer> Factory::NewPromiseContainer(
+Handle<PromiseResolveThenableJobInfo> Factory::NewPromiseResolveThenableJobInfo(
     Handle<JSReceiver> thenable, Handle<JSReceiver> then,
     Handle<JSFunction> resolve, Handle<JSFunction> reject,
-    Handle<Object> before_debug_event, Handle<Object> after_debug_event) {
-  Handle<PromiseContainer> result =
-      Handle<PromiseContainer>::cast(NewStruct(PROMISE_CONTAINER_TYPE));
+    Handle<Object> debug_id, Handle<Object> debug_name,
+    Handle<Context> context) {
+  Handle<PromiseResolveThenableJobInfo> result =
+      Handle<PromiseResolveThenableJobInfo>::cast(
+          NewStruct(PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE));
   result->set_thenable(*thenable);
   result->set_then(*then);
   result->set_resolve(*resolve);
   result->set_reject(*reject);
-  result->set_before_debug_event(*before_debug_event);
-  result->set_after_debug_event(*after_debug_event);
+  result->set_debug_id(*debug_id);
+  result->set_debug_name(*debug_name);
+  result->set_context(*context);
+  return result;
+}
+
+Handle<PromiseReactionJobInfo> Factory::NewPromiseReactionJobInfo(
+    Handle<Object> value, Handle<Object> tasks, Handle<Object> deferred,
+    Handle<Object> debug_id, Handle<Object> debug_name,
+    Handle<Context> context) {
+  Handle<PromiseReactionJobInfo> result = Handle<PromiseReactionJobInfo>::cast(
+      NewStruct(PROMISE_REACTION_JOB_INFO_TYPE));
+  result->set_value(*value);
+  result->set_tasks(*tasks);
+  result->set_deferred(*deferred);
+  result->set_debug_id(*debug_id);
+  result->set_debug_name(*debug_name);
+  result->set_context(*context);
   return result;
 }
 
@@ -970,7 +1053,7 @@
   script->set_line_ends(heap->undefined_value());
   script->set_eval_from_shared(heap->undefined_value());
   script->set_eval_from_position(0);
-  script->set_shared_function_infos(Smi::FromInt(0));
+  script->set_shared_function_infos(Smi::kZero);
   script->set_flags(0);
 
   heap->set_script_list(*WeakFixedArray::Add(script_list(), script));
@@ -1272,6 +1355,8 @@
 DEFINE_ERROR(ReferenceError, reference_error)
 DEFINE_ERROR(SyntaxError, syntax_error)
 DEFINE_ERROR(TypeError, type_error)
+DEFINE_ERROR(WasmCompileError, wasm_compile_error)
+DEFINE_ERROR(WasmRuntimeError, wasm_runtime_error)
 #undef DEFINE_ERROR
 
 Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
@@ -1446,12 +1531,6 @@
   return scope_info;
 }
 
-Handle<ModuleInfoEntry> Factory::NewModuleInfoEntry() {
-  Handle<FixedArray> array = NewFixedArray(ModuleInfoEntry::kLength, TENURED);
-  array->set_map_no_write_barrier(*module_info_entry_map());
-  return Handle<ModuleInfoEntry>::cast(array);
-}
-
 Handle<ModuleInfo> Factory::NewModuleInfo() {
   Handle<FixedArray> array = NewFixedArray(ModuleInfo::kLength, TENURED);
   array->set_map_no_write_barrier(*module_info_map());
@@ -1504,7 +1583,7 @@
   // The code object has not been fully initialized yet.  We rely on the
   // fact that no allocation will happen from this point on.
   DisallowHeapAllocation no_gc;
-  code->set_gc_metadata(Smi::FromInt(0));
+  code->set_gc_metadata(Smi::kZero);
   code->set_ic_age(isolate()->heap()->global_ic_age());
   code->set_instruction_size(desc.instr_size);
   code->set_relocation_info(*reloc_info);
@@ -1514,7 +1593,7 @@
   code->set_raw_kind_specific_flags2(0);
   code->set_is_crankshafted(crankshafted);
   code->set_deoptimization_data(*empty_fixed_array(), SKIP_WRITE_BARRIER);
-  code->set_raw_type_feedback_info(Smi::FromInt(0));
+  code->set_raw_type_feedback_info(Smi::kZero);
   code->set_next_code_link(*undefined_value(), SKIP_WRITE_BARRIER);
   code->set_handler_table(*empty_fixed_array(), SKIP_WRITE_BARRIER);
   code->set_source_position_table(*empty_byte_array(), SKIP_WRITE_BARRIER);
@@ -1572,16 +1651,6 @@
 }
 
 
-Handle<JSObject> Factory::NewJSObjectWithMemento(
-    Handle<JSFunction> constructor,
-    Handle<AllocationSite> site) {
-  JSFunction::EnsureHasInitialMap(constructor);
-  CALL_HEAP_FUNCTION(
-      isolate(),
-      isolate()->heap()->AllocateJSObject(*constructor, NOT_TENURED, *site),
-      JSObject);
-}
-
 Handle<JSObject> Factory::NewJSObjectWithNullProto() {
   Handle<JSObject> result = NewJSObject(isolate()->object_function());
   Handle<Map> new_map =
@@ -1706,7 +1775,7 @@
   DCHECK(capacity >= length);
 
   if (capacity == 0) {
-    array->set_length(Smi::FromInt(0));
+    array->set_length(Smi::kZero);
     array->set_elements(*empty_fixed_array());
     return;
   }
@@ -1735,6 +1804,10 @@
   array->set_length(Smi::FromInt(length));
 }
 
+Handle<JSModuleNamespace> Factory::NewJSModuleNamespace() {
+  Handle<Map> map = isolate()->js_module_namespace_map();
+  return Handle<JSModuleNamespace>::cast(NewJSObjectFromMap(map));
+}
 
 Handle<JSGeneratorObject> Factory::NewJSGeneratorObject(
     Handle<JSFunction> function) {
@@ -1752,23 +1825,26 @@
   Handle<ModuleInfo> module_info(code->scope_info()->ModuleDescriptorInfo(),
                                  isolate());
   Handle<ObjectHashTable> exports =
-      ObjectHashTable::New(isolate(), module_info->regular_exports()->length());
+      ObjectHashTable::New(isolate(), module_info->RegularExportCount());
+  Handle<FixedArray> regular_exports =
+      NewFixedArray(module_info->RegularExportCount());
+  Handle<FixedArray> regular_imports =
+      NewFixedArray(module_info->regular_imports()->length());
   int requested_modules_length = module_info->module_requests()->length();
   Handle<FixedArray> requested_modules =
       requested_modules_length > 0 ? NewFixedArray(requested_modules_length)
                                    : empty_fixed_array();
 
-  // To make it easy to hash Modules, we set a new symbol as the name of
-  // SharedFunctionInfo representing this Module.
-  Handle<Symbol> name_symbol = NewSymbol();
-  code->set_name(*name_symbol);
-
   Handle<Module> module = Handle<Module>::cast(NewStruct(MODULE_TYPE));
   module->set_code(*code);
   module->set_exports(*exports);
+  module->set_regular_exports(*regular_exports);
+  module->set_regular_imports(*regular_imports);
+  module->set_hash(isolate()->GenerateIdentityHash(Smi::kMaxValue));
+  module->set_module_namespace(isolate()->heap()->undefined_value());
   module->set_requested_modules(*requested_modules);
-  module->set_flags(0);
-  module->set_embedder_data(isolate()->heap()->undefined_value());
+  DCHECK(!module->instantiated());
+  DCHECK(!module->evaluated());
   return module;
 }
 
@@ -1934,6 +2010,12 @@
   DCHECK(byte_offset + byte_length <=
          static_cast<size_t>(buffer->byte_length()->Number()));
 
+  DCHECK_EQ(obj->GetInternalFieldCount(),
+            v8::ArrayBufferView::kInternalFieldCount);
+  for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
+    obj->SetInternalField(i, Smi::kZero);
+  }
+
   obj->set_buffer(*buffer);
 
   i::Handle<i::Object> byte_offset_object =
@@ -2003,6 +2085,11 @@
                                               size_t number_of_elements,
                                               PretenureFlag pretenure) {
   Handle<JSTypedArray> obj = NewJSTypedArray(elements_kind, pretenure);
+  DCHECK_EQ(obj->GetInternalFieldCount(),
+            v8::ArrayBufferView::kInternalFieldCount);
+  for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
+    obj->SetInternalField(i, Smi::kZero);
+  }
 
   size_t element_size = GetFixedTypedArraysElementSize(elements_kind);
   ExternalArrayType array_type = GetArrayTypeFromElementsKind(elements_kind);
@@ -2012,7 +2099,7 @@
   CHECK(number_of_elements <= static_cast<size_t>(Smi::kMaxValue));
   size_t byte_length = number_of_elements * element_size;
 
-  obj->set_byte_offset(Smi::FromInt(0));
+  obj->set_byte_offset(Smi::kZero);
   i::Handle<i::Object> byte_length_object =
       NewNumberFromSize(byte_length, pretenure);
   obj->set_byte_length(*byte_length_object);
@@ -2111,11 +2198,10 @@
   return result;
 }
 
-
-Handle<JSGlobalProxy> Factory::NewUninitializedJSGlobalProxy() {
+Handle<JSGlobalProxy> Factory::NewUninitializedJSGlobalProxy(int size) {
   // Create an empty shell of a JSGlobalProxy that needs to be reinitialized
   // via ReinitializeJSGlobalProxy later.
-  Handle<Map> map = NewMap(JS_GLOBAL_PROXY_TYPE, JSGlobalProxy::kSize);
+  Handle<Map> map = NewMap(JS_GLOBAL_PROXY_TYPE, size);
   // Maintain invariant expected from any JSGlobalProxy.
   map->set_is_access_check_needed(true);
   CALL_HEAP_FUNCTION(
@@ -2133,12 +2219,11 @@
   // The proxy's hash should be retained across reinitialization.
   Handle<Object> hash(object->hash(), isolate());
 
-  JSObject::InvalidatePrototypeChains(*old_map);
   if (old_map->is_prototype_map()) {
     map = Map::Copy(map, "CopyAsPrototypeForJSGlobalProxy");
     map->set_is_prototype_map(true);
   }
-  JSObject::UpdatePrototypeUserRegistration(old_map, map, isolate());
+  JSObject::NotifyMapChange(old_map, map, isolate());
 
   // Check that the already allocated object has the same size and type as
   // objects allocated using the constructor.
@@ -2215,7 +2300,7 @@
     code = isolate()->builtins()->Illegal();
   }
   share->set_code(*code);
-  share->set_optimized_code_map(*cleared_optimized_code_map());
+  share->set_optimized_code_map(*empty_fixed_array());
   share->set_scope_info(ScopeInfo::Empty(isolate()));
   share->set_outer_scope_info(*the_hole_value());
   Handle<Code> construct_stub =
@@ -2459,13 +2544,30 @@
   store->set(JSRegExp::kIrregexpUC16CodeIndex, uninitialized);
   store->set(JSRegExp::kIrregexpLatin1CodeSavedIndex, uninitialized);
   store->set(JSRegExp::kIrregexpUC16CodeSavedIndex, uninitialized);
-  store->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(0));
+  store->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::kZero);
   store->set(JSRegExp::kIrregexpCaptureCountIndex,
              Smi::FromInt(capture_count));
   store->set(JSRegExp::kIrregexpCaptureNameMapIndex, uninitialized);
   regexp->set_data(*store);
 }
 
+Handle<RegExpMatchInfo> Factory::NewRegExpMatchInfo() {
+  // Initially, the last match info consists of all fixed fields plus space for
+  // the match itself (i.e., 2 capture indices).
+  static const int kInitialSize = RegExpMatchInfo::kFirstCaptureIndex +
+                                  RegExpMatchInfo::kInitialCaptureIndices;
+
+  Handle<FixedArray> elems = NewFixedArray(kInitialSize);
+  Handle<RegExpMatchInfo> result = Handle<RegExpMatchInfo>::cast(elems);
+
+  result->SetNumberOfCaptureRegisters(RegExpMatchInfo::kInitialCaptureIndices);
+  result->SetLastSubject(*empty_string());
+  result->SetLastInput(*undefined_value());
+  result->SetCapture(0, 0);
+  result->SetCapture(1, 0);
+
+  return result;
+}
 
 Handle<Object> Factory::GlobalConstantFor(Handle<Name> name) {
   if (Name::Equals(name, undefined_string())) return undefined_value();
@@ -2608,5 +2710,26 @@
   }
 }
 
+Handle<JSFixedArrayIterator> Factory::NewJSFixedArrayIterator(
+    Handle<FixedArray> array) {
+  // Create the "next" function (must be unique per iterator object).
+  Handle<Code> code(
+      isolate()->builtins()->builtin(Builtins::kFixedArrayIteratorNext));
+  // TODO(neis): Don't create a new SharedFunctionInfo each time.
+  Handle<JSFunction> next = isolate()->factory()->NewFunctionWithoutPrototype(
+      isolate()->factory()->next_string(), code, false);
+  next->shared()->set_native(true);
+
+  // Create the iterator.
+  Handle<Map> map(isolate()->native_context()->fixed_array_iterator_map());
+  Handle<JSFixedArrayIterator> iterator =
+      Handle<JSFixedArrayIterator>::cast(NewJSObjectFromMap(map));
+  iterator->set_initial_next(*next);
+  iterator->set_array(*array);
+  iterator->set_index(0);
+  iterator->InObjectPropertyAtPut(JSFixedArrayIterator::kNextIndex, *next);
+  return iterator;
+}
+
 }  // namespace internal
 }  // namespace v8
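
Two subtleties in NewStringFromUtf8SubString above are worth spelling out. First, the ASCII fast path: if no byte in the range exceeds 0x7F, the UTF-8 bytes are already valid one-byte string content and a plain substring suffices. Second, the repeated decoder->Reset(): allocating the two-byte result can trigger GC, which may move {str}, so the raw character pointer has to be re-derived after NewRawTwoByteString. A standalone sketch of the fast-path scan (V8's real String::NonAsciiStart is further optimized):

    #include <cstddef>
    #include <cstdint>

    // Returns the offset of the first non-ASCII byte, or `length` if the whole
    // range is 7-bit ASCII and can be taken as a substring without decoding.
    std::size_t NonAsciiStartSketch(const char* chars, std::size_t length) {
      for (std::size_t i = 0; i < length; i++) {
        if (static_cast<std::uint8_t>(chars[i]) > 0x7F) return i;
      }
      return length;
    }
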
diff --git a/src/factory.h b/src/factory.h
index 82c2317..d059b10 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -5,6 +5,7 @@
 #ifndef V8_FACTORY_H_
 #define V8_FACTORY_H_
 
+#include "src/globals.h"
 #include "src/isolate.h"
 #include "src/messages.h"
 #include "src/type-feedback-vector.h"
@@ -21,14 +22,22 @@
 };
 
 // Interface for handle based allocation.
-class Factory final {
+class V8_EXPORT_PRIVATE Factory final {
  public:
   Handle<Oddball> NewOddball(Handle<Map> map, const char* to_string,
                              Handle<Object> to_number, const char* type_of,
                              byte kind);
 
   // Allocates a fixed array initialized with undefined values.
-  V8_EXPORT_PRIVATE Handle<FixedArray> NewFixedArray(
+  Handle<FixedArray> NewFixedArray(int size,
+                                   PretenureFlag pretenure = NOT_TENURED);
+  // Tries allocating a fixed array initialized with undefined values.
+  // In case of an allocation failure (OOM) an empty handle is returned.
+  // The caller then has to manually signal
+  // v8::internal::Heap::FatalProcessOutOfMemory, typically by calling
+  // NewFixedArray as a fallback.
+  MUST_USE_RESULT
+  MaybeHandle<FixedArray> TryNewFixedArray(
       int size, PretenureFlag pretenure = NOT_TENURED);
 
   // Allocate a new fixed array with non-existing entries (the hole).
@@ -60,15 +69,26 @@
   // Create a new boxed value.
   Handle<Box> NewBox(Handle<Object> value);
 
-  // Create a new PromiseContainer struct.
-  Handle<PromiseContainer> NewPromiseContainer(
+  // Create a new PromiseReactionJobInfo struct.
+  Handle<PromiseReactionJobInfo> NewPromiseReactionJobInfo(
+      Handle<Object> value, Handle<Object> tasks, Handle<Object> deferred,
+      Handle<Object> debug_id, Handle<Object> debug_name,
+      Handle<Context> context);
+
+  // Create a new PromiseResolveThenableJobInfo struct.
+  Handle<PromiseResolveThenableJobInfo> NewPromiseResolveThenableJobInfo(
       Handle<JSReceiver> thenable, Handle<JSReceiver> then,
       Handle<JSFunction> resolve, Handle<JSFunction> reject,
-      Handle<Object> before_debug_event, Handle<Object> after_debug_event);
+      Handle<Object> debug_id, Handle<Object> debug_name,
+      Handle<Context> context);
 
   // Create a new PrototypeInfo struct.
   Handle<PrototypeInfo> NewPrototypeInfo();
 
+  // Create a new Tuple3 struct.
+  Handle<Tuple3> NewTuple3(Handle<Object> value1, Handle<Object> value2,
+                           Handle<Object> value3);
+
   // Create a new ContextExtension struct.
   Handle<ContextExtension> NewContextExtension(Handle<ScopeInfo> scope_info,
                                                Handle<Object> extension);
@@ -81,8 +101,7 @@
 
   // Finds the internalized copy for string in the string table.
   // If not found, a new string is added to the table and returned.
-  V8_EXPORT_PRIVATE Handle<String> InternalizeUtf8String(
-      Vector<const char> str);
+  Handle<String> InternalizeUtf8String(Vector<const char> str);
   Handle<String> InternalizeUtf8String(const char* str) {
     return InternalizeUtf8String(CStrVector(str));
   }
@@ -127,7 +146,7 @@
   //     will be converted to Latin1, otherwise it will be left as two-byte.
   //
   // One-byte strings are pretenured when used as keys in the SourceCodeCache.
-  V8_EXPORT_PRIVATE MUST_USE_RESULT MaybeHandle<String> NewStringFromOneByte(
+  MUST_USE_RESULT MaybeHandle<String> NewStringFromOneByte(
       Vector<const uint8_t> str, PretenureFlag pretenure = NOT_TENURED);
 
   template <size_t N>
@@ -170,10 +189,14 @@
 
   // UTF8 strings are pretenured when used for regexp literal patterns and
   // flags in the parser.
-  MUST_USE_RESULT V8_EXPORT_PRIVATE MaybeHandle<String> NewStringFromUtf8(
+  MUST_USE_RESULT MaybeHandle<String> NewStringFromUtf8(
       Vector<const char> str, PretenureFlag pretenure = NOT_TENURED);
 
-  V8_EXPORT_PRIVATE MUST_USE_RESULT MaybeHandle<String> NewStringFromTwoByte(
+  MUST_USE_RESULT MaybeHandle<String> NewStringFromUtf8SubString(
+      Handle<SeqOneByteString> str, int begin, int end,
+      PretenureFlag pretenure = NOT_TENURED);
+
+  MUST_USE_RESULT MaybeHandle<String> NewStringFromTwoByte(
       Vector<const uc16> str, PretenureFlag pretenure = NOT_TENURED);
 
   MUST_USE_RESULT MaybeHandle<String> NewStringFromTwoByte(
@@ -296,6 +319,8 @@
   Handle<Context> NewBlockContext(Handle<JSFunction> function,
                                   Handle<Context> previous,
                                   Handle<ScopeInfo> scope_info);
+  // Create a context for a promise resolving function.
+  Handle<Context> NewPromiseResolvingFunctionContext(int length);
 
   // Allocate a new struct.  The struct is pretenured (allocated directly in
   // the old generation).
@@ -306,7 +331,7 @@
 
   Handle<AccessorInfo> NewAccessorInfo();
 
-  V8_EXPORT_PRIVATE Handle<Script> NewScript(Handle<String> source);
+  Handle<Script> NewScript(Handle<String> source);
 
   // Foreign objects are pretenured when allocated by the bootstrapper.
   Handle<Foreign> NewForeign(Address addr,
@@ -416,12 +441,6 @@
   SIMD128_TYPES(SIMD128_NEW_DECL)
 #undef SIMD128_NEW_DECL
 
-  // These objects are used by the api to create env-independent data
-  // structures in the heap.
-  inline Handle<JSObject> NewNeanderObject() {
-    return NewJSObjectFromMap(neander_map());
-  }
-
   Handle<JSWeakMap> NewJSWeakMap();
 
   Handle<JSObject> NewArgumentsObject(Handle<JSFunction> callee, int length);
@@ -430,9 +449,6 @@
   // runtime.
   Handle<JSObject> NewJSObject(Handle<JSFunction> constructor,
                                PretenureFlag pretenure = NOT_TENURED);
-  // JSObject that should have a memento pointing to the allocation site.
-  Handle<JSObject> NewJSObjectWithMemento(Handle<JSFunction> constructor,
-                                          Handle<AllocationSite> site);
   // JSObject without a prototype.
   Handle<JSObject> NewJSObjectWithNullProto();
 
@@ -450,7 +466,7 @@
 
   // Create a JSArray with a specified length and elements initialized
   // according to the specified mode.
-  V8_EXPORT_PRIVATE Handle<JSArray> NewJSArray(
+  Handle<JSArray> NewJSArray(
       ElementsKind elements_kind, int length, int capacity,
       ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS,
       PretenureFlag pretenure = NOT_TENURED);
@@ -466,11 +482,11 @@
   }
 
   // Create a JSArray with the given elements.
-  V8_EXPORT_PRIVATE Handle<JSArray> NewJSArrayWithElements(
-      Handle<FixedArrayBase> elements, ElementsKind elements_kind, int length,
-      PretenureFlag pretenure = NOT_TENURED);
+  Handle<JSArray> NewJSArrayWithElements(Handle<FixedArrayBase> elements,
+                                         ElementsKind elements_kind, int length,
+                                         PretenureFlag pretenure = NOT_TENURED);
 
-  V8_EXPORT_PRIVATE Handle<JSArray> NewJSArrayWithElements(
+  Handle<JSArray> NewJSArrayWithElements(
       Handle<FixedArrayBase> elements,
       ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
       PretenureFlag pretenure = NOT_TENURED) {
@@ -486,6 +502,8 @@
 
   Handle<JSGeneratorObject> NewJSGeneratorObject(Handle<JSFunction> function);
 
+  Handle<JSModuleNamespace> NewJSModuleNamespace();
+
   Handle<Module> NewModule(Handle<SharedFunctionInfo> code);
 
   Handle<JSArrayBuffer> NewJSArrayBuffer(
@@ -522,6 +540,9 @@
   Handle<JSMapIterator> NewJSMapIterator();
   Handle<JSSetIterator> NewJSSetIterator();
 
+  Handle<JSFixedArrayIterator> NewJSFixedArrayIterator(
+      Handle<FixedArray> array);
+
   // Allocates a bound function.
   MaybeHandle<JSBoundFunction> NewJSBoundFunction(
       Handle<JSReceiver> target_function, Handle<Object> bound_this,
@@ -538,7 +559,7 @@
   void ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> global,
                                  Handle<JSFunction> constructor);
 
-  Handle<JSGlobalProxy> NewUninitializedJSGlobalProxy();
+  Handle<JSGlobalProxy> NewUninitializedJSGlobalProxy(int size);
 
   Handle<JSFunction> NewFunction(Handle<Map> map,
                                  Handle<SharedFunctionInfo> info,
@@ -623,7 +644,9 @@
   DECLARE_ERROR(ReferenceError)
   DECLARE_ERROR(SyntaxError)
   DECLARE_ERROR(TypeError)
-#undef DEFINE_ERROR
+  DECLARE_ERROR(WasmCompileError)
+  DECLARE_ERROR(WasmRuntimeError)
+#undef DECLARE_ERROR
 
   Handle<String> NumberToString(Handle<Object> number,
                                 bool check_number_string_cache = true);
@@ -709,6 +732,8 @@
                                         int number_of_properties,
                                         bool* is_result_from_cache);
 
+  Handle<RegExpMatchInfo> NewRegExpMatchInfo();
+
   // Creates a new FixedArray that holds the data associated with the
   // atom regexp and stores it in the regexp.
   void SetRegExpAtomData(Handle<JSRegExp> regexp,
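
A minimal caller sketch for the TryNewFixedArray contract documented in the
hunk above (not part of the patch; the helper name is invented): on OOM the
MaybeHandle comes back empty and the caller falls back to NewFixedArray,
which signals v8::internal::Heap::FatalProcessOutOfMemory rather than
returning an empty handle.

    // Sketch only, assuming the Factory declarations above.
    Handle<FixedArray> AllocateOrDie(Factory* factory, int size) {
      Handle<FixedArray> result;
      if (!factory->TryNewFixedArray(size).ToHandle(&result)) {
        // OOM path: NewFixedArray either succeeds or aborts the process.
        result = factory->NewFixedArray(size);
      }
      return result;
    }
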
diff --git a/src/fast-accessor-assembler.cc b/src/fast-accessor-assembler.cc
index a9cde70..ee9b241 100644
--- a/src/fast-accessor-assembler.cc
+++ b/src/fast-accessor-assembler.cc
@@ -17,7 +17,7 @@
 namespace internal {
 
 FastAccessorAssembler::FastAccessorAssembler(Isolate* isolate)
-    : zone_(isolate->allocator()),
+    : zone_(isolate->allocator(), ZONE_NAME),
       isolate_(isolate),
       assembler_(new CodeStubAssembler(isolate, zone(), 1,
                                        Code::ComputeFlags(Code::STUB),
diff --git a/src/field-index-inl.h b/src/field-index-inl.h
index c2f25bb..a728eb3 100644
--- a/src/field-index-inl.h
+++ b/src/field-index-inl.h
@@ -6,7 +6,6 @@
 #define V8_FIELD_INDEX_INL_H_
 
 #include "src/field-index.h"
-#include "src/ic/handler-configuration.h"
 
 namespace v8 {
 namespace internal {
@@ -85,39 +84,6 @@
   return is_double() ? (result | 1) : result;
 }
 
-// Takes an offset as computed by GetLoadByFieldOffset and reconstructs a
-// FieldIndex object from it.
-// static
-inline FieldIndex FieldIndex::ForLoadByFieldOffset(Map* map, int offset) {
-  DCHECK(LoadHandlerTypeBit::decode(offset) == kLoadICHandlerForProperties);
-  bool is_inobject = FieldOffsetIsInobject::decode(offset);
-  bool is_double = FieldOffsetIsDouble::decode(offset);
-  int field_index = FieldOffsetOffset::decode(offset) >> kPointerSizeLog2;
-  int first_inobject_offset = 0;
-  if (is_inobject) {
-    first_inobject_offset =
-        map->IsJSObjectMap() ? map->GetInObjectPropertyOffset(0) : 0;
-  } else {
-    first_inobject_offset = FixedArray::kHeaderSize;
-  }
-  int inobject_properties =
-      map->IsJSObjectMap() ? map->GetInObjectProperties() : 0;
-  FieldIndex result(is_inobject, field_index, is_double, inobject_properties,
-                    first_inobject_offset);
-  DCHECK(result.GetLoadByFieldOffset() == offset);
-  return result;
-}
-
-// Returns the offset format consumed by TurboFan stubs:
-// (offset << 3) | (is_double << 2) | (is_inobject << 1) | is_property
-// Where |offset| is relative to object start or FixedArray start, respectively.
-inline int FieldIndex::GetLoadByFieldOffset() const {
-  return FieldOffsetIsInobject::encode(is_inobject()) |
-         FieldOffsetIsDouble::encode(is_double()) |
-         FieldOffsetOffset::encode(index() << kPointerSizeLog2) |
-         LoadHandlerTypeBit::encode(kLoadICHandlerForProperties);
-}
-
 inline FieldIndex FieldIndex::ForDescriptor(Map* map, int descriptor_index) {
   PropertyDetails details =
       map->instance_descriptors()->GetDetails(descriptor_index);
@@ -126,30 +92,10 @@
                           details.representation().IsDouble());
 }
 
-
-inline FieldIndex FieldIndex::ForKeyedLookupCacheIndex(Map* map, int index) {
-  if (FLAG_compiled_keyed_generic_loads) {
-    return ForLoadByFieldIndex(map, index);
-  } else {
-    return ForPropertyIndex(map, index);
-  }
-}
-
-
 inline FieldIndex FieldIndex::FromFieldAccessStubKey(int key) {
   return FieldIndex(key);
 }
 
-
-inline int FieldIndex::GetKeyedLookupCacheIndex() const {
-  if (FLAG_compiled_keyed_generic_loads) {
-    return GetLoadByFieldIndex();
-  } else {
-    return property_index();
-  }
-}
-
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/field-index.h b/src/field-index.h
index 404c0f6..37b2f3c 100644
--- a/src/field-index.h
+++ b/src/field-index.h
@@ -27,12 +27,9 @@
   static FieldIndex ForInObjectOffset(int offset, Map* map = NULL);
   static FieldIndex ForDescriptor(Map* map, int descriptor_index);
   static FieldIndex ForLoadByFieldIndex(Map* map, int index);
-  static FieldIndex ForLoadByFieldOffset(Map* map, int index);
-  static FieldIndex ForKeyedLookupCacheIndex(Map* map, int index);
   static FieldIndex FromFieldAccessStubKey(int key);
 
   int GetLoadByFieldIndex() const;
-  int GetLoadByFieldOffset() const;
 
   bool is_inobject() const {
     return IsInObjectBits::decode(bit_field_);
@@ -69,8 +66,6 @@
     return result;
   }
 
-  int GetKeyedLookupCacheIndex() const;
-
   int GetFieldAccessStubKey() const {
     return bit_field_ &
         (IsInObjectBits::kMask | IsDoubleBits::kMask | IndexBits::kMask);
diff --git a/src/field-type.cc b/src/field-type.cc
index b3b24e2..16bccf2 100644
--- a/src/field-type.cc
+++ b/src/field-type.cc
@@ -13,7 +13,7 @@
 
 // static
 FieldType* FieldType::None() {
-  // Do not Smi::FromInt(0) here or for Any(), as that may translate
+  // Do not use Smi::kZero here or for Any(), as that may translate
   // as `nullptr` which is not a valid value for `this`.
   return reinterpret_cast<FieldType*>(Smi::FromInt(2));
 }
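
A small model of why the comment above forbids the zero Smi (assuming the
standard V8 Smi tagging, under which Smi::FromInt(0) encodes as the all-zero
word; this snippet is illustrative, not part of the patch):

    // A FieldType* forged from the zero Smi would alias nullptr, which is
    // not a valid `this`; Smi::FromInt(2) yields a non-null sentinel.
    FieldType* none = reinterpret_cast<FieldType*>(Smi::FromInt(2));
    DCHECK_NOT_NULL(none);  // would fail had Smi::kZero been used
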
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 779a589..a7efe11 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -199,19 +199,18 @@
   V(harmony_sharedarraybuffer, "harmony sharedarraybuffer")             \
   V(harmony_simd, "harmony simd")                                       \
   V(harmony_do_expressions, "harmony do-expressions")                   \
-  V(harmony_restrictive_generators,                                     \
-    "harmony restrictions on generator declarations")                   \
   V(harmony_regexp_named_captures, "harmony regexp named captures")     \
   V(harmony_regexp_property, "harmony unicode regexp property classes") \
-  V(harmony_for_in, "harmony for-in syntax")                            \
-  V(harmony_trailing_commas,                                            \
-    "harmony trailing commas in function parameter lists")              \
   V(harmony_class_fields, "harmony public fields in class literals")
 
 // Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED_BASE(V)                                               \
-  V(harmony_regexp_lookbehind, "harmony regexp lookbehind")                  \
-  V(harmony_tailcalls, "harmony tail calls")                                 \
+#define HARMONY_STAGED_BASE(V)                              \
+  V(harmony_regexp_lookbehind, "harmony regexp lookbehind") \
+  V(harmony_restrictive_generators,                         \
+    "harmony restrictions on generator declarations")       \
+  V(harmony_tailcalls, "harmony tail calls")                \
+  V(harmony_trailing_commas,                                \
+    "harmony trailing commas in function parameter lists")  \
   V(harmony_string_padding, "harmony String-padding methods")
 
 #ifdef V8_I18N_SUPPORT
@@ -224,13 +223,7 @@
 #endif
 
 // Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING(V)                                                  \
-  V(harmony_async_await, "harmony async-await")                              \
-  V(harmony_restrictive_declarations,                                        \
-    "harmony limitations on sloppy mode function declarations")              \
-  V(harmony_object_values_entries, "harmony Object.values / Object.entries") \
-  V(harmony_object_own_property_descriptors,                                 \
-    "harmony Object.getOwnPropertyDescriptors()")
+#define HARMONY_SHIPPING(V) V(harmony_async_await, "harmony async-await")
 
 // Once a shipping feature has proved stable in the wild, it will be dropped
 // from HARMONY_SHIPPING, all occurrences of the FLAG_ variable are removed,
@@ -255,9 +248,12 @@
 HARMONY_SHIPPING(FLAG_SHIPPING_FEATURES)
 #undef FLAG_SHIPPING_FEATURES
 
+DEFINE_BOOL(future, false,
+            "Implies all staged features that we want to ship in the "
+            "not-too-far future")
+DEFINE_IMPLICATION(future, ignition_staging)
+
 // Flags for experimental implementation features.
-DEFINE_BOOL(compiled_keyed_generic_loads, false,
-            "use optimizing compiler to generate keyed generic load stubs")
 DEFINE_BOOL(allocation_site_pretenuring, true,
             "pretenure with allocation sites")
 DEFINE_BOOL(page_promotion, true, "promote pages based on utilization")
@@ -278,6 +274,8 @@
 DEFINE_IMPLICATION(track_field_types, track_fields)
 DEFINE_IMPLICATION(track_field_types, track_heap_object_fields)
 DEFINE_BOOL(smi_binop, true, "support smi representation in binary operations")
+DEFINE_BOOL(mark_shared_functions_for_tier_up, true,
+            "mark shared functions for tier up")
 
 // Flags for optimization types.
 DEFINE_BOOL(optimize_for_size, false,
@@ -294,20 +292,14 @@
 DEFINE_BOOL(ignition, false, "use ignition interpreter")
 DEFINE_BOOL(ignition_staging, false, "use ignition with all staged features")
 DEFINE_IMPLICATION(ignition_staging, ignition)
-DEFINE_IMPLICATION(ignition_staging, ignition_osr)
-DEFINE_IMPLICATION(ignition_staging, turbo_from_bytecode)
-DEFINE_IMPLICATION(ignition_staging, ignition_preserve_bytecode)
-DEFINE_BOOL(ignition_eager, false, "eagerly compile and parse with ignition")
 DEFINE_STRING(ignition_filter, "*", "filter for ignition interpreter")
 DEFINE_BOOL(ignition_deadcode, true,
             "use ignition dead code elimination optimizer")
-DEFINE_BOOL(ignition_osr, false, "enable support for OSR from ignition code")
+DEFINE_BOOL(ignition_osr, true, "enable support for OSR from ignition code")
 DEFINE_BOOL(ignition_peephole, true, "use ignition peephole optimizer")
 DEFINE_BOOL(ignition_reo, true, "use ignition register equivalence optimizer")
 DEFINE_BOOL(ignition_filter_expression_positions, true,
             "filter expression positions before the bytecode pipeline")
-DEFINE_BOOL(ignition_preserve_bytecode, false,
-            "preserve generated bytecode even when switching tiers")
 DEFINE_BOOL(print_bytecode, false,
             "print bytecode generated by ignition interpreter")
 DEFINE_BOOL(trace_ignition, false,
@@ -406,8 +398,7 @@
 DEFINE_BOOL(inline_construct, true, "inline constructor calls")
 DEFINE_BOOL(inline_arguments, true, "inline functions with arguments object")
 DEFINE_BOOL(inline_accessors, true, "inline JavaScript accessors")
-DEFINE_BOOL(inline_into_try, false, "inline into try blocks")
-DEFINE_IMPLICATION(turbo, inline_into_try)
+DEFINE_BOOL(inline_into_try, true, "inline into try blocks")
 DEFINE_INT(escape_analysis_iterations, 2,
            "maximum number of escape analysis fix-point iterations")
 
@@ -430,7 +421,7 @@
 DEFINE_BOOL(turbo, false, "enable TurboFan compiler")
 DEFINE_IMPLICATION(turbo, turbo_asm_deoptimization)
 DEFINE_IMPLICATION(turbo, turbo_loop_peeling)
-DEFINE_BOOL(turbo_from_bytecode, false, "enable building graphs from bytecode")
+DEFINE_IMPLICATION(turbo, turbo_escape)
 DEFINE_BOOL(turbo_sp_frame_access, false,
             "use stack pointer-relative access to frame wherever possible")
 DEFINE_BOOL(turbo_preprocess_ranges, true,
@@ -453,21 +444,16 @@
 DEFINE_BOOL(turbo_asm_deoptimization, false,
             "enable deoptimization in TurboFan for asm.js code")
 DEFINE_BOOL(turbo_verify, DEBUG_BOOL, "verify TurboFan graphs at each phase")
-DEFINE_BOOL(turbo_verify_machine_graph, false,
-            "verify TurboFan machine graph before instruction selection")
+DEFINE_STRING(turbo_verify_machine_graph, nullptr,
+              "verify TurboFan machine graph before instruction selection")
 DEFINE_BOOL(turbo_stats, false, "print TurboFan statistics")
 DEFINE_BOOL(turbo_stats_nvp, false,
             "print TurboFan statistics in machine-readable format")
 DEFINE_BOOL(turbo_splitting, true, "split nodes during scheduling in TurboFan")
 DEFINE_BOOL(turbo_type_feedback, true,
             "use typed feedback for representation inference in Turbofan")
-DEFINE_BOOL(turbo_source_positions, false,
-            "track source code positions when building TurboFan IR")
-DEFINE_IMPLICATION(trace_turbo, turbo_source_positions)
 DEFINE_BOOL(function_context_specialization, false,
             "enable function context specialization in TurboFan")
-DEFINE_BOOL(native_context_specialization, true,
-            "enable native context specialization in TurboFan")
 DEFINE_BOOL(turbo_inlining, true, "enable inlining in TurboFan")
 DEFINE_BOOL(trace_turbo_inlining, false, "trace TurboFan inlining")
 DEFINE_BOOL(turbo_load_elimination, true, "enable load elimination in TurboFan")
@@ -485,8 +471,6 @@
 DEFINE_BOOL(turbo_loop_variable, true, "Turbofan loop variable optimization")
 DEFINE_BOOL(turbo_cf_optimization, true, "optimize control flow in TurboFan")
 DEFINE_BOOL(turbo_frame_elision, true, "elide frames in TurboFan")
-DEFINE_BOOL(turbo_cache_shared_code, true, "cache context-independent code")
-DEFINE_BOOL(turbo_preserve_shared_code, false, "keep context-independent code")
 DEFINE_BOOL(turbo_escape, false, "enable escape analysis")
 DEFINE_BOOL(turbo_instruction_scheduling, false,
             "enable instruction scheduling in TurboFan")
@@ -535,6 +519,8 @@
             "enable prototype exception handling opcodes for wasm")
 DEFINE_BOOL(wasm_mv_prototype, false,
             "enable prototype multi-value support for wasm")
+DEFINE_BOOL(wasm_atomics_prototype, false,
+            "enable prototype atomic opcodes for wasm")
 
 DEFINE_BOOL(wasm_trap_handler, false,
             "use signal handlers to catch out of bounds memory access in wasm"
@@ -641,6 +627,10 @@
 DEFINE_BOOL(serialize_eager, false, "compile eagerly when caching scripts")
 DEFINE_BOOL(serialize_age_code, false, "pre age code in the code cache")
 DEFINE_BOOL(trace_serializer, false, "print code serializer trace")
+#ifdef DEBUG
+DEFINE_BOOL(external_reference_stats, false,
+            "print statistics on external references used during serialization")
+#endif  // DEBUG
 
 // compiler.cc
 DEFINE_INT(min_preparse_length, 1024,
@@ -739,13 +729,15 @@
             "track un-executed functions to age code and flush only "
             "old code (required for code flushing)")
 DEFINE_BOOL(incremental_marking, true, "use incremental marking")
-DEFINE_BOOL(incremental_marking_wrappers, false,
+DEFINE_BOOL(incremental_marking_wrappers, true,
             "use incremental marking for marking wrappers")
 DEFINE_INT(min_progress_during_incremental_marking_finalization, 32,
            "keep finalizing incremental marking as long as we discover at "
            "least this many unmarked objects")
 DEFINE_INT(max_incremental_marking_finalization_rounds, 3,
            "at most try this many times to finalize incremental marking")
+DEFINE_BOOL(minor_mc, false, "perform young generation mark compact GCs")
+DEFINE_NEG_IMPLICATION(minor_mc, incremental_marking)
 DEFINE_BOOL(black_allocation, false, "use black allocation")
 DEFINE_BOOL(concurrent_sweeping, true, "use concurrent sweeping")
 DEFINE_BOOL(parallel_compaction, true, "use parallel compaction")
@@ -757,7 +749,10 @@
             "track object counts and memory usage")
 DEFINE_BOOL(trace_gc_object_stats, false,
             "trace object counts and memory usage")
+DEFINE_INT(gc_stats, 0, "Used by tracing internally to enable gc statistics")
 DEFINE_IMPLICATION(trace_gc_object_stats, track_gc_object_stats)
+DEFINE_VALUE_IMPLICATION(track_gc_object_stats, gc_stats, 1)
+DEFINE_VALUE_IMPLICATION(trace_gc_object_stats, gc_stats, 1)
 DEFINE_NEG_IMPLICATION(trace_gc_object_stats, incremental_marking)
 DEFINE_BOOL(track_detached_contexts, true,
             "track native contexts that are expected to be garbage collected")
@@ -769,11 +764,12 @@
 #endif
 DEFINE_BOOL(move_object_start, true, "enable moving of object starts")
 DEFINE_BOOL(memory_reducer, true, "use memory reducer")
-DEFINE_BOOL(scavenge_reclaim_unmodified_objects, true,
-            "remove unmodified and unreferenced objects")
 DEFINE_INT(heap_growing_percent, 0,
            "specifies heap growing factor as (1 + heap_growing_percent/100)")
 
+// spaces.cc
+DEFINE_INT(v8_os_page_size, 0, "override OS page size (in KBytes)")
+
 // execution.cc, messages.cc
 DEFINE_BOOL(clear_exceptions_on_js_entry, false,
             "clear pending exceptions when entering JavaScript")
@@ -802,7 +798,7 @@
 // ic.cc
 DEFINE_BOOL(use_ic, true, "use inline caching")
 DEFINE_BOOL(trace_ic, false, "trace inline cache state transitions")
-DEFINE_BOOL(tf_load_ic_stub, true, "use TF LoadIC stub")
+DEFINE_BOOL_READONLY(tf_load_ic_stub, true, "use TF LoadIC stub")
 DEFINE_BOOL(tf_store_ic_stub, true, "use TF StoreIC stub")
 
 // macro-assembler-ia32.cc
@@ -839,6 +835,7 @@
 // parser.cc
 DEFINE_BOOL(allow_natives_syntax, false, "allow natives syntax")
 DEFINE_BOOL(trace_parse, false, "trace parsing and preparsing")
+DEFINE_BOOL(trace_preparse, false, "trace preparsing decisions")
 DEFINE_BOOL(lazy_inner_functions, false, "enable lazy parsing inner functions")
 
 // simulator-arm.cc, simulator-arm64.cc and simulator-mips.cc
@@ -884,9 +881,14 @@
            "Fixed seed to use to hash property keys (0 means random)"
            "(with snapshots this option cannot override the baked-in seed)")
 DEFINE_BOOL(trace_rail, false, "trace RAIL mode")
+DEFINE_BOOL(print_all_exceptions, false,
+            "print exception object and stack trace on each thrown exception")
 
 // runtime.cc
 DEFINE_BOOL(runtime_call_stats, false, "report runtime call counts and times")
+DEFINE_INT(runtime_stats, 0,
+           "internal usage only for controlling runtime statistics")
+DEFINE_VALUE_IMPLICATION(runtime_call_stats, runtime_stats, 1)
 
 // snapshot-common.cc
 DEFINE_BOOL(profile_deserialization, false,
@@ -915,12 +917,6 @@
 DEFINE_BOOL(profile_hydrogen_code_stub_compilation, false,
             "Print the time it takes to lazily compile hydrogen code stubs.")
 
-DEFINE_BOOL(predictable, false, "enable predictable mode")
-DEFINE_NEG_IMPLICATION(predictable, concurrent_recompilation)
-DEFINE_NEG_IMPLICATION(predictable, concurrent_sweeping)
-DEFINE_NEG_IMPLICATION(predictable, parallel_compaction)
-DEFINE_NEG_IMPLICATION(predictable, memory_reducer)
-
 // mark-compact.cc
 DEFINE_BOOL(force_marking_deque_overflows, false,
             "force overflows of marking deque by reducing it's size "
@@ -1026,7 +1022,6 @@
             "(requires heap_stats)")
 DEFINE_BOOL(trace_live_bytes, false,
             "trace incrementing and resetting of live bytes")
-
 DEFINE_BOOL(trace_isolates, false, "trace isolate state changes")
 
 // Regexp
@@ -1067,7 +1062,6 @@
 DEFINE_IMPLICATION(prof, prof_cpp)
 DEFINE_BOOL(prof_browser_mode, true,
             "Used with --prof, turns on browser-compatible mode for profiling.")
-DEFINE_BOOL(log_regexp, false, "Log regular expression execution.")
 DEFINE_STRING(logfile, "v8.log", "Specify the name of the log file.")
 DEFINE_BOOL(logfile_per_isolate, true, "Separate log files for each isolate.")
 DEFINE_BOOL(ll_prof, false, "Enable low-level linux profiler.")
@@ -1165,10 +1159,27 @@
 #endif
 #endif
 
+#undef FLAG
+#define FLAG FLAG_FULL
 
 //
-// VERIFY_PREDICTABLE related flags
+// Predictable mode related flags.
 //
+
+DEFINE_BOOL(predictable, false, "enable predictable mode")
+DEFINE_IMPLICATION(predictable, single_threaded)
+DEFINE_NEG_IMPLICATION(predictable, memory_reducer)
+
+//
+// Threading related flags.
+//
+
+DEFINE_BOOL(single_threaded, false, "disable the use of background tasks")
+DEFINE_NEG_IMPLICATION(single_threaded, concurrent_recompilation)
+DEFINE_NEG_IMPLICATION(single_threaded, concurrent_sweeping)
+DEFINE_NEG_IMPLICATION(single_threaded, parallel_compaction)
+
+
 #undef FLAG
 
 #ifdef VERIFY_PREDICTABLE
@@ -1182,7 +1193,6 @@
 DEFINE_INT(dump_allocations_digest_at_alloc, -1,
            "dump allocations digest each n-th allocation")
 
-
 //
 // Read-only flags
 //
@@ -1197,7 +1207,6 @@
             "enable in-object double fields unboxing (64-bit only)")
 DEFINE_IMPLICATION(unbox_double_fields, track_double_fields)
 
-
 // Cleanup...
 #undef FLAG_FULL
 #undef FLAG_READONLY
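
The implication macros above take effect when flags are parsed. A simplified
model of their semantics (a sketch only: the function is invented, and the
real expansion happens by re-including this header under a different FLAG
mode in flags.cc):

    // DEFINE_IMPLICATION(when, then) behaves like "if (FLAG_when)
    // FLAG_then = true;"; DEFINE_NEG_IMPLICATION sets the target to false;
    // DEFINE_VALUE_IMPLICATION assigns a value.
    void ApplyImplicationsSketch() {
      if (FLAG_predictable) FLAG_single_threaded = true;
      if (FLAG_single_threaded) {
        FLAG_concurrent_recompilation = false;
        FLAG_concurrent_sweeping = false;
        FLAG_parallel_compaction = false;
      }
      if (FLAG_runtime_call_stats) FLAG_runtime_stats = 1;
    }

With this change --predictable implies the whole single-threaded bundle
instead of listing each negation itself.
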
diff --git a/src/frames.cc b/src/frames.cc
index c67fdc2..3b73027 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -15,8 +15,8 @@
 #include "src/safepoint-table.h"
 #include "src/string-stream.h"
 #include "src/vm-state-inl.h"
-#include "src/wasm/wasm-debug.h"
 #include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
 
 namespace v8 {
 namespace internal {
@@ -404,17 +404,17 @@
 static bool IsInterpreterFramePc(Isolate* isolate, Address pc) {
   Code* interpreter_entry_trampoline =
       isolate->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
+  Code* interpreter_bytecode_advance =
+      isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeAdvance);
   Code* interpreter_bytecode_dispatch =
       isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
-  Code* interpreter_baseline_on_return =
-      isolate->builtins()->builtin(Builtins::kInterpreterMarkBaselineOnReturn);
 
   return (pc >= interpreter_entry_trampoline->instruction_start() &&
           pc < interpreter_entry_trampoline->instruction_end()) ||
+         (pc >= interpreter_bytecode_advance->instruction_start() &&
+          pc < interpreter_bytecode_advance->instruction_end()) ||
          (pc >= interpreter_bytecode_dispatch->instruction_start() &&
-          pc < interpreter_bytecode_dispatch->instruction_end()) ||
-         (pc >= interpreter_baseline_on_return->instruction_start() &&
-          pc < interpreter_baseline_on_return->instruction_end());
+          pc < interpreter_bytecode_dispatch->instruction_end());
 }
 
 StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
@@ -439,8 +439,8 @@
     if (!marker->IsSmi()) {
       if (maybe_function->IsSmi()) {
         return NONE;
-      } else if (FLAG_ignition && IsInterpreterFramePc(iterator->isolate(),
-                                                       *(state->pc_address))) {
+      } else if (IsInterpreterFramePc(iterator->isolate(),
+                                      *(state->pc_address))) {
         return INTERPRETED;
       } else {
         return JAVA_SCRIPT;
@@ -720,6 +720,12 @@
   return isolate()->heap()->undefined_value();
 }
 
+int StandardFrame::position() const {
+  AbstractCode* code = AbstractCode::cast(LookupCode());
+  int code_offset = static_cast<int>(pc() - code->instruction_start());
+  return code->SourcePosition(code_offset);
+}
+
 int StandardFrame::ComputeExpressionsCount() const {
   Address base = GetExpressionAddress(0);
   Address limit = sp() - kPointerSize;
@@ -985,16 +991,16 @@
   return code->LookupRangeInHandlerTable(pc_offset, stack_depth, prediction);
 }
 
-void JavaScriptFrame::PrintFunctionAndOffset(JSFunction* function, Code* code,
-                                             Address pc, FILE* file,
+void JavaScriptFrame::PrintFunctionAndOffset(JSFunction* function,
+                                             AbstractCode* code,
+                                             int code_offset, FILE* file,
                                              bool print_line_number) {
   PrintF(file, "%s", function->IsOptimized() ? "*" : "~");
   function->PrintName(file);
-  int code_offset = static_cast<int>(pc - code->instruction_start());
   PrintF(file, "+%d", code_offset);
   if (print_line_number) {
     SharedFunctionInfo* shared = function->shared();
-    int source_pos = AbstractCode::cast(code)->SourcePosition(code_offset);
+    int source_pos = code->SourcePosition(code_offset);
     Object* maybe_script = shared->script();
     if (maybe_script->IsScript()) {
       Script* script = Script::cast(maybe_script);
@@ -1024,8 +1030,17 @@
     if (it.frame()->is_java_script()) {
       JavaScriptFrame* frame = it.frame();
       if (frame->IsConstructor()) PrintF(file, "new ");
-      PrintFunctionAndOffset(frame->function(), frame->unchecked_code(),
-                             frame->pc(), file, print_line_number);
+      JSFunction* function = frame->function();
+      int code_offset = 0;
+      if (frame->is_interpreted()) {
+        InterpretedFrame* iframe = reinterpret_cast<InterpretedFrame*>(frame);
+        code_offset = iframe->GetBytecodeOffset();
+      } else {
+        Code* code = frame->unchecked_code();
+        code_offset = static_cast<int>(frame->pc() - code->instruction_start());
+      }
+      PrintFunctionAndOffset(function, function->abstract_code(), code_offset,
+                             file, print_line_number);
       if (print_args) {
         // function arguments
         // (we are intentionally only printing the actually
@@ -1208,9 +1223,7 @@
         abstract_code = AbstractCode::cast(code);
       } else {
         DCHECK_EQ(frame_opcode, Translation::INTERPRETED_FRAME);
-        // BailoutId points to the next bytecode in the bytecode array. Subtract
-        // 1 to get the end of current bytecode.
-        code_offset = bailout_id.ToInt() - 1;
+        code_offset = bailout_id.ToInt();  // Points to current bytecode.
         abstract_code = AbstractCode::cast(shared_info->bytecode_array());
       }
       FrameSummary summary(receiver, function, abstract_code, code_offset,
@@ -1270,6 +1283,19 @@
   return nullptr;
 }
 
+Object* OptimizedFrame::receiver() const {
+  Code* code = LookupCode();
+  if (code->kind() == Code::BUILTIN) {
+    Address argc_ptr = fp() + OptimizedBuiltinFrameConstants::kArgCOffset;
+    intptr_t argc = *reinterpret_cast<intptr_t*>(argc_ptr);
+    intptr_t args_size =
+        (StandardFrameConstants::kFixedSlotCountAboveFp + argc) * kPointerSize;
+    Address receiver_ptr = fp() + args_size;
+    return *reinterpret_cast<Object**>(receiver_ptr);
+  } else {
+    return JavaScriptFrame::receiver();
+  }
+}
 
 void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) const {
   DCHECK(functions->length() == 0);
@@ -1335,6 +1361,12 @@
   return Memory::Object_at(fp() + StackSlotOffsetRelativeToFp(index));
 }
 
+int InterpretedFrame::position() const {
+  AbstractCode* code = AbstractCode::cast(GetBytecodeArray());
+  int code_offset = GetBytecodeOffset();
+  return code->SourcePosition(code_offset);
+}
+
 int InterpretedFrame::LookupExceptionHandlerInTable(
     int* context_register, HandlerTable::CatchPrediction* prediction) {
   BytecodeArray* bytecode = function()->shared()->bytecode_array();
@@ -1351,6 +1383,17 @@
   return raw_offset - BytecodeArray::kHeaderSize + kHeapObjectTag;
 }
 
+int InterpretedFrame::GetBytecodeOffset(Address fp) {
+  const int offset = InterpreterFrameConstants::kExpressionsOffset;
+  const int index = InterpreterFrameConstants::kBytecodeOffsetExpressionIndex;
+  DCHECK_EQ(
+      InterpreterFrameConstants::kBytecodeOffsetFromFp,
+      InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
+  Address expression_offset = fp + offset - index * kPointerSize;
+  int raw_offset = Smi::cast(Memory::Object_at(expression_offset))->value();
+  return raw_offset - BytecodeArray::kHeaderSize + kHeapObjectTag;
+}
+
 void InterpretedFrame::PatchBytecodeOffset(int new_offset) {
   const int index = InterpreterFrameConstants::kBytecodeOffsetExpressionIndex;
   DCHECK_EQ(
@@ -1460,9 +1503,9 @@
   return fp() + ExitFrameConstants::kCallerSPOffset;
 }
 
-Object* WasmFrame::wasm_obj() const {
+Object* WasmFrame::wasm_instance() const {
   Object* ret = wasm::GetOwningWasmInstance(LookupCode());
-  if (ret == nullptr) ret = *(isolate()->factory()->undefined_value());
+  if (ret == nullptr) ret = isolate()->heap()->undefined_value();
   return ret;
 }
 
@@ -1473,9 +1516,18 @@
 }
 
 Script* WasmFrame::script() const {
-  Handle<JSObject> wasm(JSObject::cast(wasm_obj()), isolate());
-  Handle<wasm::WasmDebugInfo> debug_info = wasm::GetDebugInfo(wasm);
-  return wasm::WasmDebugInfo::GetFunctionScript(debug_info, function_index());
+  Handle<JSObject> instance(JSObject::cast(wasm_instance()), isolate());
+  return *wasm::GetScript(instance);
+}
+
+int WasmFrame::position() const {
+  int position = StandardFrame::position();
+  if (wasm::WasmIsAsmJs(wasm_instance(), isolate())) {
+    Handle<JSObject> instance(JSObject::cast(wasm_instance()), isolate());
+    position =
+        wasm::GetAsmWasmSourcePosition(instance, function_index(), position);
+  }
+  return position;
 }
 
 int WasmFrame::LookupExceptionHandlerInTable(int* stack_slots) {
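
A worked instance of the slot arithmetic in OptimizedFrame::receiver() above
(assumed values: a 64-bit target, so kPointerSize == 8, and two fixed slots
above the frame pointer for the saved fp and return address):

    // With argc == 2: args_size = (2 + 2) * 8 = 32, so the receiver is
    // read from fp + 32, the argument slot farthest above the fixed header.
    Address ReceiverSlot(Address fp, intptr_t argc) {
      const intptr_t kPointerSize = 8;            // assumed: 64-bit
      const intptr_t kFixedSlotCountAboveFp = 2;  // saved fp + return addr
      return fp + (kFixedSlotCountAboveFp + argc) * kPointerSize;
    }
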
diff --git a/src/frames.h b/src/frames.h
index 373f4de..1daa364 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -218,6 +218,48 @@
   static const int kLastObjectOffset = kContextOffset;
 };
 
+// OptimizedBuiltinFrameConstants are used for TF-generated builtins. They
+// always have a context below the saved fp/constant pool and below that the
+// JSFunction of the executing function and below that an integer (not a Smi)
+// containing the number of arguments passed to the builtin.
+//
+//  slot      JS frame
+//       +-----------------+--------------------------------
+//  -n-1 |   parameter 0   |                            ^
+//       |- - - - - - - - -|                            |
+//  -n   |                 |                          Caller
+//  ...  |       ...       |                       frame slots
+//  -2   |  parameter n-1  |                       (slot < 0)
+//       |- - - - - - - - -|                            |
+//  -1   |   parameter n   |                            v
+//  -----+-----------------+--------------------------------
+//   0   |   return addr   |   ^                        ^
+//       |- - - - - - - - -|   |                        |
+//   1   | saved frame ptr | Fixed                      |
+//       |- - - - - - - - -| Header <-- frame ptr       |
+//   2   | [Constant Pool] |   |                        |
+//       |- - - - - - - - -|   |                        |
+// 2+cp  |     Context     |   |   if a constant pool   |
+//       |- - - - - - - - -|   |    is used, cp = 1,    |
+// 3+cp  |    JSFunction   |   |   otherwise, cp = 0    |
+//       |- - - - - - - - -|   |                        |
+// 4+cp  |      argc       |   v                        |
+//       +-----------------+----                        |
+// 5+cp  |                 |   ^                      Callee
+//       |- - - - - - - - -|   |                   frame slots
+//  ...  |                 | Frame slots           (slot >= 0)
+//       |- - - - - - - - -|   |                        |
+//       |                 |   v                        |
+//  -----+-----------------+----- <-- stack ptr -------------
+//
+class OptimizedBuiltinFrameConstants : public StandardFrameConstants {
+ public:
+  static const int kArgCSize = kPointerSize;
+  static const int kArgCOffset = -3 * kPointerSize - kCPSlotSize;
+  static const int kFixedFrameSize = kFixedFrameSizeAboveFp - kArgCOffset;
+  static const int kFixedSlotCount = kFixedFrameSize / kPointerSize;
+};
+
 // TypedFrames have a SMI type marker value below the saved FP/constant pool to
 // distinguish them from StandardFrames, which have a context in that position
 // instead.
@@ -308,10 +350,9 @@
  public:
   // FP-relative.
   static const int kContextOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
-  static const int kAllocationSiteOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
-  static const int kLengthOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
-  static const int kImplicitReceiverOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(3);
-  DEFINE_TYPED_FRAME_SIZES(4);
+  static const int kLengthOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+  static const int kImplicitReceiverOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
+  DEFINE_TYPED_FRAME_SIZES(3);
 };
 
 class StubFailureTrampolineFrameConstants : public InternalFrameConstants {
@@ -734,6 +775,7 @@
   virtual Object* receiver() const;
   virtual Script* script() const;
   virtual Object* context() const;
+  virtual int position() const;
 
   // Access the expressions in the stack frame including locals.
   inline Object* GetExpression(int index) const;
@@ -871,8 +913,8 @@
     return static_cast<JavaScriptFrame*>(frame);
   }
 
-  static void PrintFunctionAndOffset(JSFunction* function, Code* code,
-                                     Address pc, FILE* file,
+  static void PrintFunctionAndOffset(JSFunction* function, AbstractCode* code,
+                                     int code_offset, FILE* file,
                                      bool print_line_number);
 
   static void PrintTop(Isolate* isolate, FILE* file, bool print_args,
@@ -941,6 +983,8 @@
 
   DeoptimizationInputData* GetDeoptimizationData(int* deopt_index) const;
 
+  Object* receiver() const override;
+
   static int StackSlotOffsetRelativeToFp(int slot_index);
 
  protected:
@@ -957,6 +1001,9 @@
  public:
   Type type() const override { return INTERPRETED; }
 
+  // Accessors.
+  int position() const override;
+
   // Lookup exception handler for current {pc}, returns -1 if none found.
   int LookupExceptionHandlerInTable(
       int* data, HandlerTable::CatchPrediction* prediction) override;
@@ -984,6 +1031,8 @@
       List<FrameSummary>* frames,
       FrameSummary::Mode mode = FrameSummary::kExactSummary) const override;
 
+  static int GetBytecodeOffset(Address fp);
+
  protected:
   inline explicit InterpretedFrame(StackFrameIteratorBase* iterator);
 
@@ -1064,9 +1113,10 @@
   Code* unchecked_code() const override;
 
   // Accessors.
-  Object* wasm_obj() const;
+  Object* wasm_instance() const;
   uint32_t function_index() const;
   Script* script() const override;
+  int position() const override;
 
   static WasmFrame* cast(StackFrame* frame) {
     DCHECK(frame->is_wasm());
diff --git a/src/full-codegen/arm/full-codegen-arm.cc b/src/full-codegen/arm/full-codegen-arm.cc
index e8eeb8e..22c991b 100644
--- a/src/full-codegen/arm/full-codegen-arm.cc
+++ b/src/full-codegen/arm/full-codegen-arm.cc
@@ -361,11 +361,7 @@
   masm()->CheckConstPool(true, false);
 }
 
-
-void FullCodeGenerator::ClearAccumulator() {
-  __ mov(r0, Operand(Smi::FromInt(0)));
-}
-
+void FullCodeGenerator::ClearAccumulator() { __ mov(r0, Operand(Smi::kZero)); }
 
 void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
   __ mov(r2, Operand(profiling_counter_));
@@ -1022,8 +1018,7 @@
   __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
   __ b(eq, &exit);
   __ bind(&convert);
-  ToObjectStub stub(isolate());
-  __ CallStub(&stub);
+  __ Call(isolate()->builtins()->ToObject(), RelocInfo::CODE_TARGET);
   RestoreContext();
   __ bind(&done_convert);
   PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
@@ -1062,7 +1057,7 @@
   __ bind(&use_cache);
 
   __ EnumLength(r1, r0);
-  __ cmp(r1, Operand(Smi::FromInt(0)));
+  __ cmp(r1, Operand(Smi::kZero));
   __ b(eq, &no_descriptors);
 
   __ LoadInstanceDescriptors(r0, r2);
@@ -1071,7 +1066,7 @@
 
   // Set up the four remaining stack slots.
   __ push(r0);  // Map.
-  __ mov(r0, Operand(Smi::FromInt(0)));
+  __ mov(r0, Operand(Smi::kZero));
   // Push enumeration cache, enumeration cache length (as smi) and zero.
   __ Push(r2, r1, r0);
   __ jmp(&loop);
@@ -1088,7 +1083,7 @@
   __ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
   __ Push(r1);  // Fixed array length (as smi).
   PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
-  __ mov(r0, Operand(Smi::FromInt(0)));
+  __ mov(r0, Operand(Smi::kZero));
   __ Push(r0);  // Initial index.
 
   // Generate code for doing the condition check.
@@ -1124,10 +1119,9 @@
   __ str(r2, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(vector_index)));
 
   // r0 contains the key. The receiver in r1 is the second argument to the
-  // ForInFilterStub. ForInFilter returns undefined if the receiver doesn't
+  // ForInFilter. ForInFilter returns undefined if the receiver doesn't
   // have the key or returns the name-converted key.
-  ForInFilterStub filter_stub(isolate());
-  __ CallStub(&filter_stub);
+  __ Call(isolate()->builtins()->ForInFilter(), RelocInfo::CODE_TARGET);
   RestoreContext();
   PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
   __ CompareRoot(result_register(), Heap::kUndefinedValueRootIndex);
@@ -1299,7 +1293,7 @@
       DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
       Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
                                                : "[ Stack variable");
-      if (NeedsHoleCheckForLoad(proxy)) {
+      if (proxy->hole_check_mode() == HoleCheckMode::kRequired) {
         // Throw a reference error when using an uninitialized let/const
         // binding in harmony mode.
         Label done;
@@ -1742,12 +1736,14 @@
 
   // Store the value.
   switch (assign_type) {
-    case VARIABLE:
-      EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
-                             expr->op(), expr->AssignmentSlot());
+    case VARIABLE: {
+      VariableProxy* proxy = expr->target()->AsVariableProxy();
+      EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
+                             proxy->hole_check_mode());
       PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       context()->Plug(r0);
       break;
+    }
     case NAMED_PROPERTY:
       EmitNamedPropertyAssignment(expr);
       break;
@@ -1935,7 +1931,7 @@
       __ mov(right, Operand(scratch1), LeaveCC, ne);
       __ b(ne, &done);
       __ add(scratch2, right, Operand(left), SetCC);
-      __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
+      __ mov(right, Operand(Smi::kZero), LeaveCC, pl);
       __ b(mi, &stub_call);
       break;
     }
@@ -2029,9 +2025,10 @@
 
   switch (assign_type) {
     case VARIABLE: {
-      Variable* var = expr->AsVariableProxy()->var();
+      VariableProxy* proxy = expr->AsVariableProxy();
       EffectContext context(this);
-      EmitVariableAssignment(var, Token::ASSIGN, slot);
+      EmitVariableAssignment(proxy->var(), Token::ASSIGN, slot,
+                             proxy->hole_check_mode());
       break;
     }
     case NAMED_PROPERTY: {
@@ -2106,9 +2103,9 @@
   }
 }
 
-
 void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
-                                               FeedbackVectorSlot slot) {
+                                               FeedbackVectorSlot slot,
+                                               HoleCheckMode hole_check_mode) {
   if (var->IsUnallocated()) {
     // Global var, const, or let.
     __ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
@@ -2119,7 +2116,7 @@
     DCHECK(var->IsStackAllocated() || var->IsContextSlot());
     MemOperand location = VarOperand(var, r1);
     // Perform an initialization check for lexically declared variables.
-    if (var->binding_needs_init()) {
+    if (hole_check_mode == HoleCheckMode::kRequired) {
       Label assign;
       __ ldr(r3, location);
       __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
@@ -2229,17 +2226,6 @@
   context()->Plug(r0);
 }
 
-
-void FullCodeGenerator::CallIC(Handle<Code> code,
-                               TypeFeedbackId ast_id) {
-  ic_total_count_++;
-  // All calls must have a predictable size in full-codegen code to ensure that
-  // the debugger can patch them correctly.
-  __ Call(code, RelocInfo::CODE_TARGET, ast_id, al,
-          NEVER_INLINE_TARGET_ADDRESS);
-}
-
-
 // Code common for calls using the IC.
 void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
   Expression* callee = expr->expression();
@@ -2394,14 +2380,12 @@
     // not return to this function.
     EmitProfilingCounterHandlingForReturnSequence(true);
   }
-  Handle<Code> ic =
-      CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
-          .code();
+  Handle<Code> code =
+      CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
   __ mov(r3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
   __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
-  // Don't assign a type feedback id to the IC, since type feedback is provided
-  // by the vector above.
-  CallIC(ic);
+  __ mov(r0, Operand(arg_count));
+  CallIC(code);
   OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
@@ -2505,11 +2489,13 @@
 
   // Record source position for debugger.
   SetCallPosition(expr);
+  Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
+                                          expr->tail_call_mode())
+                          .code();
+  __ mov(r3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
   __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
   __ mov(r0, Operand(arg_count));
-  __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
-                                      expr->tail_call_mode()),
-          RelocInfo::CODE_TARGET);
+  __ Call(code, RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
   RecordJSReturnSite(expr);
   RestoreContext();
@@ -2549,7 +2535,7 @@
   __ mov(r3, Operand(SmiFromSlot(expr->CallNewFeedbackSlot())));
 
   CallConstructStub stub(isolate());
-  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+  CallIC(stub.GetCode());
   OperandStackDepthDecrement(arg_count + 1);
   PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
   RestoreContext();
@@ -2843,41 +2829,6 @@
   context()->DropAndPlug(1, r0);
 }
 
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
-  __ tst(r0, Operand(String::kContainsCachedArrayIndexMask));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-  VisitForAccumulatorValue(args->at(0));
-
-  __ AssertString(r0);
-
-  __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
-  __ IndexFromHash(r0, r0);
-
-  context()->Plug(r0);
-}
-
-
 void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_EQ(1, args->length());
@@ -3056,8 +3007,7 @@
         VisitForTypeofValue(expr->expression());
       }
       __ mov(r3, r0);
-      TypeofStub typeof_stub(isolate());
-      __ CallStub(&typeof_stub);
+      __ Call(isolate()->builtins()->Typeof(), RelocInfo::CODE_TARGET);
       context()->Plug(r0);
       break;
     }
@@ -3084,7 +3034,7 @@
   } else {
     // Reserve space for result of postfix operation.
     if (expr->is_postfix() && !context()->IsEffect()) {
-      __ mov(ip, Operand(Smi::FromInt(0)));
+      __ mov(ip, Operand(Smi::kZero));
       PushOperand(ip);
     }
     switch (assign_type) {
@@ -3236,11 +3186,12 @@
 
   // Store the value returned in r0.
   switch (assign_type) {
-    case VARIABLE:
+    case VARIABLE: {
+      VariableProxy* proxy = expr->expression()->AsVariableProxy();
       if (expr->is_postfix()) {
         { EffectContext context(this);
-          EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
-                                 Token::ASSIGN, expr->CountSlot());
+          EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+                                 proxy->hole_check_mode());
           PrepareForBailoutForId(expr->AssignmentId(),
                                  BailoutState::TOS_REGISTER);
           context.Plug(r0);
@@ -3251,13 +3202,14 @@
           context()->PlugTOS();
         }
       } else {
-        EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
-                               Token::ASSIGN, expr->CountSlot());
+        EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+                               proxy->hole_check_mode());
         PrepareForBailoutForId(expr->AssignmentId(),
                                BailoutState::TOS_REGISTER);
         context()->Plug(r0);
       }
       break;
+    }
     case NAMED_PROPERTY: {
       PopOperand(StoreDescriptor::ReceiverRegister());
       CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
@@ -3426,8 +3378,7 @@
       VisitForAccumulatorValue(expr->right());
       SetExpressionPosition(expr);
       PopOperand(r1);
-      InstanceOfStub stub(isolate());
-      __ CallStub(&stub);
+      __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ CompareRoot(r0, Heap::kTrueValueRootIndex);
       Split(eq, if_true, if_false, fall_through);
diff --git a/src/full-codegen/arm64/full-codegen-arm64.cc b/src/full-codegen/arm64/full-codegen-arm64.cc
index 1854f10..51b3009 100644
--- a/src/full-codegen/arm64/full-codegen-arm64.cc
+++ b/src/full-codegen/arm64/full-codegen-arm64.cc
@@ -362,11 +362,7 @@
   masm()->CheckConstPool(true, false);
 }
 
-
-void FullCodeGenerator::ClearAccumulator() {
-  __ Mov(x0, Smi::FromInt(0));
-}
-
+void FullCodeGenerator::ClearAccumulator() { __ Mov(x0, Smi::kZero); }
 
 void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
   __ Mov(x2, Operand(profiling_counter_));
@@ -1017,8 +1013,7 @@
   __ JumpIfRoot(x0, Heap::kNullValueRootIndex, &exit);
   __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, &exit);
   __ Bind(&convert);
-  ToObjectStub stub(isolate());
-  __ CallStub(&stub);
+  __ Call(isolate()->builtins()->ToObject(), RelocInfo::CODE_TARGET);
   RestoreContext();
   __ Bind(&done_convert);
   PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
@@ -1113,10 +1108,9 @@
   __ Str(x10, FieldMemOperand(x3, FixedArray::OffsetOfElementAt(vector_index)));
 
   // x0 contains the key. The receiver in x1 is the second argument to the
-  // ForInFilterStub. ForInFilter returns undefined if the receiver doesn't
+  // ForInFilter. ForInFilter returns undefined if the receiver doesn't
   // have the key or returns the name-converted key.
-  ForInFilterStub filter_stub(isolate());
-  __ CallStub(&filter_stub);
+  __ Call(isolate()->builtins()->ForInFilter(), RelocInfo::CODE_TARGET);
   RestoreContext();
   PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
   __ CompareRoot(result_register(), Heap::kUndefinedValueRootIndex);
@@ -1286,7 +1280,7 @@
       Comment cmnt(masm_, var->IsContextSlot()
                               ? "Context variable"
                               : "Stack variable");
-      if (NeedsHoleCheckForLoad(proxy)) {
+      if (proxy->hole_check_mode() == HoleCheckMode::kRequired) {
         // Throw a reference error when using an uninitialized let/const
         // binding in harmony mode.
         Label done;
@@ -1721,12 +1715,14 @@
 
   // Store the value.
   switch (assign_type) {
-    case VARIABLE:
-      EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
-                             expr->op(), expr->AssignmentSlot());
+    case VARIABLE: {
+      VariableProxy* proxy = expr->target()->AsVariableProxy();
+      EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
+                             proxy->hole_check_mode());
       PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       context()->Plug(x0);
       break;
+    }
     case NAMED_PROPERTY:
       EmitNamedPropertyAssignment(expr);
       break;
@@ -1919,9 +1915,10 @@
 
   switch (assign_type) {
     case VARIABLE: {
-      Variable* var = expr->AsVariableProxy()->var();
+      VariableProxy* proxy = expr->AsVariableProxy();
       EffectContext context(this);
-      EmitVariableAssignment(var, Token::ASSIGN, slot);
+      EmitVariableAssignment(proxy->var(), Token::ASSIGN, slot,
+                             proxy->hole_check_mode());
       break;
     }
     case NAMED_PROPERTY: {
@@ -1998,9 +1995,9 @@
   }
 }
 
-
 void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
-                                               FeedbackVectorSlot slot) {
+                                               FeedbackVectorSlot slot,
+                                               HoleCheckMode hole_check_mode) {
   ASM_LOCATION("FullCodeGenerator::EmitVariableAssignment");
   if (var->IsUnallocated()) {
     // Global var, const, or let.
@@ -2123,16 +2120,6 @@
   context()->Plug(x0);
 }
 
-
-void FullCodeGenerator::CallIC(Handle<Code> code,
-                               TypeFeedbackId ast_id) {
-  ic_total_count_++;
-  // All calls must have a predictable size in full-codegen code to ensure that
-  // the debugger can patch them correctly.
-  __ Call(code, RelocInfo::CODE_TARGET, ast_id);
-}
-
-
 // Code common for calls using the IC.
 void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
   ASM_LOCATION("FullCodeGenerator::EmitCallWithLoadIC");
@@ -2295,14 +2282,12 @@
     // not return to this function.
     EmitProfilingCounterHandlingForReturnSequence(true);
   }
-  Handle<Code> ic =
-      CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
-          .code();
+  Handle<Code> code =
+      CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
   __ Mov(x3, SmiFromSlot(expr->CallFeedbackICSlot()));
   __ Peek(x1, (arg_count + 1) * kXRegSize);
-  // Don't assign a type feedback id to the IC, since type feedback is provided
-  // by the vector above.
-  CallIC(ic);
+  __ Mov(x0, arg_count);
+  CallIC(code);
   OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
@@ -2409,11 +2394,13 @@
   SetCallPosition(expr);
 
   // Call the evaluated function.
+  Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
+                                          expr->tail_call_mode())
+                          .code();
+  __ Mov(x3, SmiFromSlot(expr->CallFeedbackICSlot()));
   __ Peek(x1, (arg_count + 1) * kXRegSize);
   __ Mov(x0, arg_count);
-  __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
-                                      expr->tail_call_mode()),
-          RelocInfo::CODE_TARGET);
+  __ Call(code, RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
   RecordJSReturnSite(expr);
   RestoreContext();
@@ -2453,7 +2440,7 @@
   __ Mov(x3, SmiFromSlot(expr->CallNewFeedbackSlot()));
 
   CallConstructStub stub(isolate());
-  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+  CallIC(stub.GetCode());
   OperandStackDepthDecrement(arg_count + 1);
   PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
   RestoreContext();
@@ -2751,41 +2738,6 @@
   context()->DropAndPlug(1, x0);
 }
 
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ Ldr(x10, FieldMemOperand(x0, String::kHashFieldOffset));
-  __ Tst(x10, String::kContainsCachedArrayIndexMask);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-  VisitForAccumulatorValue(args->at(0));
-
-  __ AssertString(x0);
-
-  __ Ldr(x10, FieldMemOperand(x0, String::kHashFieldOffset));
-  __ IndexFromHash(x10, x0);
-
-  context()->Plug(x0);
-}
-
-
 void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_EQ(1, args->length());
@@ -2976,8 +2928,7 @@
         VisitForTypeofValue(expr->expression());
       }
       __ Mov(x3, x0);
-      TypeofStub typeof_stub(isolate());
-      __ CallStub(&typeof_stub);
+      __ Call(isolate()->builtins()->Typeof(), RelocInfo::CODE_TARGET);
       context()->Plug(x0);
       break;
     }
@@ -3153,11 +3104,12 @@
 
   // Store the value returned in x0.
   switch (assign_type) {
-    case VARIABLE:
+    case VARIABLE: {
+      VariableProxy* proxy = expr->expression()->AsVariableProxy();
       if (expr->is_postfix()) {
         { EffectContext context(this);
-          EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
-                                 Token::ASSIGN, expr->CountSlot());
+          EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+                                 proxy->hole_check_mode());
           PrepareForBailoutForId(expr->AssignmentId(),
                                  BailoutState::TOS_REGISTER);
           context.Plug(x0);
@@ -3168,13 +3120,14 @@
           context()->PlugTOS();
         }
       } else {
-        EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
-                               Token::ASSIGN, expr->CountSlot());
+        EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+                               proxy->hole_check_mode());
         PrepareForBailoutForId(expr->AssignmentId(),
                                BailoutState::TOS_REGISTER);
         context()->Plug(x0);
       }
       break;
+    }
     case NAMED_PROPERTY: {
       PopOperand(StoreDescriptor::ReceiverRegister());
       CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
@@ -3352,8 +3305,7 @@
       VisitForAccumulatorValue(expr->right());
       SetExpressionPosition(expr);
       PopOperand(x1);
-      InstanceOfStub stub(isolate());
-      __ CallStub(&stub);
+      __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ CompareRoot(x0, Heap::kTrueValueRootIndex);
       Split(eq, if_true, if_false, fall_through);
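
Note on the arm64 hunks above, which establish the pattern repeated in the ia32, mips, mips64, ppc, and s390 ports below: CodeFactory::CallIC no longer takes the argument count as a stub parameter; the count is now passed at runtime in the accumulator, next to the callee and the feedback-vector slot. A sketch of the resulting call sequence with the arm64 register assignments used above (commentary only, not part of the patch):

    // x1 = callee, x3 = feedback slot (as a Smi), x0 = argument count.
    Handle<Code> code =
        CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
    __ Mov(x3, SmiFromSlot(expr->CallFeedbackICSlot()));
    __ Peek(x1, (arg_count + 1) * kXRegSize);
    __ Mov(x0, arg_count);
    CallIC(code);  // counted in ic_total_count_, emitted as a CODE_TARGET call

Possibly-eval calls now invoke the CallIC code for ConvertReceiverMode::kAny with the feedback slot loaded, instead of jumping straight to the Call builtin, so they record feedback too. Likewise, EmitCallNew routes CallConstructStub through CallIC(stub.GetCode()) rather than a bare __ Call, which keeps construct sites in the ic_total_count_ statistics.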
diff --git a/src/full-codegen/full-codegen.cc b/src/full-codegen/full-codegen.cc
index 25d7f92..ee5e888 100644
--- a/src/full-codegen/full-codegen.cc
+++ b/src/full-codegen/full-codegen.cc
@@ -223,20 +223,25 @@
   PrepareForBailoutForId(node->id(), state);
 }
 
-void FullCodeGenerator::CallLoadIC(FeedbackVectorSlot slot, Handle<Object> name,
-                                   TypeFeedbackId id) {
+void FullCodeGenerator::CallIC(Handle<Code> code, TypeFeedbackId ast_id) {
+  ic_total_count_++;
+  __ Call(code, RelocInfo::CODE_TARGET, ast_id);
+}
+
+void FullCodeGenerator::CallLoadIC(FeedbackVectorSlot slot,
+                                   Handle<Object> name) {
   DCHECK(name->IsName());
   __ Move(LoadDescriptor::NameRegister(), name);
 
   EmitLoadSlot(LoadDescriptor::SlotRegister(), slot);
 
-  Handle<Code> ic = CodeFactory::LoadIC(isolate()).code();
-  CallIC(ic, id);
+  Handle<Code> code = CodeFactory::LoadIC(isolate()).code();
+  __ Call(code, RelocInfo::CODE_TARGET);
   if (FLAG_tf_load_ic_stub) RestoreContext();
 }
 
 void FullCodeGenerator::CallStoreIC(FeedbackVectorSlot slot,
-                                    Handle<Object> name, TypeFeedbackId id) {
+                                    Handle<Object> name) {
   DCHECK(name->IsName());
   __ Move(StoreDescriptor::NameRegister(), name);
 
@@ -249,8 +254,8 @@
     EmitLoadSlot(StoreDescriptor::SlotRegister(), slot);
   }
 
-  Handle<Code> ic = CodeFactory::StoreIC(isolate(), language_mode()).code();
-  CallIC(ic, id);
+  Handle<Code> code = CodeFactory::StoreIC(isolate(), language_mode()).code();
+  __ Call(code, RelocInfo::CODE_TARGET);
   RestoreContext();
 }
 
@@ -264,9 +269,9 @@
     EmitLoadSlot(StoreDescriptor::SlotRegister(), slot);
   }
 
-  Handle<Code> ic =
+  Handle<Code> code =
       CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-  CallIC(ic);
+  __ Call(code, RelocInfo::CODE_TARGET);
   RestoreContext();
 }
 
@@ -466,9 +471,7 @@
          context->fall_through());
 }
 
-
-void FullCodeGenerator::VisitDeclarations(
-    ZoneList<Declaration*>* declarations) {
+void FullCodeGenerator::VisitDeclarations(Declaration::List* declarations) {
   ZoneList<Handle<Object> >* saved_globals = globals_;
   ZoneList<Handle<Object> > inner_globals(10, zone());
   globals_ = &inner_globals;
@@ -503,8 +506,8 @@
 #endif
   EmitLoadSlot(LoadGlobalDescriptor::SlotRegister(),
                proxy->VariableFeedbackSlot());
-  Handle<Code> ic = CodeFactory::LoadGlobalIC(isolate(), typeof_mode).code();
-  CallIC(ic);
+  Handle<Code> code = CodeFactory::LoadGlobalIC(isolate(), typeof_mode).code();
+  __ Call(code, RelocInfo::CODE_TARGET);
 }
 
 void FullCodeGenerator::VisitSloppyBlockFunctionStatement(
@@ -652,10 +655,6 @@
 }
 
 
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
-  EmitIntrinsicAsStubCall(expr, CodeFactory::RegExpConstructResult(isolate()));
-}
-
 void FullCodeGenerator::EmitHasProperty() {
   Callable callable = CodeFactory::HasProperty(isolate());
   PopOperand(callable.descriptor().GetRegisterParameter(1));
@@ -666,12 +665,14 @@
 
 void FullCodeGenerator::RecordStatementPosition(int pos) {
   DCHECK_NE(kNoSourcePosition, pos);
-  source_position_table_builder_.AddPosition(masm_->pc_offset(), pos, true);
+  source_position_table_builder_.AddPosition(masm_->pc_offset(),
+                                             SourcePosition(pos), true);
 }
 
 void FullCodeGenerator::RecordPosition(int pos) {
   DCHECK_NE(kNoSourcePosition, pos);
-  source_position_table_builder_.AddPosition(masm_->pc_offset(), pos, false);
+  source_position_table_builder_.AddPosition(masm_->pc_offset(),
+                                             SourcePosition(pos), false);
 }
 
 
@@ -683,8 +684,7 @@
 void FullCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
   // For default constructors, start position equals end position, and there
   // is no source code besides the class literal.
-  int pos = std::max(fun->start_position(), fun->end_position() - 1);
-  RecordStatementPosition(pos);
+  RecordStatementPosition(fun->return_position());
   if (info_->is_debug()) {
     // Always emit a debug break slot before a return.
     DebugCodegen::GenerateSlot(masm_, RelocInfo::DEBUG_BREAK_SLOT_AT_RETURN);
@@ -746,7 +746,7 @@
 
 
 void FullCodeGenerator::EmitDebugBreakInOptimizedCode(CallRuntime* expr) {
-  context()->Plug(handle(Smi::FromInt(0), isolate()));
+  context()->Plug(handle(Smi::kZero, isolate()));
 }
 
 
@@ -1126,8 +1126,8 @@
 
   EmitLoadSlot(LoadDescriptor::SlotRegister(), prop->PropertyFeedbackSlot());
 
-  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
-  CallIC(ic);
+  Handle<Code> code = CodeFactory::KeyedLoadIC(isolate()).code();
+  __ Call(code, RelocInfo::CODE_TARGET);
   RestoreContext();
 }
 
@@ -1582,7 +1582,7 @@
 
   if (lit->class_variable_proxy() != nullptr) {
     EmitVariableAssignment(lit->class_variable_proxy()->var(), Token::INIT,
-                           lit->ProxySlot());
+                           lit->ProxySlot(), HoleCheckMode::kElided);
   }
 
   context()->Plug(result_register());
@@ -1668,47 +1668,48 @@
   Expression* callee = expr->expression();
   Call::CallType call_type = expr->GetCallType();
 
-  switch (call_type) {
-    case Call::POSSIBLY_EVAL_CALL:
-      EmitPossiblyEvalCall(expr);
-      break;
-    case Call::GLOBAL_CALL:
-      EmitCallWithLoadIC(expr);
-      break;
-    case Call::LOOKUP_SLOT_CALL:
-      // Call to a lookup slot (dynamically introduced variable).
-      PushCalleeAndWithBaseObject(expr);
-      EmitCall(expr);
-      break;
-    case Call::NAMED_PROPERTY_CALL: {
-      Property* property = callee->AsProperty();
-      VisitForStackValue(property->obj());
-      EmitCallWithLoadIC(expr);
-      break;
+  if (expr->is_possibly_eval()) {
+    EmitPossiblyEvalCall(expr);
+  } else {
+    switch (call_type) {
+      case Call::GLOBAL_CALL:
+        EmitCallWithLoadIC(expr);
+        break;
+      case Call::WITH_CALL:
+        // Call to a lookup slot looked up through a with scope.
+        PushCalleeAndWithBaseObject(expr);
+        EmitCall(expr);
+        break;
+      case Call::NAMED_PROPERTY_CALL: {
+        Property* property = callee->AsProperty();
+        VisitForStackValue(property->obj());
+        EmitCallWithLoadIC(expr);
+        break;
+      }
+      case Call::KEYED_PROPERTY_CALL: {
+        Property* property = callee->AsProperty();
+        VisitForStackValue(property->obj());
+        EmitKeyedCallWithLoadIC(expr, property->key());
+        break;
+      }
+      case Call::NAMED_SUPER_PROPERTY_CALL:
+        EmitSuperCallWithLoadIC(expr);
+        break;
+      case Call::KEYED_SUPER_PROPERTY_CALL:
+        EmitKeyedSuperCallWithLoadIC(expr);
+        break;
+      case Call::SUPER_CALL:
+        EmitSuperConstructorCall(expr);
+        break;
+      case Call::OTHER_CALL:
+        // Call to an arbitrary expression not handled specially above.
+        VisitForStackValue(callee);
+        OperandStackDepthIncrement(1);
+        __ PushRoot(Heap::kUndefinedValueRootIndex);
+        // Emit function call.
+        EmitCall(expr);
+        break;
     }
-    case Call::KEYED_PROPERTY_CALL: {
-      Property* property = callee->AsProperty();
-      VisitForStackValue(property->obj());
-      EmitKeyedCallWithLoadIC(expr, property->key());
-      break;
-    }
-    case Call::NAMED_SUPER_PROPERTY_CALL:
-      EmitSuperCallWithLoadIC(expr);
-      break;
-    case Call::KEYED_SUPER_PROPERTY_CALL:
-      EmitKeyedSuperCallWithLoadIC(expr);
-      break;
-    case Call::SUPER_CALL:
-      EmitSuperConstructorCall(expr);
-      break;
-    case Call::OTHER_CALL:
-      // Call to an arbitrary expression not handled specially above.
-      VisitForStackValue(callee);
-      OperandStackDepthIncrement(1);
-      __ PushRoot(Heap::kUndefinedValueRootIndex);
-      // Emit function call.
-      EmitCall(expr);
-      break;
   }
 
 #ifdef DEBUG
@@ -1982,65 +1983,6 @@
   codegen_->scope_ = saved_scope_;
 }
 
-
-bool FullCodeGenerator::NeedsHoleCheckForLoad(VariableProxy* proxy) {
-  Variable* var = proxy->var();
-
-  if (!var->binding_needs_init()) {
-    return false;
-  }
-
-  // var->scope() may be NULL when the proxy is located in eval code and
-  // refers to a potential outside binding. Currently those bindings are
-  // always looked up dynamically, i.e. in that case
-  //     var->location() == LOOKUP.
-  // always holds.
-  DCHECK(var->scope() != NULL);
-  DCHECK(var->location() == VariableLocation::PARAMETER ||
-         var->location() == VariableLocation::LOCAL ||
-         var->location() == VariableLocation::CONTEXT);
-
-  // Check if the binding really needs an initialization check. The check
-  // can be skipped in the following situation: we have a LET or CONST
-  // binding in harmony mode, both the Variable and the VariableProxy have
-  // the same declaration scope (i.e. they are both in global code, in the
-  // same function or in the same eval code), the VariableProxy is in
-  // the source physically located after the initializer of the variable,
-  // and that the initializer cannot be skipped due to a nonlinear scope.
-  //
-  // We cannot skip any initialization checks for CONST in non-harmony
-  // mode because const variables may be declared but never initialized:
-  //   if (false) { const x; }; var y = x;
-  //
-  // The condition on the declaration scopes is a conservative check for
-  // nested functions that access a binding and are called before the
-  // binding is initialized:
-  //   function() { f(); let x = 1; function f() { x = 2; } }
-  //
-  // The check cannot be skipped on non-linear scopes, namely switch
-  // scopes, to ensure tests are done in cases like the following:
-  //   switch (1) { case 0: let x = 2; case 1: f(x); }
-  // The scope of the variable needs to be checked, in case the use is
-  // in a sub-block which may be linear.
-  if (var->scope()->GetDeclarationScope() != scope()->GetDeclarationScope()) {
-    return true;
-  }
-
-  if (var->is_this()) {
-    DCHECK(literal() != nullptr &&
-           (literal()->kind() & kSubclassConstructor) != 0);
-    // TODO(littledan): implement 'this' hole check elimination.
-    return true;
-  }
-
-  // Check that we always have valid source position.
-  DCHECK(var->initializer_position() != kNoSourcePosition);
-  DCHECK(proxy->position() != kNoSourcePosition);
-
-  return var->scope()->is_nonlinear() ||
-         var->initializer_position() >= proxy->position();
-}
-
 Handle<Script> FullCodeGenerator::script() { return info_->script(); }
 
 LanguageMode FullCodeGenerator::language_mode() {
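
The large deletion here, NeedsHoleCheckForLoad, is not lost logic: the same analysis (declaration-scope comparison, nonlinear scopes, initializer position, the 'this' special case) now runs once during parsing/scope resolution, and its verdict is stored on each VariableProxy as a HoleCheckMode. Full-codegen only consumes the cached answer. A sketch of the consumer side, assuming the two-state enum this revision introduces (enum class HoleCheckMode { kRequired, kElided }):

    VariableProxy* proxy = expr->target()->AsVariableProxy();
    if (proxy->hole_check_mode() == HoleCheckMode::kRequired) {
      // Load the slot, compare against the_hole, and throw a
      // ReferenceError for an uninitialized let/const binding.
    } else {
      // TDZ check elided: plain load/store.
    }

Threading the mode through EmitVariableAssignment keeps loads and stores agreeing on the same decision; the one caller that can never need a check, the class-variable INIT store, passes HoleCheckMode::kElided explicitly.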
diff --git a/src/full-codegen/full-codegen.h b/src/full-codegen/full-codegen.h
index 2a4eb9d..558dae1 100644
--- a/src/full-codegen/full-codegen.h
+++ b/src/full-codegen/full-codegen.h
@@ -354,7 +354,7 @@
 
   void VisitInDuplicateContext(Expression* expr);
 
-  void VisitDeclarations(ZoneList<Declaration*>* declarations);
+  void VisitDeclarations(Declaration::List* declarations);
   void DeclareGlobals(Handle<FixedArray> pairs);
   int DeclareGlobalsFlags();
 
@@ -478,15 +478,12 @@
   F(Call)                               \
   F(NewObject)                          \
   F(IsJSReceiver)                       \
-  F(HasCachedArrayIndex)                \
-  F(GetCachedArrayIndex)                \
   F(GetSuperConstructor)                \
   F(DebugBreakInOptimizedCode)          \
   F(ClassOf)                            \
   F(StringCharCodeAt)                   \
   F(SubString)                          \
   F(RegExpExec)                         \
-  F(RegExpConstructResult)              \
   F(ToInteger)                          \
   F(NumberToString)                     \
   F(ToString)                           \
@@ -520,8 +517,6 @@
 
   void EmitAccessor(ObjectLiteralProperty* property);
 
-  bool NeedsHoleCheckForLoad(VariableProxy* proxy);
-
   // Expects the arguments and the function already pushed.
   void EmitResolvePossiblyDirectEval(Call* expr);
 
@@ -576,7 +571,8 @@
   // Complete a variable assignment.  The right-hand-side value is expected
   // in the accumulator.
   void EmitVariableAssignment(Variable* var, Token::Value op,
-                              FeedbackVectorSlot slot);
+                              FeedbackVectorSlot slot,
+                              HoleCheckMode hole_check_mode);
 
   // Helper functions to EmitVariableAssignment
   void EmitStoreToStackLocalOrContextSlot(Variable* var,
@@ -620,10 +616,8 @@
   void CallIC(Handle<Code> code,
               TypeFeedbackId id = TypeFeedbackId::None());
 
-  void CallLoadIC(FeedbackVectorSlot slot, Handle<Object> name,
-                  TypeFeedbackId id = TypeFeedbackId::None());
-  void CallStoreIC(FeedbackVectorSlot slot, Handle<Object> name,
-                   TypeFeedbackId id = TypeFeedbackId::None());
+  void CallLoadIC(FeedbackVectorSlot slot, Handle<Object> name);
+  void CallStoreIC(FeedbackVectorSlot slot, Handle<Object> name);
   void CallKeyedStoreIC(FeedbackVectorSlot slot);
 
   void SetFunctionPosition(FunctionLiteral* fun);
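
Header summary of the interface change: CallLoadIC and CallStoreIC lose their TypeFeedbackId parameter, so the feedback-vector slot becomes the only feedback channel for loads and stores; only the generic CallIC keeps an optional id. Call sites migrate mechanically (the id below is hypothetical, shown only for illustration):

    // Before: an AST-based type feedback id could accompany the IC call.
    //   CallLoadIC(slot, name, some_ast_id);
    // After: the vector slot alone identifies the feedback.
    //   CallLoadIC(slot, name);

The VisitDeclarations switch from ZoneList<Declaration*>* to Declaration::List* matches the AST change in this roll that threads declarations through an intrusive list instead of collecting them in a zone-allocated array.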
diff --git a/src/full-codegen/ia32/full-codegen-ia32.cc b/src/full-codegen/ia32/full-codegen-ia32.cc
index e5f66cd..5e80dd3 100644
--- a/src/full-codegen/ia32/full-codegen-ia32.cc
+++ b/src/full-codegen/ia32/full-codegen-ia32.cc
@@ -348,7 +348,7 @@
 
 
 void FullCodeGenerator::ClearAccumulator() {
-  __ Move(eax, Immediate(Smi::FromInt(0)));
+  __ Move(eax, Immediate(Smi::kZero));
 }
 
 
@@ -960,8 +960,7 @@
   __ cmp(eax, isolate()->factory()->null_value());
   __ j(equal, &exit);
   __ bind(&convert);
-  ToObjectStub stub(isolate());
-  __ CallStub(&stub);
+  __ Call(isolate()->builtins()->ToObject(), RelocInfo::CODE_TARGET);
   RestoreContext();
   __ bind(&done_convert);
   PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
@@ -992,7 +991,7 @@
   __ bind(&use_cache);
 
   __ EnumLength(edx, eax);
-  __ cmp(edx, Immediate(Smi::FromInt(0)));
+  __ cmp(edx, Immediate(Smi::kZero));
   __ j(equal, &no_descriptors);
 
   __ LoadInstanceDescriptors(eax, ecx);
@@ -1003,7 +1002,7 @@
   __ push(eax);  // Map.
   __ push(ecx);  // Enumeration cache.
   __ push(edx);  // Number of valid entries for the map in the enum cache.
-  __ push(Immediate(Smi::FromInt(0)));  // Initial index.
+  __ push(Immediate(Smi::kZero));  // Initial index.
   __ jmp(&loop);
 
   __ bind(&no_descriptors);
@@ -1018,7 +1017,7 @@
   __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
   __ push(eax);  // Fixed array length (as smi).
   PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
-  __ push(Immediate(Smi::FromInt(0)));  // Initial index.
+  __ push(Immediate(Smi::kZero));  // Initial index.
 
   // Generate code for doing the condition check.
   __ bind(&loop);
@@ -1050,10 +1049,9 @@
          Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
 
   // eax contains the key.  The receiver in ebx is the second argument to the
-  // ForInFilterStub.  ForInFilter returns undefined if the receiver doesn't
+  // ForInFilter.  ForInFilter returns undefined if the receiver doesn't
   // have the key or returns the name-converted key.
-  ForInFilterStub filter_stub(isolate());
-  __ CallStub(&filter_stub);
+  __ Call(isolate()->builtins()->ForInFilter(), RelocInfo::CODE_TARGET);
   RestoreContext();
   PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
   __ JumpIfRoot(result_register(), Heap::kUndefinedValueRootIndex,
@@ -1217,7 +1215,7 @@
       Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
                                                : "[ Stack variable");
 
-      if (NeedsHoleCheckForLoad(proxy)) {
+      if (proxy->hole_check_mode() == HoleCheckMode::kRequired) {
         // Throw a reference error when using an uninitialized let/const
         // binding in harmony mode.
         Label done;
@@ -1650,12 +1648,14 @@
 
   // Store the value.
   switch (assign_type) {
-    case VARIABLE:
-      EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
-                             expr->op(), expr->AssignmentSlot());
+    case VARIABLE: {
+      VariableProxy* proxy = expr->target()->AsVariableProxy();
+      EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
+                             proxy->hole_check_mode());
       PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       context()->Plug(eax);
       break;
+    }
     case NAMED_PROPERTY:
       EmitNamedPropertyAssignment(expr);
       break;
@@ -1934,9 +1934,10 @@
 
   switch (assign_type) {
     case VARIABLE: {
-      Variable* var = expr->AsVariableProxy()->var();
+      VariableProxy* proxy = expr->AsVariableProxy();
       EffectContext context(this);
-      EmitVariableAssignment(var, Token::ASSIGN, slot);
+      EmitVariableAssignment(proxy->var(), Token::ASSIGN, slot,
+                             proxy->hole_check_mode());
       break;
     }
     case NAMED_PROPERTY: {
@@ -2009,9 +2010,9 @@
   }
 }
 
-
 void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
-                                               FeedbackVectorSlot slot) {
+                                               FeedbackVectorSlot slot,
+                                               HoleCheckMode hole_check_mode) {
   if (var->IsUnallocated()) {
     // Global var, const, or let.
     __ mov(StoreDescriptor::ReceiverRegister(), NativeContextOperand());
@@ -2025,7 +2026,7 @@
     DCHECK(var->IsStackAllocated() || var->IsContextSlot());
     MemOperand location = VarOperand(var, ecx);
     // Perform an initialization check for lexically declared variables.
-    if (var->binding_needs_init()) {
+    if (hole_check_mode == HoleCheckMode::kRequired) {
       Label assign;
       __ mov(edx, location);
       __ cmp(edx, isolate()->factory()->the_hole_value());
@@ -2135,14 +2136,6 @@
   context()->Plug(eax);
 }
 
-
-void FullCodeGenerator::CallIC(Handle<Code> code,
-                               TypeFeedbackId ast_id) {
-  ic_total_count_++;
-  __ call(code, RelocInfo::CODE_TARGET, ast_id);
-}
-
-
 // Code common for calls using the IC.
 void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
   Expression* callee = expr->expression();
@@ -2288,14 +2281,12 @@
     // not return to this function.
     EmitProfilingCounterHandlingForReturnSequence(true);
   }
-  Handle<Code> ic =
-      CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
-          .code();
+  Handle<Code> code =
+      CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
   __ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
   __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
-  // Don't assign a type feedback id to the IC, since type feedback is provided
-  // by the vector above.
-  CallIC(ic);
+  __ Move(eax, Immediate(arg_count));
+  CallIC(code);
   OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
@@ -2394,11 +2385,13 @@
   PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
 
   SetCallPosition(expr);
+  Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
+                                          expr->tail_call_mode())
+                          .code();
+  __ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
   __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
-  __ Set(eax, arg_count);
-  __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
-                                      expr->tail_call_mode()),
-          RelocInfo::CODE_TARGET);
+  __ Move(eax, Immediate(arg_count));
+  __ call(code, RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
   RecordJSReturnSite(expr);
   RestoreContext();
@@ -2438,7 +2431,7 @@
   __ mov(edx, Immediate(SmiFromSlot(expr->CallNewFeedbackSlot())));
 
   CallConstructStub stub(isolate());
-  __ call(stub.GetCode(), RelocInfo::CODE_TARGET);
+  CallIC(stub.GetCode());
   OperandStackDepthDecrement(arg_count + 1);
   PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
   RestoreContext();
@@ -2729,45 +2722,6 @@
   context()->DropAndPlug(1, eax);
 }
 
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  __ AssertString(eax);
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ test(FieldOperand(eax, String::kHashFieldOffset),
-          Immediate(String::kContainsCachedArrayIndexMask));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(zero, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-  VisitForAccumulatorValue(args->at(0));
-
-  __ AssertString(eax);
-
-  __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
-  __ IndexFromHash(eax, eax);
-
-  context()->Plug(eax);
-}
-
-
 void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_EQ(1, args->length());
@@ -2951,8 +2905,7 @@
         VisitForTypeofValue(expr->expression());
       }
       __ mov(ebx, eax);
-      TypeofStub typeof_stub(isolate());
-      __ CallStub(&typeof_stub);
+      __ Call(isolate()->builtins()->Typeof(), RelocInfo::CODE_TARGET);
       context()->Plug(eax);
       break;
     }
@@ -2979,7 +2932,7 @@
   } else {
     // Reserve space for result of postfix operation.
     if (expr->is_postfix() && !context()->IsEffect()) {
-      PushOperand(Smi::FromInt(0));
+      PushOperand(Smi::kZero);
     }
     switch (assign_type) {
       case NAMED_PROPERTY: {
@@ -3131,12 +3084,13 @@
 
   // Store the value returned in eax.
   switch (assign_type) {
-    case VARIABLE:
+    case VARIABLE: {
+      VariableProxy* proxy = expr->expression()->AsVariableProxy();
       if (expr->is_postfix()) {
         // Perform the assignment as if via '='.
         { EffectContext context(this);
-          EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
-                                 Token::ASSIGN, expr->CountSlot());
+          EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+                                 proxy->hole_check_mode());
           PrepareForBailoutForId(expr->AssignmentId(),
                                  BailoutState::TOS_REGISTER);
           context.Plug(eax);
@@ -3148,13 +3102,14 @@
         }
       } else {
         // Perform the assignment as if via '='.
-        EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
-                               Token::ASSIGN, expr->CountSlot());
+        EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+                               proxy->hole_check_mode());
         PrepareForBailoutForId(expr->AssignmentId(),
                                BailoutState::TOS_REGISTER);
         context()->Plug(eax);
       }
       break;
+    }
     case NAMED_PROPERTY: {
       PopOperand(StoreDescriptor::ReceiverRegister());
       CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
@@ -3322,8 +3277,7 @@
       VisitForAccumulatorValue(expr->right());
       SetExpressionPosition(expr);
       PopOperand(edx);
-      InstanceOfStub stub(isolate());
-      __ CallStub(&stub);
+      __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ cmp(eax, isolate()->factory()->true_value());
       Split(equal, if_true, if_false, fall_through);
diff --git a/src/full-codegen/mips/full-codegen-mips.cc b/src/full-codegen/mips/full-codegen-mips.cc
index 7f97686..10cdb54 100644
--- a/src/full-codegen/mips/full-codegen-mips.cc
+++ b/src/full-codegen/mips/full-codegen-mips.cc
@@ -368,7 +368,7 @@
 
 
 void FullCodeGenerator::ClearAccumulator() {
-  DCHECK(Smi::FromInt(0) == 0);
+  DCHECK(Smi::kZero == 0);
   __ mov(v0, zero_reg);
 }
 
@@ -1018,8 +1018,7 @@
   __ LoadRoot(at, Heap::kUndefinedValueRootIndex);  // In delay slot.
   __ Branch(&exit, eq, a0, Operand(at));
   __ bind(&convert);
-  ToObjectStub stub(isolate());
-  __ CallStub(&stub);
+  __ Call(isolate()->builtins()->ToObject(), RelocInfo::CODE_TARGET);
   RestoreContext();
   __ mov(a0, v0);
   __ bind(&done_convert);
@@ -1058,14 +1057,14 @@
   __ bind(&use_cache);
 
   __ EnumLength(a1, v0);
-  __ Branch(&no_descriptors, eq, a1, Operand(Smi::FromInt(0)));
+  __ Branch(&no_descriptors, eq, a1, Operand(Smi::kZero));
 
   __ LoadInstanceDescriptors(v0, a2);
   __ lw(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheOffset));
   __ lw(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheBridgeCacheOffset));
 
   // Set up the four remaining stack slots.
-  __ li(a0, Operand(Smi::FromInt(0)));
+  __ li(a0, Operand(Smi::kZero));
   // Push map, enumeration cache, enumeration cache length (as smi) and zero.
   __ Push(v0, a2, a1, a0);
   __ jmp(&loop);
@@ -1082,7 +1081,7 @@
   __ lw(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
   __ Push(a1);  // Fixed array length (as smi).
   PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
-  __ li(a0, Operand(Smi::FromInt(0)));
+  __ li(a0, Operand(Smi::kZero));
   __ Push(a0);  // Initial index.
 
   // Generate code for doing the condition check.
@@ -1119,10 +1118,9 @@
 
   __ mov(a0, result_register());
   // a0 contains the key. The receiver in a1 is the second argument to the
-  // ForInFilterStub. ForInFilter returns undefined if the receiver doesn't
+  // ForInFilter. ForInFilter returns undefined if the receiver doesn't
   // have the key or returns the name-converted key.
-  ForInFilterStub filter_stub(isolate());
-  __ CallStub(&filter_stub);
+  __ Call(isolate()->builtins()->ForInFilter(), RelocInfo::CODE_TARGET);
   RestoreContext();
   PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
   __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
@@ -1295,7 +1293,7 @@
       DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
       Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
                                                : "[ Stack variable");
-      if (NeedsHoleCheckForLoad(proxy)) {
+      if (proxy->hole_check_mode() == HoleCheckMode::kRequired) {
         // Throw a reference error when using an uninitialized let/const
         // binding in harmony mode.
         Label done;
@@ -1739,12 +1737,14 @@
 
   // Store the value.
   switch (assign_type) {
-    case VARIABLE:
-      EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
-                             expr->op(), expr->AssignmentSlot());
+    case VARIABLE: {
+      VariableProxy* proxy = expr->target()->AsVariableProxy();
+      EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
+                             proxy->hole_check_mode());
       PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       context()->Plug(v0);
       break;
+    }
     case NAMED_PROPERTY:
       EmitNamedPropertyAssignment(expr);
       break;
@@ -1938,7 +1938,7 @@
       __ Branch(&done, ne, v0, Operand(zero_reg));
       __ Addu(scratch2, right, left);
       __ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
-      DCHECK(Smi::FromInt(0) == 0);
+      DCHECK(Smi::kZero == 0);
       __ mov(v0, zero_reg);
       break;
     }
@@ -2033,9 +2033,10 @@
 
   switch (assign_type) {
     case VARIABLE: {
-      Variable* var = expr->AsVariableProxy()->var();
+      VariableProxy* proxy = expr->AsVariableProxy();
       EffectContext context(this);
-      EmitVariableAssignment(var, Token::ASSIGN, slot);
+      EmitVariableAssignment(proxy->var(), Token::ASSIGN, slot,
+                             proxy->hole_check_mode());
       break;
     }
     case NAMED_PROPERTY: {
@@ -2110,9 +2111,9 @@
   }
 }
 
-
 void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
-                                               FeedbackVectorSlot slot) {
+                                               FeedbackVectorSlot slot,
+                                               HoleCheckMode hole_check_mode) {
   if (var->IsUnallocated()) {
     // Global var, const, or let.
     __ mov(StoreDescriptor::ValueRegister(), result_register());
@@ -2124,7 +2125,7 @@
     DCHECK(var->IsStackAllocated() || var->IsContextSlot());
     MemOperand location = VarOperand(var, a1);
     // Perform an initialization check for lexically declared variables.
-    if (var->binding_needs_init()) {
+    if (hole_check_mode == HoleCheckMode::kRequired) {
       Label assign;
       __ lw(a3, location);
       __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
@@ -2241,14 +2242,6 @@
   context()->Plug(v0);
 }
 
-
-void FullCodeGenerator::CallIC(Handle<Code> code,
-                               TypeFeedbackId id) {
-  ic_total_count_++;
-  __ Call(code, RelocInfo::CODE_TARGET, id);
-}
-
-
 // Code common for calls using the IC.
 void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
   Expression* callee = expr->expression();
@@ -2400,14 +2393,12 @@
     // not return to this function.
     EmitProfilingCounterHandlingForReturnSequence(true);
   }
-  Handle<Code> ic =
-      CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
-          .code();
+  Handle<Code> code =
+      CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
   __ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
   __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
-  // Don't assign a type feedback id to the IC, since type feedback is provided
-  // by the vector above.
-  CallIC(ic);
+  __ li(a0, Operand(arg_count));
+  CallIC(code);
   OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
@@ -2510,11 +2501,13 @@
   PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
   // Record source position for debugger.
   SetCallPosition(expr);
+  Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
+                                          expr->tail_call_mode())
+                          .code();
+  __ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
   __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
   __ li(a0, Operand(arg_count));
-  __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
-                                      expr->tail_call_mode()),
-          RelocInfo::CODE_TARGET);
+  __ Call(code, RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
   RecordJSReturnSite(expr);
   RestoreContext();
@@ -2554,7 +2547,7 @@
   __ li(a3, Operand(SmiFromSlot(expr->CallNewFeedbackSlot())));
 
   CallConstructStub stub(isolate());
-  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+  CallIC(stub.GetCode());
   OperandStackDepthDecrement(arg_count + 1);
   PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
   RestoreContext();
@@ -2850,42 +2843,6 @@
   context()->DropAndPlug(1, v0);
 }
 
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ lw(a0, FieldMemOperand(v0, String::kHashFieldOffset));
-  __ And(a0, a0, Operand(String::kContainsCachedArrayIndexMask));
-
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, a0, Operand(zero_reg), if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-  VisitForAccumulatorValue(args->at(0));
-
-  __ AssertString(v0);
-
-  __ lw(v0, FieldMemOperand(v0, String::kHashFieldOffset));
-  __ IndexFromHash(v0, v0);
-
-  context()->Plug(v0);
-}
-
-
 void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_EQ(1, args->length());
@@ -3063,8 +3020,7 @@
         VisitForTypeofValue(expr->expression());
       }
       __ mov(a3, v0);
-      TypeofStub typeof_stub(isolate());
-      __ CallStub(&typeof_stub);
+      __ Call(isolate()->builtins()->Typeof(), RelocInfo::CODE_TARGET);
       context()->Plug(v0);
       break;
     }
@@ -3091,7 +3047,7 @@
   } else {
     // Reserve space for result of postfix operation.
     if (expr->is_postfix() && !context()->IsEffect()) {
-      __ li(at, Operand(Smi::FromInt(0)));
+      __ li(at, Operand(Smi::kZero));
       PushOperand(at);
     }
     switch (assign_type) {
@@ -3240,11 +3196,12 @@
 
   // Store the value returned in v0.
   switch (assign_type) {
-    case VARIABLE:
+    case VARIABLE: {
+      VariableProxy* proxy = expr->expression()->AsVariableProxy();
       if (expr->is_postfix()) {
         { EffectContext context(this);
-          EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
-                                 Token::ASSIGN, expr->CountSlot());
+          EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+                                 proxy->hole_check_mode());
           PrepareForBailoutForId(expr->AssignmentId(),
                                  BailoutState::TOS_REGISTER);
           context.Plug(v0);
@@ -3255,13 +3212,14 @@
           context()->PlugTOS();
         }
       } else {
-        EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
-                               Token::ASSIGN, expr->CountSlot());
+        EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+                               proxy->hole_check_mode());
         PrepareForBailoutForId(expr->AssignmentId(),
                                BailoutState::TOS_REGISTER);
         context()->Plug(v0);
       }
       break;
+    }
     case NAMED_PROPERTY: {
       __ mov(StoreDescriptor::ValueRegister(), result_register());
       PopOperand(StoreDescriptor::ReceiverRegister());
@@ -3433,8 +3391,7 @@
       SetExpressionPosition(expr);
       __ mov(a0, result_register());
       PopOperand(a1);
-      InstanceOfStub stub(isolate());
-      __ CallStub(&stub);
+      __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ LoadRoot(at, Heap::kTrueValueRootIndex);
       Split(eq, v0, Operand(at), if_true, if_false, fall_through);
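
On the Smi::kZero conversions: the DCHECK(Smi::kZero == 0) retained above is load-bearing. With V8's pointer tagging (kSmiTag == 0), a small integer is encoded as value << (kSmiTagSize + kSmiShiftSize), so the zero Smi is the all-zero machine word and materializing it from the zero register is valid:

    // Sketch of the invariant the mips ports rely on (standard V8 tagging
    // assumed: kSmiTag == 0, tagged = value << (kSmiTagSize + kSmiShiftSize)):
    //   value == 0  =>  tagged word == 0
    DCHECK(Smi::kZero == 0);   // the zero Smi is the null word
    __ mov(v0, zero_reg);      // no need for __ li(v0, Operand(Smi::kZero))

Everywhere else in this merge the change is a pure spelling swap, Smi::FromInt(0) -> Smi::kZero.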
diff --git a/src/full-codegen/mips64/full-codegen-mips64.cc b/src/full-codegen/mips64/full-codegen-mips64.cc
index 660adb1..7640c52 100644
--- a/src/full-codegen/mips64/full-codegen-mips64.cc
+++ b/src/full-codegen/mips64/full-codegen-mips64.cc
@@ -367,7 +367,7 @@
 
 
 void FullCodeGenerator::ClearAccumulator() {
-  DCHECK(Smi::FromInt(0) == 0);
+  DCHECK(Smi::kZero == 0);
   __ mov(v0, zero_reg);
 }
 
@@ -1018,8 +1018,7 @@
   __ LoadRoot(at, Heap::kUndefinedValueRootIndex);  // In delay slot.
   __ Branch(&exit, eq, a0, Operand(at));
   __ bind(&convert);
-  ToObjectStub stub(isolate());
-  __ CallStub(&stub);
+  __ Call(isolate()->builtins()->ToObject(), RelocInfo::CODE_TARGET);
   RestoreContext();
   __ mov(a0, v0);
   __ bind(&done_convert);
@@ -1058,14 +1057,14 @@
   __ bind(&use_cache);
 
   __ EnumLength(a1, v0);
-  __ Branch(&no_descriptors, eq, a1, Operand(Smi::FromInt(0)));
+  __ Branch(&no_descriptors, eq, a1, Operand(Smi::kZero));
 
   __ LoadInstanceDescriptors(v0, a2);
   __ ld(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheOffset));
   __ ld(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheBridgeCacheOffset));
 
   // Set up the four remaining stack slots.
-  __ li(a0, Operand(Smi::FromInt(0)));
+  __ li(a0, Operand(Smi::kZero));
   // Push map, enumeration cache, enumeration cache length (as smi) and zero.
   __ Push(v0, a2, a1, a0);
   __ jmp(&loop);
@@ -1082,7 +1081,7 @@
   __ ld(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
   __ Push(a1);  // Fixed array length (as smi).
   PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
-  __ li(a0, Operand(Smi::FromInt(0)));
+  __ li(a0, Operand(Smi::kZero));
   __ Push(a0);  // Initial index.
 
   // Generate code for doing the condition check.
@@ -1120,10 +1119,9 @@
 
   __ mov(a0, result_register());
   // a0 contains the key. The receiver in a1 is the second argument to the
-  // ForInFilterStub. ForInFilter returns undefined if the receiver doesn't
+  // ForInFilter. ForInFilter returns undefined if the receiver doesn't
   // have the key or returns the name-converted key.
-  ForInFilterStub filter_stub(isolate());
-  __ CallStub(&filter_stub);
+  __ Call(isolate()->builtins()->ForInFilter(), RelocInfo::CODE_TARGET);
   RestoreContext();
   PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
   __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
@@ -1296,7 +1294,7 @@
       DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
       Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
                                                : "[ Stack variable");
-      if (NeedsHoleCheckForLoad(proxy)) {
+      if (proxy->hole_check_mode() == HoleCheckMode::kRequired) {
         // Throw a reference error when using an uninitialized let/const
         // binding in harmony mode.
         Label done;
@@ -1740,12 +1738,14 @@
 
   // Store the value.
   switch (assign_type) {
-    case VARIABLE:
-      EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
-                             expr->op(), expr->AssignmentSlot());
+    case VARIABLE: {
+      VariableProxy* proxy = expr->target()->AsVariableProxy();
+      EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
+                             proxy->hole_check_mode());
       PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       context()->Plug(v0);
       break;
+    }
     case NAMED_PROPERTY:
       EmitNamedPropertyAssignment(expr);
       break;
@@ -1938,7 +1938,7 @@
       __ Branch(USE_DELAY_SLOT, &done, ne, v0, Operand(zero_reg));
       __ Daddu(scratch2, right, left);
       __ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
-      DCHECK(Smi::FromInt(0) == 0);
+      DCHECK(Smi::kZero == 0);
       __ mov(v0, zero_reg);
       break;
     }
@@ -2033,9 +2033,10 @@
 
   switch (assign_type) {
     case VARIABLE: {
-      Variable* var = expr->AsVariableProxy()->var();
+      VariableProxy* proxy = expr->AsVariableProxy();
       EffectContext context(this);
-      EmitVariableAssignment(var, Token::ASSIGN, slot);
+      EmitVariableAssignment(proxy->var(), Token::ASSIGN, slot,
+                             proxy->hole_check_mode());
       break;
     }
     case NAMED_PROPERTY: {
@@ -2110,9 +2111,9 @@
   }
 }
 
-
 void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
-                                               FeedbackVectorSlot slot) {
+                                               FeedbackVectorSlot slot,
+                                               HoleCheckMode hole_check_mode) {
   if (var->IsUnallocated()) {
     // Global var, const, or let.
     __ mov(StoreDescriptor::ValueRegister(), result_register());
@@ -2124,7 +2125,7 @@
     DCHECK(var->IsStackAllocated() || var->IsContextSlot());
     MemOperand location = VarOperand(var, a1);
     // Perform an initialization check for lexically declared variables.
-    if (var->binding_needs_init()) {
+    if (hole_check_mode == HoleCheckMode::kRequired) {
       Label assign;
       __ ld(a3, location);
       __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
@@ -2240,14 +2241,6 @@
   context()->Plug(v0);
 }
 
-
-void FullCodeGenerator::CallIC(Handle<Code> code,
-                               TypeFeedbackId id) {
-  ic_total_count_++;
-  __ Call(code, RelocInfo::CODE_TARGET, id);
-}
-
-
 // Code common for calls using the IC.
 void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
   Expression* callee = expr->expression();
@@ -2399,14 +2392,12 @@
     // not return to this function.
     EmitProfilingCounterHandlingForReturnSequence(true);
   }
-  Handle<Code> ic =
-      CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
-          .code();
+  Handle<Code> code =
+      CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
   __ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
   __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
-  // Don't assign a type feedback id to the IC, since type feedback is provided
-  // by the vector above.
-  CallIC(ic);
+  __ li(a0, Operand(arg_count));
+  CallIC(code);
   OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
@@ -2509,11 +2500,13 @@
   PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
   // Record source position for debugger.
   SetCallPosition(expr);
+  Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
+                                          expr->tail_call_mode())
+                          .code();
+  __ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
   __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
   __ li(a0, Operand(arg_count));
-  __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
-                                      expr->tail_call_mode()),
-          RelocInfo::CODE_TARGET);
+  __ Call(code, RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
   RecordJSReturnSite(expr);
   RestoreContext();
@@ -2553,7 +2546,7 @@
   __ li(a3, Operand(SmiFromSlot(expr->CallNewFeedbackSlot())));
 
   CallConstructStub stub(isolate());
-  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+  CallIC(stub.GetCode());
   OperandStackDepthDecrement(arg_count + 1);
   PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
   RestoreContext();
@@ -2849,42 +2842,6 @@
   context()->DropAndPlug(1, v0);
 }
 
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ lwu(a0, FieldMemOperand(v0, String::kHashFieldOffset));
-  __ And(a0, a0, Operand(String::kContainsCachedArrayIndexMask));
-
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, a0, Operand(zero_reg), if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-  VisitForAccumulatorValue(args->at(0));
-
-  __ AssertString(v0);
-
-  __ lwu(v0, FieldMemOperand(v0, String::kHashFieldOffset));
-  __ IndexFromHash(v0, v0);
-
-  context()->Plug(v0);
-}
-
-
 void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_EQ(1, args->length());
@@ -3063,8 +3020,7 @@
         VisitForTypeofValue(expr->expression());
       }
       __ mov(a3, v0);
-      TypeofStub typeof_stub(isolate());
-      __ CallStub(&typeof_stub);
+      __ Call(isolate()->builtins()->Typeof(), RelocInfo::CODE_TARGET);
       context()->Plug(v0);
       break;
     }
@@ -3091,7 +3047,7 @@
   } else {
     // Reserve space for result of postfix operation.
     if (expr->is_postfix() && !context()->IsEffect()) {
-      __ li(at, Operand(Smi::FromInt(0)));
+      __ li(at, Operand(Smi::kZero));
       PushOperand(at);
     }
     switch (assign_type) {
@@ -3240,11 +3196,12 @@
 
   // Store the value returned in v0.
   switch (assign_type) {
-    case VARIABLE:
+    case VARIABLE: {
+      VariableProxy* proxy = expr->expression()->AsVariableProxy();
       if (expr->is_postfix()) {
         { EffectContext context(this);
-          EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
-                                 Token::ASSIGN, expr->CountSlot());
+          EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+                                 proxy->hole_check_mode());
           PrepareForBailoutForId(expr->AssignmentId(),
                                  BailoutState::TOS_REGISTER);
           context.Plug(v0);
@@ -3255,13 +3212,14 @@
           context()->PlugTOS();
         }
       } else {
-        EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
-                               Token::ASSIGN, expr->CountSlot());
+        EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+                               proxy->hole_check_mode());
         PrepareForBailoutForId(expr->AssignmentId(),
                                BailoutState::TOS_REGISTER);
         context()->Plug(v0);
       }
       break;
+    }
     case NAMED_PROPERTY: {
       __ mov(StoreDescriptor::ValueRegister(), result_register());
       PopOperand(StoreDescriptor::ReceiverRegister());
@@ -3433,8 +3391,7 @@
       SetExpressionPosition(expr);
       __ mov(a0, result_register());
       PopOperand(a1);
-      InstanceOfStub stub(isolate());
-      __ CallStub(&stub);
+      __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ LoadRoot(a4, Heap::kTrueValueRootIndex);
       Split(eq, v0, Operand(a4), if_true, if_false, fall_through);
diff --git a/src/full-codegen/ppc/full-codegen-ppc.cc b/src/full-codegen/ppc/full-codegen-ppc.cc
index de9a8f4..85d198d 100644
--- a/src/full-codegen/ppc/full-codegen-ppc.cc
+++ b/src/full-codegen/ppc/full-codegen-ppc.cc
@@ -368,7 +368,7 @@
 
 
 void FullCodeGenerator::ClearAccumulator() {
-  __ LoadSmiLiteral(r3, Smi::FromInt(0));
+  __ LoadSmiLiteral(r3, Smi::kZero);
 }
 
 
@@ -982,8 +982,7 @@
   __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
   __ beq(&exit);
   __ bind(&convert);
-  ToObjectStub stub(isolate());
-  __ CallStub(&stub);
+  __ Call(isolate()->builtins()->ToObject(), RelocInfo::CODE_TARGET);
   RestoreContext();
   __ bind(&done_convert);
   PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
@@ -1022,7 +1021,7 @@
   __ bind(&use_cache);
 
   __ EnumLength(r4, r3);
-  __ CmpSmiLiteral(r4, Smi::FromInt(0), r0);
+  __ CmpSmiLiteral(r4, Smi::kZero, r0);
   __ beq(&no_descriptors);
 
   __ LoadInstanceDescriptors(r3, r5);
@@ -1032,7 +1031,7 @@
 
   // Set up the four remaining stack slots.
   __ push(r3);  // Map.
-  __ LoadSmiLiteral(r3, Smi::FromInt(0));
+  __ LoadSmiLiteral(r3, Smi::kZero);
   // Push enumeration cache, enumeration cache length (as smi) and zero.
   __ Push(r5, r4, r3);
   __ b(&loop);
@@ -1049,7 +1048,7 @@
   __ LoadP(r4, FieldMemOperand(r3, FixedArray::kLengthOffset));
   __ Push(r4);  // Fixed array length (as smi).
   PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
-  __ LoadSmiLiteral(r3, Smi::FromInt(0));
+  __ LoadSmiLiteral(r3, Smi::kZero);
   __ Push(r3);  // Initial index.
 
   // Generate code for doing the condition check.
@@ -1265,7 +1264,7 @@
       DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
       Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
                                                : "[ Stack variable");
-      if (NeedsHoleCheckForLoad(proxy)) {
+      if (proxy->hole_check_mode() == HoleCheckMode::kRequired) {
         // Throw a reference error when using an uninitialized let/const
         // binding in harmony mode.
         Label done;
@@ -1704,12 +1703,14 @@
 
   // Store the value.
   switch (assign_type) {
-    case VARIABLE:
-      EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
-                             expr->op(), expr->AssignmentSlot());
+    case VARIABLE: {
+      VariableProxy* proxy = expr->target()->AsVariableProxy();
+      EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
+                             proxy->hole_check_mode());
       PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       context()->Plug(r3);
       break;
+    }
     case NAMED_PROPERTY:
       EmitNamedPropertyAssignment(expr);
       break;
@@ -1944,7 +1945,7 @@
       __ add(scratch2, right, left);
       __ cmpi(scratch2, Operand::Zero());
       __ blt(&stub_call);
-      __ LoadSmiLiteral(right, Smi::FromInt(0));
+      __ LoadSmiLiteral(right, Smi::kZero);
       break;
     }
     case Token::BIT_OR:
@@ -2037,9 +2038,10 @@
 
   switch (assign_type) {
     case VARIABLE: {
-      Variable* var = expr->AsVariableProxy()->var();
+      VariableProxy* proxy = expr->AsVariableProxy();
       EffectContext context(this);
-      EmitVariableAssignment(var, Token::ASSIGN, slot);
+      EmitVariableAssignment(proxy->var(), Token::ASSIGN, slot,
+                             proxy->hole_check_mode());
       break;
     }
     case NAMED_PROPERTY: {
@@ -2114,9 +2116,9 @@
   }
 }
 
-
 void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
-                                               FeedbackVectorSlot slot) {
+                                               FeedbackVectorSlot slot,
+                                               HoleCheckMode hole_check_mode) {
   if (var->IsUnallocated()) {
     // Global var, const, or let.
     __ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
@@ -2127,7 +2129,7 @@
     DCHECK(var->IsStackAllocated() || var->IsContextSlot());
     MemOperand location = VarOperand(var, r4);
     // Perform an initialization check for lexically declared variables.
-    if (var->binding_needs_init()) {
+    if (hole_check_mode == HoleCheckMode::kRequired) {
       Label assign;
       __ LoadP(r6, location);
       __ CompareRoot(r6, Heap::kTheHoleValueRootIndex);
@@ -2237,13 +2239,6 @@
   context()->Plug(r3);
 }
 
-
-void FullCodeGenerator::CallIC(Handle<Code> code, TypeFeedbackId ast_id) {
-  ic_total_count_++;
-  __ Call(code, RelocInfo::CODE_TARGET, ast_id);
-}
-
-
 // Code common for calls using the IC.
 void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
   Expression* callee = expr->expression();
@@ -2394,14 +2389,12 @@
     // not return to this function.
     EmitProfilingCounterHandlingForReturnSequence(true);
   }
-  Handle<Code> ic =
-      CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
-          .code();
+  Handle<Code> code =
+      CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
   __ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackICSlot()));
   __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
-  // Don't assign a type feedback id to the IC, since type feedback is provided
-  // by the vector above.
-  CallIC(ic);
+  __ mov(r3, Operand(arg_count));
+  CallIC(code);
   OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
@@ -2506,11 +2499,13 @@
 
   // Record source position for debugger.
   SetCallPosition(expr);
+  Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
+                                          expr->tail_call_mode())
+                          .code();
+  __ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackICSlot()));
   __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
   __ mov(r3, Operand(arg_count));
-  __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
-                                      expr->tail_call_mode()),
-          RelocInfo::CODE_TARGET);
+  __ Call(code, RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
   RecordJSReturnSite(expr);
   RestoreContext();
@@ -2550,7 +2545,7 @@
   __ LoadSmiLiteral(r6, SmiFromSlot(expr->CallNewFeedbackSlot()));
 
   CallConstructStub stub(isolate());
-  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+  CallIC(stub.GetCode());
   OperandStackDepthDecrement(arg_count + 1);
   PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
   RestoreContext();
@@ -2845,43 +2840,6 @@
   context()->DropAndPlug(1, r3);
 }
 
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
-                         &if_false, &fall_through);
-
-  __ lwz(r3, FieldMemOperand(r3, String::kHashFieldOffset));
-  // PPC - assume ip is free
-  __ mov(ip, Operand(String::kContainsCachedArrayIndexMask));
-  __ and_(r0, r3, ip, SetRC);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, if_true, if_false, fall_through, cr0);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-  VisitForAccumulatorValue(args->at(0));
-
-  __ AssertString(r3);
-
-  __ lwz(r3, FieldMemOperand(r3, String::kHashFieldOffset));
-  __ IndexFromHash(r3, r3);
-
-  context()->Plug(r3);
-}
-
-
 void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_EQ(1, args->length());
@@ -3055,8 +3013,7 @@
         VisitForTypeofValue(expr->expression());
       }
       __ mr(r6, r3);
-      TypeofStub typeof_stub(isolate());
-      __ CallStub(&typeof_stub);
+      __ Call(isolate()->builtins()->Typeof(), RelocInfo::CODE_TARGET);
       context()->Plug(r3);
       break;
     }
@@ -3083,7 +3040,7 @@
   } else {
     // Reserve space for result of postfix operation.
     if (expr->is_postfix() && !context()->IsEffect()) {
-      __ LoadSmiLiteral(ip, Smi::FromInt(0));
+      __ LoadSmiLiteral(ip, Smi::kZero);
       PushOperand(ip);
     }
     switch (assign_type) {
@@ -3233,12 +3190,13 @@
 
   // Store the value returned in r3.
   switch (assign_type) {
-    case VARIABLE:
+    case VARIABLE: {
+      VariableProxy* proxy = expr->expression()->AsVariableProxy();
       if (expr->is_postfix()) {
         {
           EffectContext context(this);
-          EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
-                                 Token::ASSIGN, expr->CountSlot());
+          EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+                                 proxy->hole_check_mode());
           PrepareForBailoutForId(expr->AssignmentId(),
                                  BailoutState::TOS_REGISTER);
           context.Plug(r3);
@@ -3249,13 +3207,14 @@
           context()->PlugTOS();
         }
       } else {
-        EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
-                               Token::ASSIGN, expr->CountSlot());
+        EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+                               proxy->hole_check_mode());
         PrepareForBailoutForId(expr->AssignmentId(),
                                BailoutState::TOS_REGISTER);
         context()->Plug(r3);
       }
       break;
+    }
     case NAMED_PROPERTY: {
       PopOperand(StoreDescriptor::ReceiverRegister());
       CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
@@ -3426,8 +3385,7 @@
       VisitForAccumulatorValue(expr->right());
       SetExpressionPosition(expr);
       PopOperand(r4);
-      InstanceOfStub stub(isolate());
-      __ CallStub(&stub);
+      __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ CompareRoot(r3, Heap::kTrueValueRootIndex);
       Split(eq, if_true, if_false, fall_through);
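
The other recurring pattern across these ports replaces per-call-site code stubs (TypeofStub, InstanceOfStub, ToObjectStub, ForInFilterStub) with direct calls to shared builtins; schematically:

    // Before: instantiate and call a platform code stub.
    TypeofStub typeof_stub(isolate());
    __ CallStub(&typeof_stub);

    // After: call the single canonical builtin Code object.
    __ Call(isolate()->builtins()->Typeof(), RelocInfo::CODE_TARGET);
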
diff --git a/src/full-codegen/s390/full-codegen-s390.cc b/src/full-codegen/s390/full-codegen-s390.cc
index dfe6527..91fa86d 100644
--- a/src/full-codegen/s390/full-codegen-s390.cc
+++ b/src/full-codegen/s390/full-codegen-s390.cc
@@ -369,7 +369,7 @@
 }
 
 void FullCodeGenerator::ClearAccumulator() {
-  __ LoadSmiLiteral(r2, Smi::FromInt(0));
+  __ LoadSmiLiteral(r2, Smi::kZero);
 }
 
 void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
@@ -952,8 +952,7 @@
   __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
   __ beq(&exit);
   __ bind(&convert);
-  ToObjectStub stub(isolate());
-  __ CallStub(&stub);
+  __ Call(isolate()->builtins()->ToObject(), RelocInfo::CODE_TARGET);
   RestoreContext();
   __ bind(&done_convert);
   PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
@@ -991,7 +990,7 @@
   __ bind(&use_cache);
 
   __ EnumLength(r3, r2);
-  __ CmpSmiLiteral(r3, Smi::FromInt(0), r0);
+  __ CmpSmiLiteral(r3, Smi::kZero, r0);
   __ beq(&no_descriptors, Label::kNear);
 
   __ LoadInstanceDescriptors(r2, r4);
@@ -1001,7 +1000,7 @@
 
   // Set up the four remaining stack slots.
   __ push(r2);  // Map.
-  __ LoadSmiLiteral(r2, Smi::FromInt(0));
+  __ LoadSmiLiteral(r2, Smi::kZero);
   // Push enumeration cache, enumeration cache length (as smi) and zero.
   __ Push(r4, r3, r2);
   __ b(&loop);
@@ -1018,7 +1017,7 @@
   __ LoadP(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
   __ Push(r3);  // Fixed array length (as smi).
   PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
-  __ LoadSmiLiteral(r2, Smi::FromInt(0));
+  __ LoadSmiLiteral(r2, Smi::kZero);
   __ Push(r2);  // Initial index.
 
   // Generate code for doing the condition check.
@@ -1229,7 +1228,7 @@
       DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
       Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
                                                : "[ Stack variable");
-      if (NeedsHoleCheckForLoad(proxy)) {
+      if (proxy->hole_check_mode() == HoleCheckMode::kRequired) {
         // Throw a reference error when using an uninitialized let/const
         // binding in harmony mode.
         Label done;
@@ -1664,12 +1663,14 @@
 
   // Store the value.
   switch (assign_type) {
-    case VARIABLE:
-      EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
-                             expr->op(), expr->AssignmentSlot());
+    case VARIABLE: {
+      VariableProxy* proxy = expr->target()->AsVariableProxy();
+      EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
+                             proxy->hole_check_mode());
       PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       context()->Plug(r2);
       break;
+    }
     case NAMED_PROPERTY:
       EmitNamedPropertyAssignment(expr);
       break;
@@ -1904,7 +1905,7 @@
       __ AddP(scratch2, right, left);
       __ CmpP(scratch2, Operand::Zero());
       __ blt(&stub_call);
-      __ LoadSmiLiteral(right, Smi::FromInt(0));
+      __ LoadSmiLiteral(right, Smi::kZero);
       break;
     }
     case Token::BIT_OR:
@@ -1994,9 +1995,10 @@
 
   switch (assign_type) {
     case VARIABLE: {
-      Variable* var = expr->AsVariableProxy()->var();
+      VariableProxy* proxy = expr->AsVariableProxy();
       EffectContext context(this);
-      EmitVariableAssignment(var, Token::ASSIGN, slot);
+      EmitVariableAssignment(proxy->var(), Token::ASSIGN, slot,
+                             proxy->hole_check_mode());
       break;
     }
     case NAMED_PROPERTY: {
@@ -2071,7 +2073,8 @@
 }
 
 void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
-                                               FeedbackVectorSlot slot) {
+                                               FeedbackVectorSlot slot,
+                                               HoleCheckMode hole_check_mode) {
   if (var->IsUnallocated()) {
     // Global var, const, or let.
     __ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
@@ -2083,7 +2086,7 @@
     DCHECK(var->IsStackAllocated() || var->IsContextSlot());
     MemOperand location = VarOperand(var, r3);
     // Perform an initialization check for lexically declared variables.
-    if (var->binding_needs_init()) {
+    if (hole_check_mode == HoleCheckMode::kRequired) {
       Label assign;
       __ LoadP(r5, location);
       __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
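
The hole-check plumbing is identical on every port: the decision moves from Variable::binding_needs_init() to an explicit HoleCheckMode carried on the AST's VariableProxy and threaded through EmitVariableAssignment. Condensed from the hunks above:

    // Caller: read the mode from the proxy, not the variable.
    VariableProxy* proxy = expr->target()->AsVariableProxy();
    EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
                           proxy->hole_check_mode());

    // Callee: the explicit mode replaces var->binding_needs_init().
    if (hole_check_mode == HoleCheckMode::kRequired) {
      // load the slot, compare against the-hole, throw a ReferenceError
      // for an uninitialized let/const binding
    }
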
@@ -2188,11 +2191,6 @@
   context()->Plug(r2);
 }
 
-void FullCodeGenerator::CallIC(Handle<Code> code, TypeFeedbackId ast_id) {
-  ic_total_count_++;
-  __ Call(code, RelocInfo::CODE_TARGET, ast_id);
-}
-
 // Code common for calls using the IC.
 void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
   Expression* callee = expr->expression();
@@ -2339,14 +2337,12 @@
     // not return to this function.
     EmitProfilingCounterHandlingForReturnSequence(true);
   }
-  Handle<Code> ic =
-      CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
-          .code();
+  Handle<Code> code =
+      CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
   __ LoadSmiLiteral(r5, SmiFromSlot(expr->CallFeedbackICSlot()));
   __ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
-  // Don't assign a type feedback id to the IC, since type feedback is provided
-  // by the vector above.
-  CallIC(ic);
+  __ mov(r2, Operand(arg_count));
+  CallIC(code);
   OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
@@ -2448,11 +2444,13 @@
 
   // Record source position for debugger.
   SetCallPosition(expr);
+  Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
+                                          expr->tail_call_mode())
+                          .code();
+  __ LoadSmiLiteral(r5, SmiFromSlot(expr->CallFeedbackICSlot()));
   __ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
   __ mov(r2, Operand(arg_count));
-  __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
-                                      expr->tail_call_mode()),
-          RelocInfo::CODE_TARGET);
+  __ Call(code, RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
   RecordJSReturnSite(expr);
   RestoreContext();
@@ -2491,7 +2489,7 @@
   __ LoadSmiLiteral(r5, SmiFromSlot(expr->CallNewFeedbackSlot()));
 
   CallConstructStub stub(isolate());
-  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+  CallIC(stub.GetCode());
   OperandStackDepthDecrement(arg_count + 1);
   PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
   RestoreContext();
@@ -2776,38 +2774,6 @@
   context()->DropAndPlug(1, r2);
 }
 
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
-                         &if_false, &fall_through);
-
-  __ LoadlW(r2, FieldMemOperand(r2, String::kHashFieldOffset));
-  __ AndP(r0, r2, Operand(String::kContainsCachedArrayIndexMask));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-  VisitForAccumulatorValue(args->at(0));
-
-  __ AssertString(r2);
-
-  __ LoadlW(r2, FieldMemOperand(r2, String::kHashFieldOffset));
-  __ IndexFromHash(r2, r2);
-
-  context()->Plug(r2);
-}
-
 void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_EQ(1, args->length());
@@ -2977,8 +2943,7 @@
         VisitForTypeofValue(expr->expression());
       }
       __ LoadRR(r5, r2);
-      TypeofStub typeof_stub(isolate());
-      __ CallStub(&typeof_stub);
+      __ Call(isolate()->builtins()->Typeof(), RelocInfo::CODE_TARGET);
       context()->Plug(r2);
       break;
     }
@@ -3004,7 +2969,7 @@
   } else {
     // Reserve space for result of postfix operation.
     if (expr->is_postfix() && !context()->IsEffect()) {
-      __ LoadSmiLiteral(ip, Smi::FromInt(0));
+      __ LoadSmiLiteral(ip, Smi::kZero);
       PushOperand(ip);
     }
     switch (assign_type) {
@@ -3154,12 +3119,13 @@
 
   // Store the value returned in r2.
   switch (assign_type) {
-    case VARIABLE:
+    case VARIABLE: {
+      VariableProxy* proxy = expr->expression()->AsVariableProxy();
       if (expr->is_postfix()) {
         {
           EffectContext context(this);
-          EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
-                                 Token::ASSIGN, expr->CountSlot());
+          EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+                                 proxy->hole_check_mode());
           PrepareForBailoutForId(expr->AssignmentId(),
                                  BailoutState::TOS_REGISTER);
           context.Plug(r2);
@@ -3170,13 +3136,14 @@
           context()->PlugTOS();
         }
       } else {
-        EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
-                               Token::ASSIGN, expr->CountSlot());
+        EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+                               proxy->hole_check_mode());
         PrepareForBailoutForId(expr->AssignmentId(),
                                BailoutState::TOS_REGISTER);
         context()->Plug(r2);
       }
       break;
+    }
     case NAMED_PROPERTY: {
       PopOperand(StoreDescriptor::ReceiverRegister());
       CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
@@ -3342,8 +3309,7 @@
       VisitForAccumulatorValue(expr->right());
       SetExpressionPosition(expr);
       PopOperand(r3);
-      InstanceOfStub stub(isolate());
-      __ CallStub(&stub);
+      __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ CompareRoot(r2, Heap::kTrueValueRootIndex);
       Split(eq, if_true, if_false, fall_through);
diff --git a/src/full-codegen/x64/full-codegen-x64.cc b/src/full-codegen/x64/full-codegen-x64.cc
index 525319f..0720c3d 100644
--- a/src/full-codegen/x64/full-codegen-x64.cc
+++ b/src/full-codegen/x64/full-codegen-x64.cc
@@ -976,8 +976,7 @@
   __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
   __ j(equal, &exit);
   __ bind(&convert);
-  ToObjectStub stub(isolate());
-  __ CallStub(&stub);
+  __ Call(isolate()->builtins()->ToObject(), RelocInfo::CODE_TARGET);
   RestoreContext();
   __ bind(&done_convert);
   PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
@@ -1016,7 +1015,7 @@
   Label no_descriptors;
 
   __ EnumLength(rdx, rax);
-  __ Cmp(rdx, Smi::FromInt(0));
+  __ Cmp(rdx, Smi::kZero);
   __ j(equal, &no_descriptors);
 
   __ LoadInstanceDescriptors(rax, rcx);
@@ -1027,7 +1026,7 @@
   __ Push(rax);  // Map.
   __ Push(rcx);  // Enumeration cache.
   __ Push(rdx);  // Number of valid entries for the map in the enum cache.
-  __ Push(Smi::FromInt(0));  // Initial index.
+  __ Push(Smi::kZero);  // Initial index.
   __ jmp(&loop);
 
   __ bind(&no_descriptors);
@@ -1043,7 +1042,7 @@
   __ movp(rax, FieldOperand(rax, FixedArray::kLengthOffset));
   __ Push(rax);  // Fixed array length (as smi).
   PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
-  __ Push(Smi::FromInt(0));  // Initial index.
+  __ Push(Smi::kZero);  // Initial index.
 
   // Generate code for doing the condition check.
   __ bind(&loop);
@@ -1076,11 +1075,10 @@
   __ Move(FieldOperand(rdx, FixedArray::OffsetOfElementAt(vector_index)),
           TypeFeedbackVector::MegamorphicSentinel(isolate()));
 
-  // rax contains the key. The receiver in rbx is the second argument to the
-  // ForInFilterStub. ForInFilter returns undefined if the receiver doesn't
+  // rax contains the key. The receiver in rbx is the second argument to
+  // ForInFilter. ForInFilter returns undefined if the receiver doesn't
   // have the key or returns the name-converted key.
-  ForInFilterStub has_stub(isolate());
-  __ CallStub(&has_stub);
+  __ Call(isolate()->builtins()->ForInFilter(), RelocInfo::CODE_TARGET);
   RestoreContext();
   PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
   __ JumpIfRoot(result_register(), Heap::kUndefinedValueRootIndex,
@@ -1246,7 +1244,7 @@
       DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
       Comment cmnt(masm_, var->IsContextSlot() ? "[ Context slot"
                                                : "[ Stack slot");
-      if (NeedsHoleCheckForLoad(proxy)) {
+      if (proxy->hole_check_mode() == HoleCheckMode::kRequired) {
         // Throw a reference error when using an uninitialized let/const
         // binding in harmony mode.
         DCHECK(IsLexicalVariableMode(var->mode()));
@@ -1677,12 +1675,14 @@
 
   // Store the value.
   switch (assign_type) {
-    case VARIABLE:
-      EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
-                             expr->op(), expr->AssignmentSlot());
+    case VARIABLE: {
+      VariableProxy* proxy = expr->target()->AsVariableProxy();
+      EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
+                             proxy->hole_check_mode());
       PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       context()->Plug(rax);
       break;
+    }
     case NAMED_PROPERTY:
       EmitNamedPropertyAssignment(expr);
       break;
@@ -1926,9 +1926,10 @@
 
   switch (assign_type) {
     case VARIABLE: {
-      Variable* var = expr->AsVariableProxy()->var();
+      VariableProxy* proxy = expr->AsVariableProxy();
       EffectContext context(this);
-      EmitVariableAssignment(var, Token::ASSIGN, slot);
+      EmitVariableAssignment(proxy->var(), Token::ASSIGN, slot,
+                             proxy->hole_check_mode());
       break;
     }
     case NAMED_PROPERTY: {
@@ -2001,9 +2002,9 @@
   }
 }
 
-
 void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
-                                               FeedbackVectorSlot slot) {
+                                               FeedbackVectorSlot slot,
+                                               HoleCheckMode hole_check_mode) {
   if (var->IsUnallocated()) {
     // Global var, const, or let.
     __ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
@@ -2014,7 +2015,7 @@
     DCHECK(var->IsStackAllocated() || var->IsContextSlot());
     MemOperand location = VarOperand(var, rcx);
     // Perform an initialization check for lexically declared variables.
-    if (var->binding_needs_init()) {
+    if (hole_check_mode == HoleCheckMode::kRequired) {
       Label assign;
       __ movp(rdx, location);
       __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
@@ -2122,14 +2123,6 @@
   context()->Plug(rax);
 }
 
-
-void FullCodeGenerator::CallIC(Handle<Code> code,
-                               TypeFeedbackId ast_id) {
-  ic_total_count_++;
-  __ call(code, RelocInfo::CODE_TARGET, ast_id);
-}
-
-
 // Code common for calls using the IC.
 void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
   Expression* callee = expr->expression();
@@ -2277,14 +2270,12 @@
     // not return to this function.
     EmitProfilingCounterHandlingForReturnSequence(true);
   }
-  Handle<Code> ic =
-      CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
-          .code();
+  Handle<Code> code =
+      CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
   __ Move(rdx, SmiFromSlot(expr->CallFeedbackICSlot()));
   __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
-  // Don't assign a type feedback id to the IC, since type feedback is provided
-  // by the vector above.
-  CallIC(ic);
+  __ Set(rax, arg_count);
+  CallIC(code);
   OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
@@ -2384,11 +2375,13 @@
   PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
 
   SetCallPosition(expr);
+  Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
+                                          expr->tail_call_mode())
+                          .code();
+  __ Move(rdx, SmiFromSlot(expr->CallFeedbackICSlot()));
   __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
   __ Set(rax, arg_count);
-  __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
-                                      expr->tail_call_mode()),
-          RelocInfo::CODE_TARGET);
+  __ call(code, RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
   RecordJSReturnSite(expr);
   RestoreContext();
@@ -2428,7 +2421,7 @@
   __ Move(rdx, SmiFromSlot(expr->CallNewFeedbackSlot()));
 
   CallConstructStub stub(isolate());
-  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+  CallIC(stub.GetCode());
   OperandStackDepthDecrement(arg_count + 1);
   PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
   RestoreContext();
@@ -2720,45 +2713,6 @@
   context()->DropAndPlug(1, rax);
 }
 
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ testl(FieldOperand(rax, String::kHashFieldOffset),
-           Immediate(String::kContainsCachedArrayIndexMask));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  __ j(zero, if_true);
-  __ jmp(if_false);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-  VisitForAccumulatorValue(args->at(0));
-
-  __ AssertString(rax);
-
-  __ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
-  DCHECK(String::kHashShift >= kSmiTagSize);
-  __ IndexFromHash(rax, rax);
-
-  context()->Plug(rax);
-}
-
-
 void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_EQ(1, args->length());
@@ -2942,8 +2896,7 @@
         VisitForTypeofValue(expr->expression());
       }
       __ movp(rbx, rax);
-      TypeofStub typeof_stub(isolate());
-      __ CallStub(&typeof_stub);
+      __ Call(isolate()->builtins()->Typeof(), RelocInfo::CODE_TARGET);
       context()->Plug(rax);
       break;
     }
@@ -2970,7 +2923,7 @@
   } else {
     // Reserve space for result of postfix operation.
     if (expr->is_postfix() && !context()->IsEffect()) {
-      PushOperand(Smi::FromInt(0));
+      PushOperand(Smi::kZero);
     }
     switch (assign_type) {
       case NAMED_PROPERTY: {
@@ -3120,12 +3073,13 @@
 
   // Store the value returned in rax.
   switch (assign_type) {
-    case VARIABLE:
+    case VARIABLE: {
+      VariableProxy* proxy = expr->expression()->AsVariableProxy();
       if (expr->is_postfix()) {
         // Perform the assignment as if via '='.
         { EffectContext context(this);
-          EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
-                                 Token::ASSIGN, expr->CountSlot());
+          EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+                                 proxy->hole_check_mode());
           PrepareForBailoutForId(expr->AssignmentId(),
                                  BailoutState::TOS_REGISTER);
           context.Plug(rax);
@@ -3137,13 +3091,14 @@
         }
       } else {
         // Perform the assignment as if via '='.
-        EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
-                               Token::ASSIGN, expr->CountSlot());
+        EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+                               proxy->hole_check_mode());
         PrepareForBailoutForId(expr->AssignmentId(),
                                BailoutState::TOS_REGISTER);
         context()->Plug(rax);
       }
       break;
+    }
     case NAMED_PROPERTY: {
       PopOperand(StoreDescriptor::ReceiverRegister());
       CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
@@ -3311,8 +3266,7 @@
       VisitForAccumulatorValue(expr->right());
       SetExpressionPosition(expr);
       PopOperand(rdx);
-      InstanceOfStub stub(isolate());
-      __ CallStub(&stub);
+      __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ CompareRoot(rax, Heap::kTrueValueRootIndex);
       Split(equal, if_true, if_false, fall_through);
diff --git a/src/full-codegen/x87/full-codegen-x87.cc b/src/full-codegen/x87/full-codegen-x87.cc
index 47be8b0..7cc7e2b 100644
--- a/src/full-codegen/x87/full-codegen-x87.cc
+++ b/src/full-codegen/x87/full-codegen-x87.cc
@@ -345,7 +345,7 @@
 
 
 void FullCodeGenerator::ClearAccumulator() {
-  __ Move(eax, Immediate(Smi::FromInt(0)));
+  __ Move(eax, Immediate(Smi::kZero));
 }
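
Smi::FromInt(0) is replaced by Smi::kZero throughout this diff. The constant's definition is not shown here; the assumption is a cached named constant along these lines:

    // Assumed declaration/definition, equivalent to Smi::FromInt(0), so call
    // sites read as intent ("the zero Smi") rather than a conversion:
    static Smi* const kZero;                  // in class Smi (objects.h)
    Smi* const Smi::kZero = Smi::FromInt(0);  // out-of-line (objects.cc)
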
 
 
@@ -952,8 +952,7 @@
   __ cmp(eax, isolate()->factory()->null_value());
   __ j(equal, &exit);
   __ bind(&convert);
-  ToObjectStub stub(isolate());
-  __ CallStub(&stub);
+  __ Call(isolate()->builtins()->ToObject(), RelocInfo::CODE_TARGET);
   RestoreContext();
   __ bind(&done_convert);
   PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
@@ -984,7 +983,7 @@
   __ bind(&use_cache);
 
   __ EnumLength(edx, eax);
-  __ cmp(edx, Immediate(Smi::FromInt(0)));
+  __ cmp(edx, Immediate(Smi::kZero));
   __ j(equal, &no_descriptors);
 
   __ LoadInstanceDescriptors(eax, ecx);
@@ -995,7 +994,7 @@
   __ push(eax);  // Map.
   __ push(ecx);  // Enumeration cache.
   __ push(edx);  // Number of valid entries for the map in the enum cache.
-  __ push(Immediate(Smi::FromInt(0)));  // Initial index.
+  __ push(Immediate(Smi::kZero));  // Initial index.
   __ jmp(&loop);
 
   __ bind(&no_descriptors);
@@ -1010,7 +1009,7 @@
   __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
   __ push(eax);  // Fixed array length (as smi).
   PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
-  __ push(Immediate(Smi::FromInt(0)));  // Initial index.
+  __ push(Immediate(Smi::kZero));  // Initial index.
 
   // Generate code for doing the condition check.
   __ bind(&loop);
@@ -1042,10 +1041,9 @@
          Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
 
   // eax contains the key.  The receiver in ebx is the second argument to the
-  // ForInFilterStub.  ForInFilter returns undefined if the receiver doesn't
+  // ForInFilter.  ForInFilter returns undefined if the receiver doesn't
   // have the key or returns the name-converted key.
-  ForInFilterStub filter_stub(isolate());
-  __ CallStub(&filter_stub);
+  __ Call(isolate()->builtins()->ForInFilter(), RelocInfo::CODE_TARGET);
   RestoreContext();
   PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
   __ JumpIfRoot(result_register(), Heap::kUndefinedValueRootIndex,
@@ -1209,7 +1207,7 @@
       Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
                                                : "[ Stack variable");
 
-      if (NeedsHoleCheckForLoad(proxy)) {
+      if (proxy->hole_check_mode() == HoleCheckMode::kRequired) {
         // Throw a reference error when using an uninitialized let/const
         // binding in harmony mode.
         Label done;
@@ -1642,12 +1640,14 @@
 
   // Store the value.
   switch (assign_type) {
-    case VARIABLE:
-      EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
-                             expr->op(), expr->AssignmentSlot());
+    case VARIABLE: {
+      VariableProxy* proxy = expr->target()->AsVariableProxy();
+      EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
+                             proxy->hole_check_mode());
       PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       context()->Plug(eax);
       break;
+    }
     case NAMED_PROPERTY:
       EmitNamedPropertyAssignment(expr);
       break;
@@ -1926,9 +1926,10 @@
 
   switch (assign_type) {
     case VARIABLE: {
-      Variable* var = expr->AsVariableProxy()->var();
+      VariableProxy* proxy = expr->AsVariableProxy();
       EffectContext context(this);
-      EmitVariableAssignment(var, Token::ASSIGN, slot);
+      EmitVariableAssignment(proxy->var(), Token::ASSIGN, slot,
+                             proxy->hole_check_mode());
       break;
     }
     case NAMED_PROPERTY: {
@@ -2001,9 +2002,9 @@
   }
 }
 
-
 void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
-                                               FeedbackVectorSlot slot) {
+                                               FeedbackVectorSlot slot,
+                                               HoleCheckMode hole_check_mode) {
   if (var->IsUnallocated()) {
     // Global var, const, or let.
     __ mov(StoreDescriptor::ReceiverRegister(), NativeContextOperand());
@@ -2017,7 +2018,7 @@
     DCHECK(var->IsStackAllocated() || var->IsContextSlot());
     MemOperand location = VarOperand(var, ecx);
     // Perform an initialization check for lexically declared variables.
-    if (var->binding_needs_init()) {
+    if (hole_check_mode == HoleCheckMode::kRequired) {
       Label assign;
       __ mov(edx, location);
       __ cmp(edx, isolate()->factory()->the_hole_value());
@@ -2127,14 +2128,6 @@
   context()->Plug(eax);
 }
 
-
-void FullCodeGenerator::CallIC(Handle<Code> code,
-                               TypeFeedbackId ast_id) {
-  ic_total_count_++;
-  __ call(code, RelocInfo::CODE_TARGET, ast_id);
-}
-
-
 // Code common for calls using the IC.
 void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
   Expression* callee = expr->expression();
@@ -2280,14 +2273,12 @@
     // not return to this function.
     EmitProfilingCounterHandlingForReturnSequence(true);
   }
-  Handle<Code> ic =
-      CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
-          .code();
+  Handle<Code> code =
+      CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
   __ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
   __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
-  // Don't assign a type feedback id to the IC, since type feedback is provided
-  // by the vector above.
-  CallIC(ic);
+  __ Move(eax, Immediate(arg_count));
+  CallIC(code);
   OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
@@ -2386,11 +2377,13 @@
   PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
 
   SetCallPosition(expr);
+  Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
+                                          expr->tail_call_mode())
+                          .code();
+  __ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
   __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
-  __ Set(eax, arg_count);
-  __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
-                                      expr->tail_call_mode()),
-          RelocInfo::CODE_TARGET);
+  __ Move(eax, Immediate(arg_count));
+  __ call(code, RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
   RecordJSReturnSite(expr);
   RestoreContext();
@@ -2430,7 +2423,7 @@
   __ mov(edx, Immediate(SmiFromSlot(expr->CallNewFeedbackSlot())));
 
   CallConstructStub stub(isolate());
-  __ call(stub.GetCode(), RelocInfo::CODE_TARGET);
+  CallIC(stub.GetCode());
   OperandStackDepthDecrement(arg_count + 1);
   PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
   RestoreContext();
@@ -2721,45 +2714,6 @@
   context()->DropAndPlug(1, eax);
 }
 
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  __ AssertString(eax);
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ test(FieldOperand(eax, String::kHashFieldOffset),
-          Immediate(String::kContainsCachedArrayIndexMask));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(zero, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-  VisitForAccumulatorValue(args->at(0));
-
-  __ AssertString(eax);
-
-  __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
-  __ IndexFromHash(eax, eax);
-
-  context()->Plug(eax);
-}
-
-
 void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_EQ(1, args->length());
@@ -2943,8 +2897,7 @@
         VisitForTypeofValue(expr->expression());
       }
       __ mov(ebx, eax);
-      TypeofStub typeof_stub(isolate());
-      __ CallStub(&typeof_stub);
+      __ Call(isolate()->builtins()->Typeof(), RelocInfo::CODE_TARGET);
       context()->Plug(eax);
       break;
     }
@@ -2971,7 +2924,7 @@
   } else {
     // Reserve space for result of postfix operation.
     if (expr->is_postfix() && !context()->IsEffect()) {
-      PushOperand(Smi::FromInt(0));
+      PushOperand(Smi::kZero);
     }
     switch (assign_type) {
       case NAMED_PROPERTY: {
@@ -3123,12 +3076,13 @@
 
   // Store the value returned in eax.
   switch (assign_type) {
-    case VARIABLE:
+    case VARIABLE: {
+      VariableProxy* proxy = expr->expression()->AsVariableProxy();
       if (expr->is_postfix()) {
         // Perform the assignment as if via '='.
         { EffectContext context(this);
-          EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
-                                 Token::ASSIGN, expr->CountSlot());
+          EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+                                 proxy->hole_check_mode());
           PrepareForBailoutForId(expr->AssignmentId(),
                                  BailoutState::TOS_REGISTER);
           context.Plug(eax);
@@ -3140,13 +3094,14 @@
         }
       } else {
         // Perform the assignment as if via '='.
-        EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
-                               Token::ASSIGN, expr->CountSlot());
+        EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+                               proxy->hole_check_mode());
         PrepareForBailoutForId(expr->AssignmentId(),
                                BailoutState::TOS_REGISTER);
         context()->Plug(eax);
       }
       break;
+    }
     case NAMED_PROPERTY: {
       PopOperand(StoreDescriptor::ReceiverRegister());
       CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
@@ -3314,8 +3269,7 @@
       VisitForAccumulatorValue(expr->right());
       SetExpressionPosition(expr);
       PopOperand(edx);
-      InstanceOfStub stub(isolate());
-      __ CallStub(&stub);
+      __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ cmp(eax, isolate()->factory()->true_value());
       Split(equal, if_true, if_false, fall_through);
diff --git a/src/gdb-jit.cc b/src/gdb-jit.cc
index 4e73981..cc5451f 100644
--- a/src/gdb-jit.cc
+++ b/src/gdb-jit.cc
@@ -1942,7 +1942,7 @@
 
 static JITCodeEntry* CreateELFObject(CodeDescription* desc, Isolate* isolate) {
 #ifdef __MACH_O
-  Zone zone(isolate->allocator());
+  Zone zone(isolate->allocator(), ZONE_NAME);
   MachO mach_o(&zone);
   Writer w(&mach_o);
 
@@ -1954,7 +1954,7 @@
 
   mach_o.Write(&w, desc->CodeStart(), desc->CodeSize());
 #else
-  Zone zone(isolate->allocator());
+  Zone zone(isolate->allocator(), ZONE_NAME);
   ELF elf(&zone);
   Writer w(&elf);
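
Zone construction now takes a name for accounting. ZONE_NAME is presumably a macro expanding to an identifying string (e.g. __func__) so zone memory can be attributed in tracing; assumed shape:

    // Assumed definition in src/zone/zone.h:
    #define ZONE_NAME __func__
    // The zone is then named after the enclosing function:
    Zone zone(isolate->allocator(), ZONE_NAME);  // name == "CreateELFObject"
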
 
diff --git a/src/global-handles.cc b/src/global-handles.cc
index ea46344..9ff16af 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -52,8 +52,6 @@
     STATIC_ASSERT(NEAR_DEATH == Internals::kNodeStateIsNearDeathValue);
     STATIC_ASSERT(static_cast<int>(IsIndependent::kShift) ==
                   Internals::kNodeIsIndependentShift);
-    STATIC_ASSERT(static_cast<int>(IsPartiallyDependent::kShift) ==
-                  Internals::kNodeIsPartiallyDependentShift);
     STATIC_ASSERT(static_cast<int>(IsActive::kShift) ==
                   Internals::kNodeIsActiveShift);
   }
@@ -66,11 +64,7 @@
     class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
     index_ = 0;
     set_independent(false);
-    if (FLAG_scavenge_reclaim_unmodified_objects) {
-      set_active(false);
-    } else {
-      set_partially_dependent(false);
-    }
+    set_active(false);
     set_in_new_space_list(false);
     parameter_or_next_free_.next_free = NULL;
     weak_callback_ = NULL;
@@ -92,11 +86,7 @@
     object_ = object;
     class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
     set_independent(false);
-    if (FLAG_scavenge_reclaim_unmodified_objects) {
-      set_active(false);
-    } else {
-      set_partially_dependent(false);
-    }
+    set_active(false);
     set_state(NORMAL);
     parameter_or_next_free_.parameter = NULL;
     weak_callback_ = NULL;
@@ -116,11 +106,7 @@
     object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
     class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
     set_independent(false);
-    if (FLAG_scavenge_reclaim_unmodified_objects) {
-      set_active(false);
-    } else {
-      set_partially_dependent(false);
-    }
+    set_active(false);
     weak_callback_ = NULL;
     DecreaseBlockUses();
   }
@@ -153,21 +139,10 @@
     flags_ = IsIndependent::update(flags_, v);
   }
 
-  bool is_partially_dependent() {
-    CHECK(!FLAG_scavenge_reclaim_unmodified_objects);
-    return IsPartiallyDependent::decode(flags_);
-  }
-  void set_partially_dependent(bool v) {
-    CHECK(!FLAG_scavenge_reclaim_unmodified_objects);
-    flags_ = IsPartiallyDependent::update(flags_, v);
-  }
-
   bool is_active() {
-    CHECK(FLAG_scavenge_reclaim_unmodified_objects);
     return IsActive::decode(flags_);
   }
   void set_active(bool v) {
-    CHECK(FLAG_scavenge_reclaim_unmodified_objects);
     flags_ = IsActive::update(flags_, v);
   }
 
@@ -227,14 +202,6 @@
     set_independent(true);
   }
 
-  void MarkPartiallyDependent() {
-    DCHECK(IsInUse());
-    if (GetGlobalHandles()->isolate()->heap()->InNewSpace(object_)) {
-      set_partially_dependent(true);
-    }
-  }
-  void clear_partially_dependent() { set_partially_dependent(false); }
-
   // Callback accessor.
   // TODO(svenpanne) Re-enable or nuke later.
   // WeakReferenceCallback callback() { return callback_; }
@@ -398,7 +365,6 @@
   class IsIndependent : public BitField<bool, 3, 1> {};
   // The following two fields are mutually exclusive
   class IsActive : public BitField<bool, 4, 1> {};
-  class IsPartiallyDependent : public BitField<bool, 4, 1> {};
   class IsInNewSpaceList : public BitField<bool, 5, 1> {};
   class NodeWeaknessType : public BitField<WeaknessType, 6, 2> {};
 
@@ -642,12 +608,6 @@
   Node::FromLocation(location)->MarkIndependent();
 }
 
-
-void GlobalHandles::MarkPartiallyDependent(Object** location) {
-  Node::FromLocation(location)->MarkPartiallyDependent();
-}
-
-
 bool GlobalHandles::IsIndependent(Object** location) {
   return Node::FromLocation(location)->is_independent();
 }
@@ -694,18 +654,10 @@
 void GlobalHandles::IterateNewSpaceStrongAndDependentRoots(ObjectVisitor* v) {
   for (int i = 0; i < new_space_nodes_.length(); ++i) {
     Node* node = new_space_nodes_[i];
-    if (FLAG_scavenge_reclaim_unmodified_objects) {
-      if (node->IsStrongRetainer() ||
-          (node->IsWeakRetainer() && !node->is_independent() &&
-           node->is_active())) {
-        v->VisitPointer(node->location());
-      }
-    } else {
-      if (node->IsStrongRetainer() ||
-          (node->IsWeakRetainer() && !node->is_independent() &&
-           !node->is_partially_dependent())) {
-        v->VisitPointer(node->location());
-      }
+    if (node->IsStrongRetainer() ||
+        (node->IsWeakRetainer() && !node->is_independent() &&
+         node->is_active())) {
+      v->VisitPointer(node->location());
     }
   }
 }
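
With FLAG_scavenge_reclaim_unmodified_objects gone, the new-space root test collapses to a single condition; restated as a standalone helper (hypothetical name, not in the source):

    // Hypothetical helper restating the loop body above: a new-space node is
    // treated as a strong root unless it is an independent or inactive weak
    // handle, which the scavenger may reclaim.
    static bool IsRetainedRoot(Node* node) {
      return node->IsStrongRetainer() ||
             (node->IsWeakRetainer() && !node->is_independent() &&
              node->is_active());
    }
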
@@ -716,8 +668,8 @@
   for (int i = 0; i < new_space_nodes_.length(); ++i) {
     Node* node = new_space_nodes_[i];
     DCHECK(node->is_in_new_space_list());
-    if ((node->is_independent() || node->is_partially_dependent()) &&
-        node->IsWeak() && f(isolate_->heap(), node->location())) {
+    if (node->is_independent() && node->IsWeak() &&
+        f(isolate_->heap(), node->location())) {
       node->MarkPending();
     }
   }
@@ -728,8 +680,7 @@
   for (int i = 0; i < new_space_nodes_.length(); ++i) {
     Node* node = new_space_nodes_[i];
     DCHECK(node->is_in_new_space_list());
-    if ((node->is_independent() || node->is_partially_dependent()) &&
-        node->IsWeakRetainer()) {
+    if (node->is_independent() && node->IsWeakRetainer()) {
       // Pending weak phantom handles die immediately. Everything else survives.
       if (node->IsPendingPhantomResetHandle()) {
         node->ResetPhantomHandle();
@@ -968,18 +919,11 @@
     // to be
     // called between two global garbage collection callbacks which
     // are not called for minor collections.
-    if (FLAG_scavenge_reclaim_unmodified_objects) {
       if (!node->is_independent() && (node->is_active())) {
         node->set_active(false);
         continue;
       }
       node->set_active(false);
-    } else {
-      if (!node->is_independent() && !node->is_partially_dependent()) {
-        continue;
-      }
-      node->clear_partially_dependent();
-    }
 
     if (node->PostGarbageCollectionProcessing(isolate_)) {
       if (initial_post_gc_processing_count != post_gc_processing_count_) {
@@ -1007,11 +951,7 @@
       // the freed_nodes.
       continue;
     }
-    if (FLAG_scavenge_reclaim_unmodified_objects) {
-      it.node()->set_active(false);
-    } else {
-      it.node()->clear_partially_dependent();
-    }
+    it.node()->set_active(false);
     if (it.node()->PostGarbageCollectionProcessing(isolate_)) {
       if (initial_post_gc_processing_count != post_gc_processing_count_) {
         // See the comment above.
@@ -1122,7 +1062,7 @@
     // PostScavengeProcessing.
     return freed_nodes;
   }
-  if (collector == SCAVENGER) {
+  if (Heap::IsYoungGenerationCollector(collector)) {
     freed_nodes += PostScavengeProcessing(initial_post_gc_processing_count);
   } else {
     freed_nodes += PostMarkSweepProcessing(initial_post_gc_processing_count);
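
The SCAVENGER equality test becomes a predicate so that MINOR_MARK_COMPACTOR (added to the GarbageCollector enum in globals.h later in this diff) is handled uniformly; its assumed shape (not shown in this diff):

    // Assumed definition on Heap: both young-generation collectors take the
    // scavenge-style post-GC processing path.
    static bool IsYoungGenerationCollector(GarbageCollector collector) {
      return collector == SCAVENGER || collector == MINOR_MARK_COMPACTOR;
    }
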
diff --git a/src/global-handles.h b/src/global-handles.h
index 24a2273..50e5ed6 100644
--- a/src/global-handles.h
+++ b/src/global-handles.h
@@ -167,9 +167,6 @@
   // Mark the reference to this object independent of any object group.
   static void MarkIndependent(Object** location);
 
-  // Mark the reference to this object externally unreachable.
-  static void MarkPartiallyDependent(Object** location);
-
   static bool IsIndependent(Object** location);
 
   // Tells whether global handle is near death.
diff --git a/src/globals.h b/src/globals.h
index 03c5b1d..f689c66 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -141,19 +141,20 @@
 const uint32_t kMaxUInt32 = 0xFFFFFFFFu;
 const int kMinUInt32 = 0;
 
-const int kCharSize      = sizeof(char);      // NOLINT
-const int kShortSize     = sizeof(short);     // NOLINT
-const int kIntSize       = sizeof(int);       // NOLINT
-const int kInt32Size     = sizeof(int32_t);   // NOLINT
-const int kInt64Size     = sizeof(int64_t);   // NOLINT
-const int kFloatSize     = sizeof(float);     // NOLINT
-const int kDoubleSize    = sizeof(double);    // NOLINT
-const int kIntptrSize    = sizeof(intptr_t);  // NOLINT
-const int kPointerSize   = sizeof(void*);     // NOLINT
+const int kCharSize = sizeof(char);
+const int kShortSize = sizeof(short);  // NOLINT
+const int kIntSize = sizeof(int);
+const int kInt32Size = sizeof(int32_t);
+const int kInt64Size = sizeof(int64_t);
+const int kSizetSize = sizeof(size_t);
+const int kFloatSize = sizeof(float);
+const int kDoubleSize = sizeof(double);
+const int kIntptrSize = sizeof(intptr_t);
+const int kPointerSize = sizeof(void*);
 #if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
-const int kRegisterSize  = kPointerSize + kPointerSize;
+const int kRegisterSize = kPointerSize + kPointerSize;
 #else
-const int kRegisterSize  = kPointerSize;
+const int kRegisterSize = kPointerSize;
 #endif
 const int kPCOnStackSize = kRegisterSize;
 const int kFPOnStackSize = kRegisterSize;
@@ -576,7 +577,7 @@
   USE_CUSTOM_MINIMUM_CAPACITY
 };
 
-enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };
+enum GarbageCollector { SCAVENGER, MARK_COMPACTOR, MINOR_MARK_COMPACTOR };
 
 enum Executability { NOT_EXECUTABLE, EXECUTABLE };
 
@@ -602,6 +603,14 @@
   ONLY_SINGLE_FUNCTION_LITERAL  // Only a single FunctionLiteral expression.
 };
 
+// TODO(gsathya): Move this to JSPromise once we create it.
+// This should be in sync with the constants in promise.js
+enum PromiseStatus {
+  kPromisePending,
+  kPromiseFulfilled,
+  kPromiseRejected,
+};
+
 // A CodeDesc describes a buffer holding instructions and relocation
 // information. The instructions start at the beginning of the buffer
 // and grow forward, the relocation information starts at the end of
@@ -1048,6 +1057,8 @@
 // immediately initialized upon creation (kCreatedInitialized).
 enum InitializationFlag : uint8_t { kNeedsInitialization, kCreatedInitialized };
 
+enum class HoleCheckMode { kRequired, kElided };
+
 enum MaybeAssignedFlag : uint8_t { kNotAssigned, kMaybeAssigned };
 
 // Serialized in PreparseData, so numeric values should not be changed.
@@ -1208,16 +1219,22 @@
 // Type feedback is encoded in such a way that, we can combine the feedback
 // at different points by performing an 'OR' operation. Type feedback moves
 // to a more generic type when we combine feedback.
-// kSignedSmall -> kNumber  -> kAny
-//                 kString  -> kAny
+// kSignedSmall -> kNumber  -> kNumberOrOddball -> kAny
+//                             kString          -> kAny
+// TODO(mythria): Remove the kNumber type when crankshaft can handle Oddballs
+// similarly to Numbers. We don't need kNumber feedback for Turbofan. Extra
+// information about Number might save a few instructions but causes more
+// deopts. We collect Number only because crankshaft does not handle all
+// cases of oddballs.
 class BinaryOperationFeedback {
  public:
   enum {
     kNone = 0x0,
     kSignedSmall = 0x1,
     kNumber = 0x3,
-    kString = 0x4,
-    kAny = 0xF
+    kNumberOrOddball = 0x7,
+    kString = 0x8,
+    kAny = 0x1F
   };
 };
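
The widened lattice still combines by bitwise OR, as the comment above describes; a self-contained illustration of the merge behavior (standalone sketch mirroring the enum values above):

    enum Feedback {
      kNone = 0x0,
      kSignedSmall = 0x1,
      kNumber = 0x3,
      kNumberOrOddball = 0x7,
      kString = 0x8,
      kAny = 0x1F,
    };
    inline int CombineFeedback(int a, int b) { return a | b; }
    // CombineFeedback(kSignedSmall, kNumber) == kNumber: Smi widens to Number.
    // CombineFeedback(kNumber, kString) == 0xB: not itself a named state; the
    // only named value containing all its bits is kAny, so mixed number/string
    // feedback generalizes to the fully generic path.
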
 
@@ -1262,9 +1279,28 @@
   return os;
 }
 
+enum class IterationKind { kKeys, kValues, kEntries };
+
+inline std::ostream& operator<<(std::ostream& os, IterationKind kind) {
+  switch (kind) {
+    case IterationKind::kKeys:
+      return os << "IterationKind::kKeys";
+    case IterationKind::kValues:
+      return os << "IterationKind::kValues";
+    case IterationKind::kEntries:
+      return os << "IterationKind::kEntries";
+  }
+  UNREACHABLE();
+  return os;
+}
+
 }  // namespace internal
 }  // namespace v8
 
+// Used by js-builtin-reducer to identify whether ReduceArrayIterator() is
+// reducing a JSArray method, or a JSTypedArray method.
+enum class ArrayIteratorKind { kArray, kTypedArray };
+
 namespace i = v8::internal;
 
 #endif  // V8_GLOBALS_H_
diff --git a/src/handles.cc b/src/handles.cc
index 6331c79..3b1902e 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -119,7 +119,7 @@
 }
 
 CanonicalHandleScope::CanonicalHandleScope(Isolate* isolate)
-    : isolate_(isolate), zone_(isolate->allocator()) {
+    : isolate_(isolate), zone_(isolate->allocator(), ZONE_NAME) {
   HandleScopeData* handle_scope_data = isolate_->handle_scope_data();
   prev_canonical_scope_ = handle_scope_data->canonical_scope;
   handle_scope_data->canonical_scope = this;
diff --git a/src/handles.h b/src/handles.h
index 3587d85..2c98209 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -340,13 +340,13 @@
 // This does not apply to nested inner HandleScopes unless a nested
 // CanonicalHandleScope is introduced. Handles are only canonicalized within
 // the same CanonicalHandleScope, but not across nested ones.
-class CanonicalHandleScope final {
+class V8_EXPORT_PRIVATE CanonicalHandleScope final {
  public:
   explicit CanonicalHandleScope(Isolate* isolate);
   ~CanonicalHandleScope();
 
  private:
-  V8_EXPORT_PRIVATE Object** Lookup(Object* object);
+  Object** Lookup(Object* object);
 
   Isolate* isolate_;
   Zone zone_;
diff --git a/src/heap-symbols.h b/src/heap-symbols.h
index c7b3370..cee9000 100644
--- a/src/heap-symbols.h
+++ b/src/heap-symbols.h
@@ -12,6 +12,7 @@
   V(Arguments_string, "Arguments")                                 \
   V(arguments_to_string, "[object Arguments]")                     \
   V(Array_string, "Array")                                         \
+  V(ArrayIterator_string, "Array Iterator")                        \
   V(assign_string, "assign")                                       \
   V(array_to_string, "[object Array]")                             \
   V(boolean_to_string, "[object Boolean]")                         \
@@ -53,11 +54,14 @@
   V(default_string, "default")                                     \
   V(defineProperty_string, "defineProperty")                       \
   V(deleteProperty_string, "deleteProperty")                       \
+  V(did_handle_string, "didHandle")                                \
   V(display_name_string, "displayName")                            \
   V(done_string, "done")                                           \
   V(dot_result_string, ".result")                                  \
   V(dot_string, ".")                                               \
+  V(exec_string, "exec")                                           \
   V(entries_string, "entries")                                     \
+  V(enqueue_string, "enqueue")                                     \
   V(enumerable_string, "enumerable")                               \
   V(era_string, "era")                                             \
   V(Error_string, "Error")                                         \
@@ -95,7 +99,8 @@
   V(isView_string, "isView")                                       \
   V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic")           \
   V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic")         \
-  V(last_index_string, "lastIndex")                                \
+  V(keys_string, "keys")                                           \
+  V(lastIndex_string, "lastIndex")                                 \
   V(length_string, "length")                                       \
   V(line_string, "line")                                           \
   V(literal_string, "literal")                                     \
@@ -122,6 +127,7 @@
   V(preventExtensions_string, "preventExtensions")                 \
   V(private_api_string, "private_api")                             \
   V(Promise_string, "Promise")                                     \
+  V(PromiseResolveThenableJob_string, "PromiseResolveThenableJob") \
   V(proto_string, "__proto__")                                     \
   V(prototype_string, "prototype")                                 \
   V(Proxy_string, "Proxy")                                         \
@@ -140,6 +146,7 @@
   V(source_url_string, "source_url")                               \
   V(stack_string, "stack")                                         \
   V(stackTraceLimit_string, "stackTraceLimit")                     \
+  V(sticky_string, "sticky")                                       \
   V(strict_compare_ic_string, "===")                               \
   V(string_string, "string")                                       \
   V(String_string, "String")                                       \
@@ -155,6 +162,8 @@
   V(true_string, "true")                                           \
   V(TypeError_string, "TypeError")                                 \
   V(type_string, "type")                                           \
+  V(CompileError_string, "CompileError")                           \
+  V(RuntimeError_string, "RuntimeError")                           \
   V(uint16x8_string, "uint16x8")                                   \
   V(Uint16x8_string, "Uint16x8")                                   \
   V(uint32x4_string, "uint32x4")                                   \
@@ -163,6 +172,7 @@
   V(Uint8x16_string, "Uint8x16")                                   \
   V(undefined_string, "undefined")                                 \
   V(undefined_to_string, "[object Undefined]")                     \
+  V(unicode_string, "unicode")                                     \
   V(URIError_string, "URIError")                                   \
   V(valueOf_string, "valueOf")                                     \
   V(values_string, "values")                                       \
@@ -170,53 +180,52 @@
   V(WeakMap_string, "WeakMap")                                     \
   V(WeakSet_string, "WeakSet")                                     \
   V(weekday_string, "weekday")                                     \
+  V(will_handle_string, "willHandle")                              \
   V(writable_string, "writable")                                   \
   V(year_string, "year")
 
-#define PRIVATE_SYMBOL_LIST(V)              \
-  V(array_iteration_kind_symbol)            \
-  V(array_iterator_next_symbol)             \
-  V(array_iterator_object_symbol)           \
-  V(call_site_frame_array_symbol)           \
-  V(call_site_frame_index_symbol)           \
-  V(class_end_position_symbol)              \
-  V(class_start_position_symbol)            \
-  V(detailed_stack_trace_symbol)            \
-  V(elements_transition_symbol)             \
-  V(error_end_pos_symbol)                   \
-  V(error_script_symbol)                    \
-  V(error_start_pos_symbol)                 \
-  V(frozen_symbol)                          \
-  V(hash_code_symbol)                       \
-  V(home_object_symbol)                     \
-  V(intl_impl_object_symbol)                \
-  V(intl_initialized_marker_symbol)         \
-  V(intl_pattern_symbol)                    \
-  V(intl_resolved_symbol)                   \
-  V(megamorphic_symbol)                     \
-  V(native_context_index_symbol)            \
-  V(nonexistent_symbol)                     \
-  V(nonextensible_symbol)                   \
-  V(normal_ic_symbol)                       \
-  V(not_mapped_symbol)                      \
-  V(premonomorphic_symbol)                  \
-  V(promise_async_stack_id_symbol)          \
-  V(promise_debug_marker_symbol)            \
-  V(promise_deferred_reactions_symbol)      \
-  V(promise_forwarding_handler_symbol)      \
-  V(promise_fulfill_reactions_symbol)       \
-  V(promise_handled_by_symbol)              \
-  V(promise_handled_hint_symbol)            \
-  V(promise_has_handler_symbol)             \
-  V(promise_raw_symbol)                     \
-  V(promise_reject_reactions_symbol)        \
-  V(promise_result_symbol)                  \
-  V(promise_state_symbol)                   \
-  V(sealed_symbol)                          \
-  V(stack_trace_symbol)                     \
-  V(strict_function_transition_symbol)      \
-  V(string_iterator_iterated_string_symbol) \
-  V(string_iterator_next_index_symbol)      \
+#define PRIVATE_SYMBOL_LIST(V)         \
+  V(array_iteration_kind_symbol)       \
+  V(array_iterator_next_symbol)        \
+  V(array_iterator_object_symbol)      \
+  V(call_site_frame_array_symbol)      \
+  V(call_site_frame_index_symbol)      \
+  V(class_end_position_symbol)         \
+  V(class_start_position_symbol)       \
+  V(detailed_stack_trace_symbol)       \
+  V(elements_transition_symbol)        \
+  V(error_end_pos_symbol)              \
+  V(error_script_symbol)               \
+  V(error_start_pos_symbol)            \
+  V(frozen_symbol)                     \
+  V(hash_code_symbol)                  \
+  V(home_object_symbol)                \
+  V(intl_impl_object_symbol)           \
+  V(intl_initialized_marker_symbol)    \
+  V(intl_pattern_symbol)               \
+  V(intl_resolved_symbol)              \
+  V(megamorphic_symbol)                \
+  V(native_context_index_symbol)       \
+  V(nonexistent_symbol)                \
+  V(nonextensible_symbol)              \
+  V(normal_ic_symbol)                  \
+  V(not_mapped_symbol)                 \
+  V(premonomorphic_symbol)             \
+  V(promise_async_stack_id_symbol)     \
+  V(promise_debug_marker_symbol)       \
+  V(promise_deferred_reaction_symbol)  \
+  V(promise_forwarding_handler_symbol) \
+  V(promise_fulfill_reactions_symbol)  \
+  V(promise_handled_by_symbol)         \
+  V(promise_handled_hint_symbol)       \
+  V(promise_has_handler_symbol)        \
+  V(promise_raw_symbol)                \
+  V(promise_reject_reactions_symbol)   \
+  V(promise_result_symbol)             \
+  V(promise_state_symbol)              \
+  V(sealed_symbol)                     \
+  V(stack_trace_symbol)                \
+  V(strict_function_transition_symbol) \
   V(uninitialized_symbol)
 
 #define PUBLIC_SYMBOL_LIST(V)                \
diff --git a/src/heap/gc-idle-time-handler.h b/src/heap/gc-idle-time-handler.h
index 39dea7e..7ce0c1a 100644
--- a/src/heap/gc-idle-time-handler.h
+++ b/src/heap/gc-idle-time-handler.h
@@ -68,7 +68,7 @@
 
 // The idle time handler makes decisions about which garbage collection
 // operations are executing during IdleNotification.
-class GCIdleTimeHandler {
+class V8_EXPORT_PRIVATE GCIdleTimeHandler {
  public:
   // If we haven't recorded any incremental marking events yet, we carefully
   // mark with a conservative lower bound for the marking speed.
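
V8_EXPORT_PRIVATE, added to GCIdleTimeHandler above (and to GCTracer further
down), makes an internal class linkable from outside the main library so that
component builds can exercise it from unit tests. A minimal sketch of how such
an export macro is commonly defined; the macro and configuration names here
are illustrative, not V8's exact setup:

// Sketch of an export macro in the style of V8_EXPORT_PRIVATE.
#if defined(_WIN32)
#if defined(BUILDING_SHARED_LIB)
#define EXPORT_PRIVATE __declspec(dllexport)
#else
#define EXPORT_PRIVATE __declspec(dllimport)
#endif
#else
// GCC/clang: keep the symbol visible even under -fvisibility=hidden.
#define EXPORT_PRIVATE __attribute__((visibility("default")))
#endif

class EXPORT_PRIVATE IdleHandlerLikeClass {
 public:
  int Compute(double idle_time_ms);  // now callable from a test binary
};
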
diff --git a/src/heap/gc-tracer.cc b/src/heap/gc-tracer.cc
index 8049ce4..dcd319f 100644
--- a/src/heap/gc-tracer.cc
+++ b/src/heap/gc-tracer.cc
@@ -11,10 +11,11 @@
 namespace v8 {
 namespace internal {
 
-static intptr_t CountTotalHolesSize(Heap* heap) {
-  intptr_t holes_size = 0;
+static size_t CountTotalHolesSize(Heap* heap) {
+  size_t holes_size = 0;
   OldSpaces spaces(heap);
   for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
+    DCHECK_GE(holes_size + space->Waste() + space->Available(), holes_size);
     holes_size += space->Waste() + space->Available();
   }
   return holes_size;
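
The DCHECK_GE added above is an unsigned-overflow guard: size_t addition wraps
silently, and a wrapped sum is always smaller than either operand, so checking
sum >= a detects the wrap. A standalone sketch of the idiom (my own example,
not V8 code):

#include <cassert>
#include <cstddef>

// Returns a + b, asserting that the unsigned addition did not wrap around.
static size_t CheckedAdd(size_t a, size_t b) {
  size_t sum = a + b;  // unsigned overflow is well-defined modular arithmetic
  assert(sum >= a);    // wraps iff sum < a (equivalently, sum < b)
  return sum;
}

int main() {
  size_t holes = 0;
  holes = CheckedAdd(holes, 4096);  // fine; two huge operands would assert
  return holes == 4096 ? 0 : 1;
}
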
@@ -28,8 +29,7 @@
   STATIC_ASSERT(FIRST_INCREMENTAL_SCOPE == 0);
   start_time_ = tracer_->heap_->MonotonicallyIncreasingTimeInMs();
   // TODO(cbruni): remove once we have fully moved to a trace-based system.
-  if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
-      FLAG_runtime_call_stats) {
+  if (V8_UNLIKELY(FLAG_runtime_stats)) {
     RuntimeCallStats::Enter(
         tracer_->heap_->isolate()->counters()->runtime_call_stats(), &timer_,
         &RuntimeCallStats::GC);
@@ -40,8 +40,7 @@
   tracer_->AddScopeSample(
       scope_, tracer_->heap_->MonotonicallyIncreasingTimeInMs() - start_time_);
   // TODO(cbruni): remove once we have fully moved to a trace-based system.
-  if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
-      FLAG_runtime_call_stats) {
+  if (V8_UNLIKELY(FLAG_runtime_stats)) {
     RuntimeCallStats::Leave(
         tracer_->heap_->isolate()->counters()->runtime_call_stats(), &timer_);
   }
@@ -83,28 +82,17 @@
   }
 }
 
-
 const char* GCTracer::Event::TypeName(bool short_name) const {
   switch (type) {
     case SCAVENGER:
-      if (short_name) {
-        return "s";
-      } else {
-        return "Scavenge";
-      }
+      return (short_name) ? "s" : "Scavenge";
     case MARK_COMPACTOR:
     case INCREMENTAL_MARK_COMPACTOR:
-      if (short_name) {
-        return "ms";
-      } else {
-        return "Mark-sweep";
-      }
+      return (short_name) ? "ms" : "Mark-sweep";
+    case MINOR_MARK_COMPACTOR:
+      return (short_name) ? "mmc" : "Minor Mark-Compact";
     case START:
-      if (short_name) {
-        return "st";
-      } else {
-        return "Start";
-      }
+      return (short_name) ? "st" : "Start";
   }
   return "Unknown Event Type";
 }
@@ -115,6 +103,7 @@
       previous_(current_),
       incremental_marking_bytes_(0),
       incremental_marking_duration_(0.0),
+      incremental_marking_start_time_(0.0),
       recorded_incremental_marking_speed_(0.0),
       allocation_time_ms_(0.0),
       new_space_allocation_counter_bytes_(0),
@@ -139,8 +128,8 @@
   new_space_allocation_in_bytes_since_gc_ = 0.0;
   old_generation_allocation_in_bytes_since_gc_ = 0.0;
   combined_mark_compact_speed_cache_ = 0.0;
-  recorded_scavenges_total_.Reset();
-  recorded_scavenges_survived_.Reset();
+  recorded_minor_gcs_total_.Reset();
+  recorded_minor_gcs_survived_.Reset();
   recorded_compactions_.Reset();
   recorded_mark_compacts_.Reset();
   recorded_incremental_mark_compacts_.Reset();
@@ -162,15 +151,22 @@
   SampleAllocation(start_time, heap_->NewSpaceAllocationCounter(),
                    heap_->OldGenerationAllocationCounter());
 
-  if (collector == SCAVENGER) {
-    current_ = Event(Event::SCAVENGER, gc_reason, collector_reason);
-  } else if (collector == MARK_COMPACTOR) {
-    if (heap_->incremental_marking()->WasActivated()) {
+  switch (collector) {
+    case SCAVENGER:
+      current_ = Event(Event::SCAVENGER, gc_reason, collector_reason);
+      break;
+    case MINOR_MARK_COMPACTOR:
       current_ =
-          Event(Event::INCREMENTAL_MARK_COMPACTOR, gc_reason, collector_reason);
-    } else {
-      current_ = Event(Event::MARK_COMPACTOR, gc_reason, collector_reason);
-    }
+          Event(Event::MINOR_MARK_COMPACTOR, gc_reason, collector_reason);
+      break;
+    case MARK_COMPACTOR:
+      if (heap_->incremental_marking()->WasActivated()) {
+        current_ = Event(Event::INCREMENTAL_MARK_COMPACTOR, gc_reason,
+                         collector_reason);
+      } else {
+        current_ = Event(Event::MARK_COMPACTOR, gc_reason, collector_reason);
+      }
+      break;
   }
 
   current_.reduce_memory = heap_->ShouldReduceMemory();
@@ -188,12 +184,12 @@
     current_.scopes[i] = 0;
   }
 
-  int committed_memory = static_cast<int>(heap_->CommittedMemory() / KB);
-  int used_memory = static_cast<int>(current_.start_object_size / KB);
+  size_t committed_memory = heap_->CommittedMemory() / KB;
+  size_t used_memory = current_.start_object_size / KB;
 
   Counters* counters = heap_->isolate()->counters();
 
-  if (collector == SCAVENGER) {
+  if (Heap::IsYoungGenerationCollector(collector)) {
     counters->scavenge_reason()->AddSample(static_cast<int>(gc_reason));
   } else {
     counters->mark_compact_reason()->AddSample(static_cast<int>(gc_reason));
@@ -202,8 +198,7 @@
                                                           committed_memory);
   counters->aggregated_memory_heap_used()->AddSample(start_time, used_memory);
   // TODO(cbruni): remove once we have fully moved to a trace-based system.
-  if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
-      FLAG_runtime_call_stats) {
+  if (V8_UNLIKELY(FLAG_runtime_stats)) {
     RuntimeCallStats::Enter(heap_->isolate()->counters()->runtime_call_stats(),
                             &timer_, &RuntimeCallStats::GC);
   }
@@ -220,15 +215,16 @@
 void GCTracer::Stop(GarbageCollector collector) {
   start_counter_--;
   if (start_counter_ != 0) {
-    heap_->isolate()->PrintWithTimestamp(
-        "[Finished reentrant %s during %s.]\n",
-        collector == SCAVENGER ? "Scavenge" : "Mark-sweep",
-        current_.TypeName(false));
+    heap_->isolate()->PrintWithTimestamp("[Finished reentrant %s during %s.]\n",
+                                         Heap::CollectorName(collector),
+                                         current_.TypeName(false));
     return;
   }
 
   DCHECK(start_counter_ >= 0);
   DCHECK((collector == SCAVENGER && current_.type == Event::SCAVENGER) ||
+         (collector == MINOR_MARK_COMPACTOR &&
+          current_.type == Event::MINOR_MARK_COMPACTOR) ||
          (collector == MARK_COMPACTOR &&
           (current_.type == Event::MARK_COMPACTOR ||
            current_.type == Event::INCREMENTAL_MARK_COMPACTOR)));
@@ -241,8 +237,8 @@
 
   AddAllocation(current_.end_time);
 
-  int committed_memory = static_cast<int>(heap_->CommittedMemory() / KB);
-  int used_memory = static_cast<int>(current_.end_object_size / KB);
+  size_t committed_memory = heap_->CommittedMemory() / KB;
+  size_t used_memory = current_.end_object_size / KB;
   heap_->isolate()->counters()->aggregated_memory_heap_committed()->AddSample(
       current_.end_time, committed_memory);
   heap_->isolate()->counters()->aggregated_memory_heap_used()->AddSample(
@@ -250,36 +246,45 @@
 
   double duration = current_.end_time - current_.start_time;
 
-  if (current_.type == Event::SCAVENGER) {
-    recorded_scavenges_total_.Push(
-        MakeBytesAndDuration(current_.new_space_object_size, duration));
-    recorded_scavenges_survived_.Push(MakeBytesAndDuration(
-        current_.survived_new_space_object_size, duration));
-  } else if (current_.type == Event::INCREMENTAL_MARK_COMPACTOR) {
-    current_.incremental_marking_bytes = incremental_marking_bytes_;
-    current_.incremental_marking_duration = incremental_marking_duration_;
-    for (int i = 0; i < Scope::NUMBER_OF_INCREMENTAL_SCOPES; i++) {
-      current_.incremental_marking_scopes[i] = incremental_marking_scopes_[i];
-      current_.scopes[i] = incremental_marking_scopes_[i].duration;
-    }
-    RecordIncrementalMarkingSpeed(current_.incremental_marking_bytes,
-                                  current_.incremental_marking_duration);
-    recorded_incremental_mark_compacts_.Push(
-        MakeBytesAndDuration(current_.start_object_size, duration));
-    ResetIncrementalMarkingCounters();
-    combined_mark_compact_speed_cache_ = 0.0;
-  } else {
-    DCHECK_EQ(0, current_.incremental_marking_bytes);
-    DCHECK_EQ(0, current_.incremental_marking_duration);
-    recorded_mark_compacts_.Push(
-        MakeBytesAndDuration(current_.start_object_size, duration));
-    ResetIncrementalMarkingCounters();
-    combined_mark_compact_speed_cache_ = 0.0;
+  switch (current_.type) {
+    case Event::SCAVENGER:
+    case Event::MINOR_MARK_COMPACTOR:
+      recorded_minor_gcs_total_.Push(
+          MakeBytesAndDuration(current_.new_space_object_size, duration));
+      recorded_minor_gcs_survived_.Push(MakeBytesAndDuration(
+          current_.survived_new_space_object_size, duration));
+      break;
+    case Event::INCREMENTAL_MARK_COMPACTOR:
+      current_.incremental_marking_bytes = incremental_marking_bytes_;
+      current_.incremental_marking_duration = incremental_marking_duration_;
+      for (int i = 0; i < Scope::NUMBER_OF_INCREMENTAL_SCOPES; i++) {
+        current_.incremental_marking_scopes[i] = incremental_marking_scopes_[i];
+        current_.scopes[i] = incremental_marking_scopes_[i].duration;
+      }
+      RecordIncrementalMarkingSpeed(current_.incremental_marking_bytes,
+                                    current_.incremental_marking_duration);
+      recorded_incremental_mark_compacts_.Push(
+          MakeBytesAndDuration(current_.start_object_size, duration));
+      ResetIncrementalMarkingCounters();
+      combined_mark_compact_speed_cache_ = 0.0;
+      break;
+    case Event::MARK_COMPACTOR:
+      DCHECK_EQ(0u, current_.incremental_marking_bytes);
+      DCHECK_EQ(0, current_.incremental_marking_duration);
+      recorded_mark_compacts_.Push(
+          MakeBytesAndDuration(current_.start_object_size, duration));
+      ResetIncrementalMarkingCounters();
+      combined_mark_compact_speed_cache_ = 0.0;
+      break;
+    case Event::START:
+      UNREACHABLE();
   }
 
   heap_->UpdateTotalGCTime(duration);
 
-  if (current_.type == Event::SCAVENGER && FLAG_trace_gc_ignore_scavenger)
+  if ((current_.type == Event::SCAVENGER ||
+       current_.type == Event::MINOR_MARK_COMPACTOR) &&
+      FLAG_trace_gc_ignore_scavenger)
     return;
 
   if (FLAG_trace_gc_nvp) {
@@ -293,8 +298,7 @@
   }
 
   // TODO(cbruni): remove once we have fully moved to a trace-based system.
-  if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
-      FLAG_runtime_call_stats) {
+  if (V8_UNLIKELY(FLAG_runtime_stats)) {
     RuntimeCallStats::Leave(heap_->isolate()->counters()->runtime_call_stats(),
                             &timer_);
   }
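
V8_UNLIKELY, used in the rewritten conditions above, is a branch-prediction
hint: on GCC and clang it wraps __builtin_expect so the compiler lays the
stats path out as the cold branch. A minimal sketch of the pattern; the macro
name and flag below are stand-ins, not V8's definitions:

#if defined(__GNUC__) || defined(__clang__)
#define UNLIKELY_DEMO(cond) (__builtin_expect(!!(cond), 0))
#else
#define UNLIKELY_DEMO(cond) (cond)
#endif

extern bool flag_runtime_stats_demo;  // stand-in for the real runtime flag

void MaybeEnterStats() {
  if (UNLIKELY_DEMO(flag_runtime_stats_demo)) {
    // Rarely taken: enter runtime-call-stats timing here.
  }
}
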
@@ -348,9 +352,8 @@
   recorded_context_disposal_times_.Push(time);
 }
 
-
 void GCTracer::AddCompactionEvent(double duration,
-                                  intptr_t live_bytes_compacted) {
+                                  size_t live_bytes_compacted) {
   recorded_compactions_.Push(
       MakeBytesAndDuration(live_bytes_compacted, duration));
 }
@@ -360,8 +363,7 @@
   recorded_survival_ratios_.Push(promotion_ratio);
 }
 
-
-void GCTracer::AddIncrementalMarkingStep(double duration, intptr_t bytes) {
+void GCTracer::AddIncrementalMarkingStep(double duration, size_t bytes) {
   if (bytes > 0) {
     incremental_marking_bytes_ += bytes;
     incremental_marking_duration_ += duration;
@@ -426,7 +428,7 @@
 void GCTracer::PrintNVP() const {
   double duration = current_.end_time - current_.start_time;
   double spent_in_mutator = current_.start_time - previous_.end_time;
-  intptr_t allocated_since_last_gc =
+  size_t allocated_since_last_gc =
       current_.start_object_size - previous_.end_object_size;
 
   double incremental_walltime_duration = 0;
@@ -449,26 +451,25 @@
           "roots=%.2f "
           "code=%.2f "
           "semispace=%.2f "
-          "object_groups=%.2f "
-          "external_prologue=%.2f "
-          "external_epilogue=%.2f "
+          "external.prologue=%.2f "
+          "external.epilogue=%.2f "
           "external_weak_global_handles=%.2f "
           "steps_count=%d "
           "steps_took=%.1f "
           "scavenge_throughput=%.f "
-          "total_size_before=%" V8PRIdPTR
+          "total_size_before=%" PRIuS
           " "
-          "total_size_after=%" V8PRIdPTR
+          "total_size_after=%" PRIuS
           " "
-          "holes_size_before=%" V8PRIdPTR
+          "holes_size_before=%" PRIuS
           " "
-          "holes_size_after=%" V8PRIdPTR
+          "holes_size_after=%" PRIuS
           " "
-          "allocated=%" V8PRIdPTR
+          "allocated=%" PRIuS
           " "
-          "promoted=%" V8PRIdPTR
+          "promoted=%" PRIuS
           " "
-          "semi_space_copied=%" V8PRIdPTR
+          "semi_space_copied=%" PRIuS
           " "
           "nodes_died_in_new=%d "
           "nodes_copied_in_new=%d "
@@ -486,9 +487,8 @@
           current_.scopes[Scope::SCAVENGER_ROOTS],
           current_.scopes[Scope::SCAVENGER_CODE_FLUSH_CANDIDATES],
           current_.scopes[Scope::SCAVENGER_SEMISPACE],
-          current_.scopes[Scope::SCAVENGER_OBJECT_GROUPS],
-          current_.scopes[Scope::SCAVENGER_EXTERNAL_PROLOGUE],
-          current_.scopes[Scope::SCAVENGER_EXTERNAL_EPILOGUE],
+          current_.scopes[Scope::EXTERNAL_PROLOGUE],
+          current_.scopes[Scope::EXTERNAL_EPILOGUE],
           current_.scopes[Scope::EXTERNAL_WEAK_GLOBAL_HANDLES],
           current_.incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL]
               .steps,
@@ -505,6 +505,15 @@
           NewSpaceAllocationThroughputInBytesPerMillisecond(),
           ContextDisposalRateInMilliseconds());
       break;
+    case Event::MINOR_MARK_COMPACTOR:
+      heap_->isolate()->PrintWithTimestamp(
+          "pause=%.1f "
+          "mutator=%.1f "
+          "gc=%s "
+          "reduce_memory=%d\n",
+          duration, spent_in_mutator, current_.TypeName(true),
+          current_.reduce_memory);
+      break;
     case Event::MARK_COMPACTOR:
     case Event::INCREMENTAL_MARK_COMPACTOR:
       heap_->isolate()->PrintWithTimestamp(
@@ -523,6 +532,7 @@
           "clear.weak_cells=%.1f "
           "clear.weak_collections=%.1f "
           "clear.weak_lists=%.1f "
+          "epilogue=%.1f "
           "evacuate=%.1f "
           "evacuate.candidates=%.1f "
           "evacuate.clean_up=%.1f "
@@ -531,8 +541,8 @@
           "evacuate.update_pointers.to_evacuated=%.1f "
           "evacuate.update_pointers.to_new=%.1f "
           "evacuate.update_pointers.weak=%.1f "
-          "external.mc_prologue=%.1f "
-          "external.mc_epilogue=%.1f "
+          "external.prologue=%.1f "
+          "external.epilogue=%.1f "
           "external.weak_global_handles=%.1f "
           "finish=%.1f "
           "mark=%.1f "
@@ -548,6 +558,7 @@
           "mark.wrapper_prologue=%.1f "
           "mark.wrapper_epilogue=%.1f "
           "mark.wrapper_tracing=%.1f "
+          "prologue=%.1f "
           "sweep=%.1f "
           "sweep.code=%.1f "
           "sweep.map=%.1f "
@@ -568,19 +579,19 @@
           "incremental_steps_count=%d "
           "incremental_marking_throughput=%.f "
           "incremental_walltime_duration=%.f "
-          "total_size_before=%" V8PRIdPTR
+          "total_size_before=%" PRIuS
           " "
-          "total_size_after=%" V8PRIdPTR
+          "total_size_after=%" PRIuS
           " "
-          "holes_size_before=%" V8PRIdPTR
+          "holes_size_before=%" PRIuS
           " "
-          "holes_size_after=%" V8PRIdPTR
+          "holes_size_after=%" PRIuS
           " "
-          "allocated=%" V8PRIdPTR
+          "allocated=%" PRIuS
           " "
-          "promoted=%" V8PRIdPTR
+          "promoted=%" PRIuS
           " "
-          "semi_space_copied=%" V8PRIdPTR
+          "semi_space_copied=%" PRIuS
           " "
           "nodes_died_in_new=%d "
           "nodes_copied_in_new=%d "
@@ -604,6 +615,7 @@
           current_.scopes[Scope::MC_CLEAR_WEAK_CELLS],
           current_.scopes[Scope::MC_CLEAR_WEAK_COLLECTIONS],
           current_.scopes[Scope::MC_CLEAR_WEAK_LISTS],
+          current_.scopes[Scope::MC_EPILOGUE],
           current_.scopes[Scope::MC_EVACUATE],
           current_.scopes[Scope::MC_EVACUATE_CANDIDATES],
           current_.scopes[Scope::MC_EVACUATE_CLEAN_UP],
@@ -612,8 +624,8 @@
           current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED],
           current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW],
           current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK],
-          current_.scopes[Scope::MC_EXTERNAL_PROLOGUE],
-          current_.scopes[Scope::MC_EXTERNAL_EPILOGUE],
+          current_.scopes[Scope::EXTERNAL_PROLOGUE],
+          current_.scopes[Scope::EXTERNAL_EPILOGUE],
           current_.scopes[Scope::EXTERNAL_WEAK_GLOBAL_HANDLES],
           current_.scopes[Scope::MC_FINISH], current_.scopes[Scope::MC_MARK],
           current_.scopes[Scope::MC_MARK_FINISH_INCREMENTAL],
@@ -628,7 +640,7 @@
           current_.scopes[Scope::MC_MARK_WRAPPER_PROLOGUE],
           current_.scopes[Scope::MC_MARK_WRAPPER_EPILOGUE],
           current_.scopes[Scope::MC_MARK_WRAPPER_TRACING],
-          current_.scopes[Scope::MC_SWEEP],
+          current_.scopes[Scope::MC_PROLOGUE], current_.scopes[Scope::MC_SWEEP],
           current_.scopes[Scope::MC_SWEEP_CODE],
           current_.scopes[Scope::MC_SWEEP_MAP],
           current_.scopes[Scope::MC_SWEEP_OLD],
@@ -674,7 +686,7 @@
   }
 }
 
-double GCTracer::AverageSpeed(const RingBuffer<BytesAndDuration>& buffer,
+double GCTracer::AverageSpeed(const base::RingBuffer<BytesAndDuration>& buffer,
                               const BytesAndDuration& initial, double time_ms) {
   BytesAndDuration sum = buffer.Sum(
       [time_ms](BytesAndDuration a, BytesAndDuration b) {
@@ -693,11 +705,12 @@
   return speed;
 }
 
-double GCTracer::AverageSpeed(const RingBuffer<BytesAndDuration>& buffer) {
+double GCTracer::AverageSpeed(
+    const base::RingBuffer<BytesAndDuration>& buffer) {
   return AverageSpeed(buffer, MakeBytesAndDuration(0, 0), 0);
 }
 
-void GCTracer::RecordIncrementalMarkingSpeed(intptr_t bytes, double duration) {
+void GCTracer::RecordIncrementalMarkingSpeed(size_t bytes, double duration) {
   if (duration == 0 || bytes == 0) return;
   double current_speed = bytes / duration;
   if (recorded_incremental_marking_speed_ == 0) {
@@ -722,9 +735,9 @@
 double GCTracer::ScavengeSpeedInBytesPerMillisecond(
     ScavengeSpeedMode mode) const {
   if (mode == kForAllObjects) {
-    return AverageSpeed(recorded_scavenges_total_);
+    return AverageSpeed(recorded_minor_gcs_total_);
   } else {
-    return AverageSpeed(recorded_scavenges_survived_);
+    return AverageSpeed(recorded_minor_gcs_survived_);
   }
 }
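
The renamed buffers above still feed the AverageSpeed computation defined
earlier in this file: sum the recorded (bytes, duration) samples and divide,
clamping to the 1 byte/ms to 1 GB/ms range that the gc-tracer.h comment
documents. A simplified standalone sketch of that throughput calculation
(illustrative; the real code also supports windowed sums over a time limit):

#include <cstdint>
#include <utility>
#include <vector>

using BytesAndDuration = std::pair<uint64_t, double>;  // (bytes, milliseconds)

// Average speed in bytes/ms over the recorded samples; 0 if nothing recorded.
double AverageSpeedSketch(const std::vector<BytesAndDuration>& samples) {
  uint64_t bytes = 0;
  double duration_ms = 0;
  for (const BytesAndDuration& sample : samples) {
    bytes += sample.first;
    duration_ms += sample.second;
  }
  if (duration_ms == 0 || bytes == 0) return 0;
  const double kMaxSpeed = 1024.0 * 1024 * 1024;  // 1 GB/ms upper clamp
  double speed = bytes / duration_ms;
  if (speed >= kMaxSpeed) return kMaxSpeed;
  if (speed <= 1) return 1;  // 1 byte/ms lower clamp
  return speed;
}
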
 
diff --git a/src/heap/gc-tracer.h b/src/heap/gc-tracer.h
index e8c72c1..ed62dee 100644
--- a/src/heap/gc-tracer.h
+++ b/src/heap/gc-tracer.h
@@ -7,6 +7,7 @@
 
 #include "src/base/compiler-specific.h"
 #include "src/base/platform/platform.h"
+#include "src/base/ring-buffer.h"
 #include "src/counters.h"
 #include "src/globals.h"
 #include "testing/gtest/include/gtest/gtest_prod.h"
@@ -14,44 +15,6 @@
 namespace v8 {
 namespace internal {
 
-template <typename T>
-class RingBuffer {
- public:
-  RingBuffer() { Reset(); }
-  static const int kSize = 10;
-  void Push(const T& value) {
-    if (count_ == kSize) {
-      elements_[start_++] = value;
-      if (start_ == kSize) start_ = 0;
-    } else {
-      DCHECK_EQ(start_, 0);
-      elements_[count_++] = value;
-    }
-  }
-
-  int Count() const { return count_; }
-
-  template <typename Callback>
-  T Sum(Callback callback, const T& initial) const {
-    int j = start_ + count_ - 1;
-    if (j >= kSize) j -= kSize;
-    T result = initial;
-    for (int i = 0; i < count_; i++) {
-      result = callback(result, elements_[j]);
-      if (--j == -1) j += kSize;
-    }
-    return result;
-  }
-
-  void Reset() { start_ = count_ = 0; }
-
- private:
-  T elements_[kSize];
-  int start_;
-  int count_;
-  DISALLOW_COPY_AND_ASSIGN(RingBuffer);
-};
-
 typedef std::pair<uint64_t, double> BytesAndDuration;
 
 inline BytesAndDuration MakeBytesAndDuration(uint64_t bytes, double duration) {
@@ -74,6 +37,8 @@
 
 #define TRACER_SCOPES(F)                      \
   INCREMENTAL_SCOPES(F)                       \
+  F(EXTERNAL_EPILOGUE)                        \
+  F(EXTERNAL_PROLOGUE)                        \
   F(EXTERNAL_WEAK_GLOBAL_HANDLES)             \
   F(MC_CLEAR)                                 \
   F(MC_CLEAR_CODE_FLUSH)                      \
@@ -86,6 +51,7 @@
   F(MC_CLEAR_WEAK_CELLS)                      \
   F(MC_CLEAR_WEAK_COLLECTIONS)                \
   F(MC_CLEAR_WEAK_LISTS)                      \
+  F(MC_EPILOGUE)                              \
   F(MC_EVACUATE)                              \
   F(MC_EVACUATE_CANDIDATES)                   \
   F(MC_EVACUATE_CLEAN_UP)                     \
@@ -94,8 +60,6 @@
   F(MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED) \
   F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW)       \
   F(MC_EVACUATE_UPDATE_POINTERS_WEAK)         \
-  F(MC_EXTERNAL_EPILOGUE)                     \
-  F(MC_EXTERNAL_PROLOGUE)                     \
   F(MC_FINISH)                                \
   F(MC_MARK)                                  \
   F(MC_MARK_FINISH_INCREMENTAL)               \
@@ -110,14 +74,12 @@
   F(MC_MARK_WRAPPER_PROLOGUE)                 \
   F(MC_MARK_WRAPPER_TRACING)                  \
   F(MC_MARK_OBJECT_GROUPING)                  \
+  F(MC_PROLOGUE)                              \
   F(MC_SWEEP)                                 \
   F(MC_SWEEP_CODE)                            \
   F(MC_SWEEP_MAP)                             \
   F(MC_SWEEP_OLD)                             \
   F(SCAVENGER_CODE_FLUSH_CANDIDATES)          \
-  F(SCAVENGER_EXTERNAL_EPILOGUE)              \
-  F(SCAVENGER_EXTERNAL_PROLOGUE)              \
-  F(SCAVENGER_OBJECT_GROUPS)                  \
   F(SCAVENGER_OLD_TO_NEW_POINTERS)            \
   F(SCAVENGER_ROOTS)                          \
   F(SCAVENGER_SCAVENGE)                       \
@@ -132,7 +94,7 @@
 
 // GCTracer collects and prints ONE line after each garbage collector
 // invocation IFF --trace_gc is used.
-class GCTracer {
+class V8_EXPORT_PRIVATE GCTracer {
  public:
   struct IncrementalMarkingInfos {
     IncrementalMarkingInfos() : duration(0), longest_step(0), steps(0) {}
@@ -190,7 +152,8 @@
       SCAVENGER = 0,
       MARK_COMPACTOR = 1,
       INCREMENTAL_MARK_COMPACTOR = 2,
-      START = 3
+      MINOR_MARK_COMPACTOR = 3,
+      START = 4
     };
 
     Event(Type type, GarbageCollectionReason gc_reason,
@@ -215,10 +178,10 @@
     bool reduce_memory;
 
     // Size of objects in heap set in constructor.
-    intptr_t start_object_size;
+    size_t start_object_size;
 
     // Size of objects in heap set in destructor.
-    intptr_t end_object_size;
+    size_t end_object_size;
 
     // Size of memory allocated from OS set in constructor.
     size_t start_memory_size;
@@ -228,23 +191,20 @@
 
     // Total amount of space either wasted or contained in one of free lists
     // before the current GC.
-    intptr_t start_holes_size;
+    size_t start_holes_size;
 
     // Total amount of space either wasted or contained in one of free lists
     // after the current GC.
-    intptr_t end_holes_size;
+    size_t end_holes_size;
 
     // Size of new space objects in constructor.
-    intptr_t new_space_object_size;
+    size_t new_space_object_size;
 
     // Size of survived new space objects in destructor.
-    intptr_t survived_new_space_object_size;
-
-    // Bytes marked since creation of tracer (value at start of event).
-    intptr_t cumulative_incremental_marking_bytes;
+    size_t survived_new_space_object_size;
 
     // Bytes marked incrementally for INCREMENTAL_MARK_COMPACTOR
-    intptr_t incremental_marking_bytes;
+    size_t incremental_marking_bytes;
 
     // Duration of incremental marking steps for INCREMENTAL_MARK_COMPACTOR.
     double incremental_marking_duration;
@@ -277,12 +237,12 @@
 
   void AddContextDisposalTime(double time);
 
-  void AddCompactionEvent(double duration, intptr_t live_bytes_compacted);
+  void AddCompactionEvent(double duration, size_t live_bytes_compacted);
 
   void AddSurvivalRatio(double survival_ratio);
 
   // Log an incremental marking step.
-  void AddIncrementalMarkingStep(double duration, intptr_t bytes);
+  void AddIncrementalMarkingStep(double duration, size_t bytes);
 
   // Compute the average incremental marking speed in bytes/millisecond.
   // Returns 0 if no events have been recorded.
@@ -380,13 +340,13 @@
   // Returns the average speed of the events in the buffer.
   // If the buffer is empty, the result is 0.
   // Otherwise, the result is between 1 byte/ms and 1 GB/ms.
-  static double AverageSpeed(const RingBuffer<BytesAndDuration>& buffer);
-  static double AverageSpeed(const RingBuffer<BytesAndDuration>& buffer,
+  static double AverageSpeed(const base::RingBuffer<BytesAndDuration>& buffer);
+  static double AverageSpeed(const base::RingBuffer<BytesAndDuration>& buffer,
                              const BytesAndDuration& initial, double time_ms);
 
   void ResetForTesting();
   void ResetIncrementalMarkingCounters();
-  void RecordIncrementalMarkingSpeed(intptr_t bytes, double duration);
+  void RecordIncrementalMarkingSpeed(size_t bytes, double duration);
 
   // Print one detailed trace line in name=value format.
   // TODO(ernstm): Move to Heap.
@@ -402,12 +362,10 @@
 
   double TotalExternalTime() const {
     return current_.scopes[Scope::EXTERNAL_WEAK_GLOBAL_HANDLES] +
-           current_.scopes[Scope::MC_EXTERNAL_EPILOGUE] +
-           current_.scopes[Scope::MC_EXTERNAL_PROLOGUE] +
+           current_.scopes[Scope::EXTERNAL_EPILOGUE] +
+           current_.scopes[Scope::EXTERNAL_PROLOGUE] +
            current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE] +
-           current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE] +
-           current_.scopes[Scope::SCAVENGER_EXTERNAL_EPILOGUE] +
-           current_.scopes[Scope::SCAVENGER_EXTERNAL_PROLOGUE];
+           current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE];
   }
 
   // Pointer to the heap that owns this tracer.
@@ -422,7 +380,7 @@
 
   // Size of incremental marking steps (in bytes) accumulated since the end of
   // the last mark compact GC.
-  intptr_t incremental_marking_bytes_;
+  size_t incremental_marking_bytes_;
 
   // Duration of incremental marking steps since the end of the last mark-
   // compact event.
@@ -456,15 +414,15 @@
   // Separate timer used for --runtime_call_stats
   RuntimeCallTimer timer_;
 
-  RingBuffer<BytesAndDuration> recorded_scavenges_total_;
-  RingBuffer<BytesAndDuration> recorded_scavenges_survived_;
-  RingBuffer<BytesAndDuration> recorded_compactions_;
-  RingBuffer<BytesAndDuration> recorded_incremental_mark_compacts_;
-  RingBuffer<BytesAndDuration> recorded_mark_compacts_;
-  RingBuffer<BytesAndDuration> recorded_new_generation_allocations_;
-  RingBuffer<BytesAndDuration> recorded_old_generation_allocations_;
-  RingBuffer<double> recorded_context_disposal_times_;
-  RingBuffer<double> recorded_survival_ratios_;
+  base::RingBuffer<BytesAndDuration> recorded_minor_gcs_total_;
+  base::RingBuffer<BytesAndDuration> recorded_minor_gcs_survived_;
+  base::RingBuffer<BytesAndDuration> recorded_compactions_;
+  base::RingBuffer<BytesAndDuration> recorded_incremental_mark_compacts_;
+  base::RingBuffer<BytesAndDuration> recorded_mark_compacts_;
+  base::RingBuffer<BytesAndDuration> recorded_new_generation_allocations_;
+  base::RingBuffer<BytesAndDuration> recorded_old_generation_allocations_;
+  base::RingBuffer<double> recorded_context_disposal_times_;
+  base::RingBuffer<double> recorded_survival_ratios_;
 
   DISALLOW_COPY_AND_ASSIGN(GCTracer);
 };
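
The RingBuffer deleted above moves to src/base/ring-buffer.h (hence the new
include at the top of this file) rather than disappearing. Its semantics are
visible in the removed code: a fixed capacity of ten entries, Push overwrites
the oldest entry once full, and Sum folds from the newest element backwards.
A self-contained sketch that copies the deleted class to demonstrate those
semantics (the real class is v8::base::RingBuffer):

#include <cstdio>

template <typename T>
class RingBuffer {  // minimal copy of the class removed above
 public:
  RingBuffer() : start_(0), count_(0) {}
  static const int kSize = 10;
  void Push(const T& value) {
    if (count_ == kSize) {
      elements_[start_++] = value;  // full: overwrite the oldest entry
      if (start_ == kSize) start_ = 0;
    } else {
      elements_[count_++] = value;
    }
  }
  int Count() const { return count_; }
  template <typename Callback>
  T Sum(Callback callback, const T& initial) const {
    int j = start_ + count_ - 1;  // index of the newest element
    if (j >= kSize) j -= kSize;
    T result = initial;
    for (int i = 0; i < count_; i++) {  // fold newest to oldest
      result = callback(result, elements_[j]);
      if (--j == -1) j += kSize;
    }
    return result;
  }

 private:
  T elements_[kSize];
  int start_;
  int count_;
};

int main() {
  RingBuffer<int> buffer;
  for (int i = 1; i <= 12; ++i) buffer.Push(i);  // 1 and 2 get overwritten
  int sum = buffer.Sum([](int acc, int v) { return acc + v; }, 0);
  std::printf("count=%d sum=%d\n", buffer.Count(), sum);  // count=10 sum=75
  return 0;
}
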
diff --git a/src/heap/heap-inl.h b/src/heap/heap-inl.h
index 23e1712..7d0d241 100644
--- a/src/heap/heap-inl.h
+++ b/src/heap/heap-inl.h
@@ -12,6 +12,7 @@
 #include "src/heap/heap.h"
 #include "src/heap/incremental-marking-inl.h"
 #include "src/heap/mark-compact.h"
+#include "src/heap/object-stats.h"
 #include "src/heap/remembered-set.h"
 #include "src/heap/spaces-inl.h"
 #include "src/heap/store-buffer.h"
@@ -490,37 +491,18 @@
   return old_space_->ContainsSlow(address);
 }
 
-template <PromotionMode promotion_mode>
 bool Heap::ShouldBePromoted(Address old_address, int object_size) {
   Page* page = Page::FromAddress(old_address);
   Address age_mark = new_space_->age_mark();
-
-  if (promotion_mode == PROMOTE_MARKED) {
-    MarkBit mark_bit = ObjectMarking::MarkBitFrom(old_address);
-    if (!Marking::IsWhite(mark_bit)) {
-      return true;
-    }
-  }
-
   return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
          (!page->ContainsLimit(age_mark) || old_address < age_mark);
 }
 
-PromotionMode Heap::CurrentPromotionMode() {
-  if (incremental_marking()->IsMarking()) {
-    return PROMOTE_MARKED;
-  } else {
-    return DEFAULT_PROMOTION;
-  }
-}
-
 void Heap::RecordWrite(Object* object, int offset, Object* o) {
   if (!InNewSpace(o) || !object->IsHeapObject() || InNewSpace(object)) {
     return;
   }
-  RememberedSet<OLD_TO_NEW>::Insert(
-      Page::FromAddress(reinterpret_cast<Address>(object)),
-      HeapObject::cast(object)->address() + offset);
+  store_buffer()->InsertEntry(HeapObject::cast(object)->address() + offset);
 }
 
 void Heap::RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* value) {
@@ -531,11 +513,9 @@
 
 void Heap::RecordFixedArrayElements(FixedArray* array, int offset, int length) {
   if (InNewSpace(array)) return;
-  Page* page = Page::FromAddress(reinterpret_cast<Address>(array));
   for (int i = 0; i < length; i++) {
     if (!InNewSpace(array->get(offset + i))) continue;
-    RememberedSet<OLD_TO_NEW>::Insert(
-        page,
+    store_buffer()->InsertEntry(
         reinterpret_cast<Address>(array->RawFieldOfElementAt(offset + i)));
   }
 }
@@ -647,7 +627,13 @@
 template <Heap::UpdateAllocationSiteMode mode>
 void Heap::UpdateAllocationSite(HeapObject* object,
                                 base::HashMap* pretenuring_feedback) {
-  DCHECK(InFromSpace(object));
+  DCHECK(InFromSpace(object) ||
+         (InToSpace(object) &&
+          Page::FromAddress(object->address())
+              ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) ||
+         (!InNewSpace(object) &&
+          Page::FromAddress(object->address())
+              ->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)));
   if (!FLAG_allocation_site_pretenuring ||
       !AllocationSite::CanTrack(object->map()->instance_type()))
     return;
@@ -759,9 +745,7 @@
 #endif
 }
 
-void Heap::ClearInstanceofCache() {
-  set_instanceof_cache_function(Smi::FromInt(0));
-}
+void Heap::ClearInstanceofCache() { set_instanceof_cache_function(Smi::kZero); }
 
 Oddball* Heap::ToBoolean(bool condition) {
   return condition ? true_value() : false_value();
@@ -769,8 +753,8 @@
 
 
 void Heap::CompletelyClearInstanceofCache() {
-  set_instanceof_cache_map(Smi::FromInt(0));
-  set_instanceof_cache_function(Smi::FromInt(0));
+  set_instanceof_cache_map(Smi::kZero);
+  set_instanceof_cache_function(Smi::kZero);
 }
 
 
@@ -793,27 +777,27 @@
 }
 
 void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
-  DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0));
+  DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::kZero);
   set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
 }
 
 void Heap::SetConstructStubDeoptPCOffset(int pc_offset) {
-  DCHECK(construct_stub_deopt_pc_offset() == Smi::FromInt(0));
+  DCHECK(construct_stub_deopt_pc_offset() == Smi::kZero);
   set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
 }
 
 void Heap::SetGetterStubDeoptPCOffset(int pc_offset) {
-  DCHECK(getter_stub_deopt_pc_offset() == Smi::FromInt(0));
+  DCHECK(getter_stub_deopt_pc_offset() == Smi::kZero);
   set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
 }
 
 void Heap::SetSetterStubDeoptPCOffset(int pc_offset) {
-  DCHECK(setter_stub_deopt_pc_offset() == Smi::FromInt(0));
+  DCHECK(setter_stub_deopt_pc_offset() == Smi::kZero);
   set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
 }
 
 void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
-  DCHECK(interpreter_entry_return_pc_offset() == Smi::FromInt(0));
+  DCHECK(interpreter_entry_return_pc_offset() == Smi::kZero);
   set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
 }
 
@@ -828,6 +812,16 @@
   set_serialized_templates(templates);
 }
 
+void Heap::CreateObjectStats() {
+  if (V8_LIKELY(FLAG_gc_stats == 0)) return;
+  if (!live_object_stats_) {
+    live_object_stats_ = new ObjectStats(this);
+  }
+  if (!dead_object_stats_) {
+    dead_object_stats_ = new ObjectStats(this);
+  }
+}
+
 AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
     : heap_(isolate->heap()) {
   heap_->always_allocate_scope_count_.Increment(1);
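
The RecordWrite changes above route old-to-new pointers through the store
buffer again instead of inserting straight into the remembered set; the buffer
is drained later via MoveAllEntriesToRememberedSet (see heap.cc below). A toy
sketch of what such a write barrier records; the types and the two boolean
parameters are stand-ins for V8's real page lookups:

#include <cstdint>
#include <unordered_set>

// Toy store buffer: remembers old-generation slots that were written to point
// at young-generation objects, so a minor GC scans only these slots instead
// of the whole old generation.
class ToyStoreBuffer {
 public:
  void InsertEntry(uintptr_t slot_address) { slots_.insert(slot_address); }
  const std::unordered_set<uintptr_t>& entries() const { return slots_; }

 private:
  std::unordered_set<uintptr_t> slots_;
};

// Barrier called on every pointer store `*slot = value`.
void RecordWriteSketch(ToyStoreBuffer* buffer, void** slot, bool slot_is_old,
                       bool value_is_young) {
  if (slot_is_old && value_is_young) {  // only old->young edges matter here
    buffer->InsertEntry(reinterpret_cast<uintptr_t>(slot));
  }
}
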
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 54b8589..2059dae 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -156,8 +156,8 @@
       strong_roots_list_(NULL),
       heap_iterator_depth_(0),
       embedder_heap_tracer_(nullptr),
-      embedder_reference_reporter_(new TracePossibleWrapperReporter(this)),
-      force_oom_(false) {
+      force_oom_(false),
+      delay_sweeper_tasks_for_testing_(false) {
 // Allow build-time customization of the max semispace size. Building
 // V8 with snapshots and a non-default max semispace size is much
 // easier if you can define it as part of the build environment.
@@ -170,23 +170,22 @@
 
   memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
   set_native_contexts_list(NULL);
-  set_allocation_sites_list(Smi::FromInt(0));
-  set_encountered_weak_collections(Smi::FromInt(0));
-  set_encountered_weak_cells(Smi::FromInt(0));
-  set_encountered_transition_arrays(Smi::FromInt(0));
+  set_allocation_sites_list(Smi::kZero);
+  set_encountered_weak_collections(Smi::kZero);
+  set_encountered_weak_cells(Smi::kZero);
+  set_encountered_transition_arrays(Smi::kZero);
   // Put a dummy entry in the remembered pages so we can find the list in the
   // minidump even if there are no real unmapped pages.
   RememberUnmappedPage(NULL, false);
 }
 
-
-intptr_t Heap::Capacity() {
+size_t Heap::Capacity() {
   if (!HasBeenSetUp()) return 0;
 
   return new_space_->Capacity() + OldGenerationCapacity();
 }
 
-intptr_t Heap::OldGenerationCapacity() {
+size_t Heap::OldGenerationCapacity() {
   if (!HasBeenSetUp()) return 0;
 
   return old_space_->Capacity() + code_space_->Capacity() +
@@ -233,11 +232,10 @@
   }
 }
 
-
-intptr_t Heap::Available() {
+size_t Heap::Available() {
   if (!HasBeenSetUp()) return 0;
 
-  intptr_t total = 0;
+  size_t total = 0;
   AllSpaces spaces(this);
   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
     total += space->Available();
@@ -266,6 +264,12 @@
     return MARK_COMPACTOR;
   }
 
+  if (incremental_marking()->NeedsFinalization() &&
+      AllocationLimitOvershotByLargeMargin()) {
+    *reason = "Incremental marking needs finalization";
+    return MARK_COMPACTOR;
+  }
+
   // Is there enough space left in OLD to guarantee that a scavenge can
   // succeed?
   //
@@ -275,8 +279,7 @@
   // and does not count available bytes already in the old space or code
   // space.  Undercounting is safe---we may get an unrequested full GC when
   // a scavenge would have succeeded.
-  if (static_cast<intptr_t>(memory_allocator()->MaxAvailable()) <=
-      new_space_->Size()) {
+  if (memory_allocator()->MaxAvailable() <= new_space_->Size()) {
     isolate_->counters()
         ->gc_compactor_caused_by_oldspace_exhaustion()
         ->Increment();
@@ -286,7 +289,7 @@
 
   // Default
   *reason = NULL;
-  return SCAVENGER;
+  return YoungGenerationCollector();
 }
 
 
@@ -316,55 +319,55 @@
 
 void Heap::PrintShortHeapStatistics() {
   if (!FLAG_trace_gc_verbose) return;
-  PrintIsolate(isolate_,
-               "Memory allocator,   used: %6zu KB,"
-               " available: %6zu KB\n",
+  PrintIsolate(isolate_, "Memory allocator,   used: %6" PRIuS
+                         " KB,"
+                         " available: %6" PRIuS " KB\n",
                memory_allocator()->Size() / KB,
                memory_allocator()->Available() / KB);
-  PrintIsolate(isolate_, "New space,          used: %6" V8PRIdPTR
+  PrintIsolate(isolate_, "New space,          used: %6" PRIuS
                          " KB"
-                         ", available: %6" V8PRIdPTR
+                         ", available: %6" PRIuS
                          " KB"
-                         ", committed: %6zu KB\n",
+                         ", committed: %6" PRIuS " KB\n",
                new_space_->Size() / KB, new_space_->Available() / KB,
                new_space_->CommittedMemory() / KB);
-  PrintIsolate(isolate_, "Old space,          used: %6" V8PRIdPTR
+  PrintIsolate(isolate_, "Old space,          used: %6" PRIuS
                          " KB"
-                         ", available: %6" V8PRIdPTR
+                         ", available: %6" PRIuS
                          " KB"
-                         ", committed: %6zu KB\n",
+                         ", committed: %6" PRIuS " KB\n",
                old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
                old_space_->CommittedMemory() / KB);
-  PrintIsolate(isolate_, "Code space,         used: %6" V8PRIdPTR
+  PrintIsolate(isolate_, "Code space,         used: %6" PRIuS
                          " KB"
-                         ", available: %6" V8PRIdPTR
+                         ", available: %6" PRIuS
                          " KB"
-                         ", committed: %6zu KB\n",
+                         ", committed: %6" PRIuS " KB\n",
                code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
                code_space_->CommittedMemory() / KB);
-  PrintIsolate(isolate_, "Map space,          used: %6" V8PRIdPTR
+  PrintIsolate(isolate_, "Map space,          used: %6" PRIuS
                          " KB"
-                         ", available: %6" V8PRIdPTR
+                         ", available: %6" PRIuS
                          " KB"
-                         ", committed: %6zu KB\n",
+                         ", committed: %6" PRIuS " KB\n",
                map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
                map_space_->CommittedMemory() / KB);
-  PrintIsolate(isolate_, "Large object space, used: %6" V8PRIdPTR
+  PrintIsolate(isolate_, "Large object space, used: %6" PRIuS
                          " KB"
-                         ", available: %6" V8PRIdPTR
+                         ", available: %6" PRIuS
                          " KB"
-                         ", committed: %6zu KB\n",
+                         ", committed: %6" PRIuS " KB\n",
                lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
                lo_space_->CommittedMemory() / KB);
-  PrintIsolate(isolate_, "All spaces,         used: %6" V8PRIdPTR
+  PrintIsolate(isolate_, "All spaces,         used: %6" PRIuS
                          " KB"
-                         ", available: %6" V8PRIdPTR
+                         ", available: %6" PRIuS
                          " KB"
-                         ", committed: %6zu KB\n",
+                         ", committed: %6" PRIuS " KB\n",
                this->SizeOfObjects() / KB, this->Available() / KB,
                this->CommittedMemory() / KB);
-  PrintIsolate(isolate_, "External memory reported: %6" V8PRIdPTR " KB\n",
-               static_cast<intptr_t>(external_memory_ / KB));
+  PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
+               external_memory_ / KB);
   PrintIsolate(isolate_, "Total time spent in GC  : %.1f ms\n",
                total_gc_time_ms_);
 }
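
PRIuS, which replaces V8PRIdPTR throughout this function, is the printf
length-modifier macro used for size_t values (on POSIX toolchains it expands
to "zu"). A minimal sketch of the portable-format-macro pattern; the macro
name and the MSVC fallback below are illustrative, not copied from V8:

#include <cstddef>
#include <cstdio>

// Older MSVC runtimes lacked %zu, which is why projects define a macro
// instead of writing the length modifier inline.
#if defined(_MSC_VER)
#define PRIuS_DEMO "Iu"
#else
#define PRIuS_DEMO "zu"
#endif

int main() {
  size_t committed_bytes = 6 * 1024 * 1024;
  std::printf("committed: %6" PRIuS_DEMO " KB\n", committed_bytes / 1024);
  return 0;  // prints "committed:   6144 KB"
}
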
@@ -439,12 +442,11 @@
   }
   CheckNewSpaceExpansionCriteria();
   UpdateNewSpaceAllocationCounter();
-  store_buffer()->MoveEntriesToRememberedSet();
+  store_buffer()->MoveAllEntriesToRememberedSet();
 }
 
-
-intptr_t Heap::SizeOfObjects() {
-  intptr_t total = 0;
+size_t Heap::SizeOfObjects() {
+  size_t total = 0;
   AllSpaces spaces(this);
   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
     total += space->SizeOfObjects();
@@ -742,7 +744,7 @@
   }
   // We must not compact the weak fixed list here, as we may be in the middle
   // of writing to it when the GC was triggered. Instead, we reset the root
   // value.
-  set_weak_stack_trace_list(Smi::FromInt(0));
+  set_weak_stack_trace_list(Smi::kZero);
 }
 
 
@@ -822,7 +824,7 @@
 
 
 HistogramTimer* Heap::GCTypeTimer(GarbageCollector collector) {
-  if (collector == SCAVENGER) {
+  if (IsYoungGenerationCollector(collector)) {
     return isolate_->counters()->gc_scavenger();
   } else {
     if (!incremental_marking()->IsStopped()) {
@@ -862,7 +864,8 @@
   if (isolate()->concurrent_recompilation_enabled()) {
     // The optimizing compiler may be unnecessarily holding on to memory.
     DisallowHeapAllocation no_recursive_gc;
-    isolate()->optimizing_compile_dispatcher()->Flush();
+    isolate()->optimizing_compile_dispatcher()->Flush(
+        OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
   }
   isolate()->ClearSerializerData();
   set_current_gc_flags(kMakeHeapIterableMask | kReduceMemoryFootprintMask);
@@ -952,17 +955,20 @@
 
   EnsureFillerObjectAtTop();
 
-  if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
+  if (IsYoungGenerationCollector(collector) &&
+      !incremental_marking()->IsStopped()) {
     if (FLAG_trace_incremental_marking) {
       isolate()->PrintWithTimestamp(
           "[IncrementalMarking] Scavenge during marking.\n");
     }
   }
 
-  if (collector == MARK_COMPACTOR && !ShouldFinalizeIncrementalMarking() &&
-      !ShouldAbortIncrementalMarking() && !incremental_marking()->IsStopped() &&
-      !incremental_marking()->should_hurry() && FLAG_incremental_marking &&
-      OldGenerationSpaceAvailable() <= 0) {
+  if (collector == MARK_COMPACTOR && FLAG_incremental_marking &&
+      !ShouldFinalizeIncrementalMarking() && !ShouldAbortIncrementalMarking() &&
+      !incremental_marking()->IsStopped() &&
+      !incremental_marking()->should_hurry() &&
+      !incremental_marking()->NeedsFinalization() &&
+      !IsCloseToOutOfMemory(new_space_->Capacity())) {
     if (!incremental_marking()->IsComplete() &&
         !mark_compact_collector()->marking_deque()->IsEmpty() &&
         !FLAG_gc_global) {
@@ -970,13 +976,13 @@
         isolate()->PrintWithTimestamp(
             "[IncrementalMarking] Delaying MarkSweep.\n");
       }
-      collector = SCAVENGER;
+      collector = YoungGenerationCollector();
       collector_reason = "incremental marking delaying mark-sweep";
     }
   }
 
   bool next_gc_likely_to_collect_more = false;
-  intptr_t committed_memory_before = 0;
+  size_t committed_memory_before = 0;
 
   if (collector == MARK_COMPACTOR) {
     committed_memory_before = CommittedOldGenerationMemory();
@@ -1003,8 +1009,8 @@
     }
 
     if (collector == MARK_COMPACTOR) {
-      intptr_t committed_memory_after = CommittedOldGenerationMemory();
-      intptr_t used_memory_after = PromotedSpaceSizeOfObjects();
+      size_t committed_memory_after = CommittedOldGenerationMemory();
+      size_t used_memory_after = PromotedSpaceSizeOfObjects();
       MemoryReducer::Event event;
       event.type = MemoryReducer::kMarkCompact;
       event.time_ms = MonotonicallyIncreasingTimeInMs();
@@ -1013,7 +1019,7 @@
       // - there is high fragmentation,
       // - there are live detached contexts.
       event.next_gc_likely_to_collect_more =
-          (committed_memory_before - committed_memory_after) > MB ||
+          (committed_memory_before > committed_memory_after + MB) ||
           HasHighFragmentation(used_memory_after, committed_memory_after) ||
           (detached_contexts()->length() > 0);
       if (deserialization_complete_) {
@@ -1035,7 +1041,8 @@
   // generator needs incremental marking to stay off after it aborted.
   // We do this only for scavenger to avoid a loop where mark-compact
   // causes another mark-compact.
-  if (collector == SCAVENGER && !ShouldAbortIncrementalMarking()) {
+  if (IsYoungGenerationCollector(collector) &&
+      !ShouldAbortIncrementalMarking()) {
     StartIncrementalMarkingIfAllocationLimitIsReached(kNoGCFlags,
                                                       kNoGCCallbackFlags);
   }
@@ -1055,7 +1062,8 @@
   }
   if (isolate()->concurrent_recompilation_enabled()) {
     // Flush the queued recompilation tasks.
-    isolate()->optimizing_compile_dispatcher()->Flush();
+    isolate()->optimizing_compile_dispatcher()->Flush(
+        OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
   }
   AgeInlineCaches();
   number_of_disposed_maps_ = retained_maps()->Length();
@@ -1171,8 +1179,9 @@
         for (auto& chunk : *reservation) {
           AllocationResult allocation;
           int size = chunk.size;
-          DCHECK_LE(size, MemoryAllocator::PageAreaSize(
-                              static_cast<AllocationSpace>(space)));
+          DCHECK_LE(static_cast<size_t>(size),
+                    MemoryAllocator::PageAreaSize(
+                        static_cast<AllocationSpace>(space)));
           if (space == NEW_SPACE) {
             allocation = new_space()->AllocateRawUnaligned(size);
           } else {
@@ -1274,7 +1283,7 @@
     GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
   int freed_global_handles = 0;
 
-  if (collector != SCAVENGER) {
+  if (!IsYoungGenerationCollector(collector)) {
     PROFILE(isolate_, CodeMovingGCEvent());
   }
 
@@ -1291,9 +1300,7 @@
     GCCallbacksScope scope(this);
     if (scope.CheckReenter()) {
       AllowHeapAllocation allow_allocation;
-      TRACE_GC(tracer(), collector == MARK_COMPACTOR
-                             ? GCTracer::Scope::MC_EXTERNAL_PROLOGUE
-                             : GCTracer::Scope::SCAVENGER_EXTERNAL_PROLOGUE);
+      TRACE_GC(tracer(), GCTracer::Scope::EXTERNAL_PROLOGUE);
       VMState<EXTERNAL> state(isolate_);
       HandleScope handle_scope(isolate_);
       CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
@@ -1307,18 +1314,25 @@
   {
     Heap::PretenuringScope pretenuring_scope(this);
 
-    if (collector == MARK_COMPACTOR) {
-      UpdateOldGenerationAllocationCounter();
-      // Perform mark-sweep with optional compaction.
-      MarkCompact();
-      old_generation_size_configured_ = true;
-      // This should be updated before PostGarbageCollectionProcessing, which
-      // can cause another GC. Take into account the objects promoted during GC.
-      old_generation_allocation_counter_at_last_gc_ +=
-          static_cast<size_t>(promoted_objects_size_);
-      old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
-    } else {
-      Scavenge();
+    switch (collector) {
+      case MARK_COMPACTOR:
+        UpdateOldGenerationAllocationCounter();
+        // Perform mark-sweep with optional compaction.
+        MarkCompact();
+        old_generation_size_configured_ = true;
+        // This should be updated before PostGarbageCollectionProcessing, which
+        // can cause another GC. Take into account the objects promoted during
+        // GC.
+        old_generation_allocation_counter_at_last_gc_ +=
+            static_cast<size_t>(promoted_objects_size_);
+        old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
+        break;
+      case MINOR_MARK_COMPACTOR:
+        MinorMarkCompact();
+        break;
+      case SCAVENGER:
+        Scavenge();
+        break;
     }
 
     ProcessPretenuringFeedback();
@@ -1347,7 +1361,7 @@
   double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
   double mutator_speed =
       tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
-  intptr_t old_gen_size = PromotedSpaceSizeOfObjects();
+  size_t old_gen_size = PromotedSpaceSizeOfObjects();
   if (collector == MARK_COMPACTOR) {
     // Register the amount of external allocated memory.
     external_memory_at_last_mark_compact_ = external_memory_;
@@ -1362,9 +1376,7 @@
     GCCallbacksScope scope(this);
     if (scope.CheckReenter()) {
       AllowHeapAllocation allow_allocation;
-      TRACE_GC(tracer(), collector == MARK_COMPACTOR
-                             ? GCTracer::Scope::MC_EXTERNAL_EPILOGUE
-                             : GCTracer::Scope::SCAVENGER_EXTERNAL_EPILOGUE);
+      TRACE_GC(tracer(), GCTracer::Scope::EXTERNAL_EPILOGUE);
       VMState<EXTERNAL> state(isolate_);
       HandleScope handle_scope(isolate_);
       CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
@@ -1443,8 +1455,10 @@
   }
 }
 
+void Heap::MinorMarkCompact() { UNREACHABLE(); }
 
 void Heap::MarkCompactEpilogue() {
+  TRACE_GC(tracer(), GCTracer::Scope::MC_EPILOGUE);
   gc_state_ = NOT_IN_GC;
 
   isolate_->counters()->objs_since_last_full()->Set(0);
@@ -1454,18 +1468,12 @@
   PreprocessStackTraces();
   DCHECK(incremental_marking()->IsStopped());
 
-  // We finished a marking cycle. We can uncommit the marking deque until
-  // we start marking again.
-  mark_compact_collector()->marking_deque()->Uninitialize();
-  mark_compact_collector()->EnsureMarkingDequeIsCommitted(
-      MarkCompactCollector::kMinMarkingDequeSize);
+  mark_compact_collector()->marking_deque()->StopUsing();
 }
 
 
 void Heap::MarkCompactPrologue() {
-  // At any old GC clear the keyed lookup cache to enable collection of unused
-  // maps.
-  isolate_->keyed_lookup_cache()->Clear();
+  TRACE_GC(tracer(), GCTracer::Scope::MC_PROLOGUE);
   isolate_->context_slot_cache()->Clear();
   isolate_->descriptor_lookup_cache()->Clear();
   RegExpResultsCache::Clear(string_split_cache());
@@ -1603,7 +1611,7 @@
   LOG(isolate_, ResourceEvent("scavenge", "begin"));
 
   // Used for updating survived_since_last_expansion_ at function end.
-  intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
+  size_t survived_watermark = PromotedSpaceSizeOfObjects();
 
   scavenge_collector_->SelectScavengingVisitorsTable();
 
@@ -1639,13 +1647,10 @@
   Address new_space_front = new_space_->ToSpaceStart();
   promotion_queue_.Initialize();
 
-  PromotionMode promotion_mode = CurrentPromotionMode();
   ScavengeVisitor scavenge_visitor(this);
 
-  if (FLAG_scavenge_reclaim_unmodified_objects) {
-    isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
-        &IsUnmodifiedHeapObject);
-  }
+  isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
+      &IsUnmodifiedHeapObject);
 
   {
     // Copy roots.
@@ -1677,8 +1682,6 @@
     TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_WEAK);
     // Copy objects reachable from the encountered weak collections list.
     scavenge_visitor.VisitPointer(&encountered_weak_collections_);
-    // Copy objects reachable from the encountered weak cells.
-    scavenge_visitor.VisitPointer(&encountered_weak_cells_);
   }
 
   {
@@ -1692,36 +1695,15 @@
 
   {
     TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SEMISPACE);
-    new_space_front =
-        DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
+    new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
   }
 
-  if (FLAG_scavenge_reclaim_unmodified_objects) {
-    isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
-        &IsUnscavengedHeapObject);
+  isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
+      &IsUnscavengedHeapObject);
 
-    isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
-        &scavenge_visitor);
-    new_space_front =
-        DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
-  } else {
-    TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OBJECT_GROUPS);
-    while (isolate()->global_handles()->IterateObjectGroups(
-        &scavenge_visitor, &IsUnscavengedHeapObject)) {
-      new_space_front =
-          DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
-    }
-    isolate()->global_handles()->RemoveObjectGroups();
-    isolate()->global_handles()->RemoveImplicitRefGroups();
-
-    isolate()->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
-        &IsUnscavengedHeapObject);
-
-    isolate()->global_handles()->IterateNewSpaceWeakIndependentRoots(
-        &scavenge_visitor);
-    new_space_front =
-        DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
-  }
+  isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
+      &scavenge_visitor);
+  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
 
   UpdateNewSpaceReferencesInExternalStringTable(
       &UpdateNewSpaceReferenceInExternalStringTableEntry);
@@ -1741,9 +1723,9 @@
   ArrayBufferTracker::FreeDeadInNewSpace(this);
 
   // Update how much has survived scavenge.
-  IncrementYoungSurvivorsCounter(
-      static_cast<int>((PromotedSpaceSizeOfObjects() - survived_watermark) +
-                       new_space_->Size()));
+  DCHECK_GE(PromotedSpaceSizeOfObjects(), survived_watermark);
+  IncrementYoungSurvivorsCounter(PromotedSpaceSizeOfObjects() +
+                                 new_space_->Size() - survived_watermark);
 
   LOG(isolate_, ResourceEvent("scavenge", "end"));
 
@@ -1904,8 +1886,7 @@
 }
 
 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
-                         Address new_space_front,
-                         PromotionMode promotion_mode) {
+                         Address new_space_front) {
   do {
     SemiSpace::AssertValidRange(new_space_front, new_space_->top());
     // The addresses new_space_front and new_space_.top() define a
@@ -1914,14 +1895,8 @@
     while (new_space_front != new_space_->top()) {
       if (!Page::IsAlignedToPageSize(new_space_front)) {
         HeapObject* object = HeapObject::FromAddress(new_space_front);
-        if (promotion_mode == PROMOTE_MARKED) {
-          new_space_front += StaticScavengeVisitor<PROMOTE_MARKED>::IterateBody(
-              object->map(), object);
-        } else {
-          new_space_front +=
-              StaticScavengeVisitor<DEFAULT_PROMOTION>::IterateBody(
-                  object->map(), object);
-        }
+        new_space_front +=
+            StaticScavengeVisitor::IterateBody(object->map(), object);
       } else {
         new_space_front = Page::FromAllocationAreaAddress(new_space_front)
                               ->next_page()
@@ -1943,8 +1918,8 @@
         // to new space.
         DCHECK(!target->IsMap());
 
-        IteratePromotedObject(target, static_cast<int>(size), was_marked_black,
-                              &Scavenger::ScavengeObject);
+        IterateAndScavengePromotedObject(target, static_cast<int>(size),
+                                         was_marked_black);
       }
     }
 
@@ -2038,7 +2013,7 @@
   if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
     old_generation_allocation_limit_ =
         Max(MinimumAllocationLimitGrowingStep(),
-            static_cast<intptr_t>(
+            static_cast<size_t>(
                 static_cast<double>(old_generation_allocation_limit_) *
                 (tracer()->AverageSurvivalRatio() / 100)));
   }
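
The limit computation above scales the old-generation allocation limit by the
average survival ratio (a percentage), with a minimum growing step as the
floor. A tiny worked sketch of that arithmetic with made-up numbers; the real
inputs come from the tracer and heap configuration:

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  size_t old_limit = 64u * 1024 * 1024;            // current limit: 64 MB
  double average_survival_ratio = 25.0;            // 25% of bytes survive GC
  size_t minimum_growing_step = 2u * 1024 * 1024;  // assumed 2 MB floor
  size_t scaled = static_cast<size_t>(
      static_cast<double>(old_limit) * (average_survival_ratio / 100));
  size_t new_limit = std::max(minimum_growing_step, scaled);
  std::printf("new limit: %zu MB\n", new_limit / (1024 * 1024));  // 16 MB
  return 0;
}
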
@@ -2073,7 +2048,7 @@
                    Map::OwnsDescriptors::encode(true) |
                    Map::ConstructionCounter::encode(Map::kNoSlackTracking);
   reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
-  reinterpret_cast<Map*>(result)->set_weak_cell_cache(Smi::FromInt(0));
+  reinterpret_cast<Map*>(result)->set_weak_cell_cache(Smi::kZero);
   return result;
 }
 
@@ -2097,8 +2072,8 @@
   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
   map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
                           SKIP_WRITE_BARRIER);
-  map->set_weak_cell_cache(Smi::FromInt(0));
-  map->set_raw_transitions(Smi::FromInt(0));
+  map->set_weak_cell_cache(Smi::kZero);
+  map->set_raw_transitions(Smi::kZero);
   map->set_unused_property_fields(0);
   map->set_instance_descriptors(empty_descriptor_array());
   if (FLAG_unbox_double_fields) {
@@ -2170,7 +2145,7 @@
 void FinalizePartialMap(Heap* heap, Map* map) {
   map->set_code_cache(heap->empty_fixed_array());
   map->set_dependent_code(DependentCode::cast(heap->empty_fixed_array()));
-  map->set_raw_transitions(Smi::FromInt(0));
+  map->set_raw_transitions(Smi::kZero);
   map->set_instance_descriptors(heap->empty_descriptor_array());
   if (FLAG_unbox_double_fields) {
     map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
@@ -2280,7 +2255,6 @@
     DCHECK_NE(fixed_array_map(), fixed_cow_array_map());
 
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
-    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info_entry)
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info)
     ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
                            Context::NUMBER_FUNCTION_INDEX)
@@ -2505,7 +2479,7 @@
   PropertyCell* cell = PropertyCell::cast(result);
   cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
                            SKIP_WRITE_BARRIER);
-  cell->set_property_details(PropertyDetails(Smi::FromInt(0)));
+  cell->set_property_details(PropertyDetails(Smi::kZero));
   cell->set_value(the_hole_value());
   return result;
 }
@@ -2552,16 +2526,6 @@
 
 void Heap::CreateApiObjects() {
   HandleScope scope(isolate());
-  Factory* factory = isolate()->factory();
-  Handle<Map> new_neander_map =
-      factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
-
-  // Don't use Smi-only elements optimizations for objects with the neander
-  // map. There are too many cases where element values are set directly with a
-  // bottleneck to trap the Smi-only -> fast elements transition, and there
-  // appears to be no benefit for optimize this case.
-  new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
-  set_neander_map(*new_neander_map);
   set_message_listeners(*TemplateList::New(isolate(), 2));
 }
 
@@ -2635,8 +2599,7 @@
 
   // Initialize the null_value.
   Oddball::Initialize(isolate(), factory->null_value(), "null",
-                      handle(Smi::FromInt(0), isolate()), "object",
-                      Oddball::kNull);
+                      handle(Smi::kZero, isolate()), "object", Oddball::kNull);
 
   // Initialize the_hole_value.
   Oddball::Initialize(isolate(), factory->the_hole_value(), "hole",
@@ -2650,7 +2613,7 @@
 
   // Initialize the false_value.
   Oddball::Initialize(isolate(), factory->false_value(), "false",
-                      handle(Smi::FromInt(0), isolate()), "boolean",
+                      handle(Smi::kZero, isolate()), "boolean",
                       Oddball::kFalse);
 
   set_uninitialized_value(
@@ -2696,9 +2659,9 @@
   // expanding the dictionary during bootstrapping.
   set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
 
-  set_instanceof_cache_function(Smi::FromInt(0));
-  set_instanceof_cache_map(Smi::FromInt(0));
-  set_instanceof_cache_answer(Smi::FromInt(0));
+  set_instanceof_cache_function(Smi::kZero);
+  set_instanceof_cache_map(Smi::kZero);
+  set_instanceof_cache_answer(Smi::kZero);
 
   {
     HandleScope scope(isolate());
@@ -2767,7 +2730,7 @@
   set_undefined_cell(*factory->NewCell(factory->undefined_value()));
 
   // The symbol registry is initialized lazily.
-  set_symbol_registry(Smi::FromInt(0));
+  set_symbol_registry(Smi::kZero);
 
   // Microtask queue uses the empty fixed array as a sentinel for "empty".
   // Number of queued microtasks stored in Isolate::pending_microtask_count().
@@ -2815,7 +2778,7 @@
     empty_type_feedback_vector->set(TypeFeedbackVector::kMetadataIndex,
                                     empty_fixed_array());
     empty_type_feedback_vector->set(TypeFeedbackVector::kInvocationCountIndex,
-                                    Smi::FromInt(0));
+                                    Smi::kZero);
     set_empty_type_feedback_vector(*empty_type_feedback_vector);
 
     // We use a canonical empty LiteralsArray for all functions that neither
@@ -2838,14 +2801,6 @@
     Handle<WeakCell> cell = factory->NewWeakCell(factory->undefined_value());
     set_empty_weak_cell(*cell);
     cell->clear();
-
-    Handle<FixedArray> cleared_optimized_code_map =
-        factory->NewFixedArray(SharedFunctionInfo::kEntriesStart, TENURED);
-    cleared_optimized_code_map->set(SharedFunctionInfo::kSharedCodeIndex,
-                                    *cell);
-    STATIC_ASSERT(SharedFunctionInfo::kEntriesStart == 1 &&
-                  SharedFunctionInfo::kSharedCodeIndex == 0);
-    set_cleared_optimized_code_map(*cleared_optimized_code_map);
   }
 
   set_detached_contexts(empty_fixed_array());
@@ -2859,7 +2814,7 @@
       ArrayList::cast(*(factory->NewFixedArray(16, TENURED))));
   weak_new_space_object_to_code_list()->SetLength(0);
 
-  set_script_list(Smi::FromInt(0));
+  set_script_list(Smi::kZero);
 
   Handle<SeededNumberDictionary> slow_element_dictionary =
       SeededNumberDictionary::New(isolate(), 0, TENURED);
@@ -2870,7 +2825,7 @@
 
   // Handling of script id generation is in Heap::NextScriptId().
   set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
-  set_next_template_serial_number(Smi::FromInt(0));
+  set_next_template_serial_number(Smi::kZero);
 
   // Allocate the empty script.
   Handle<Script> script = factory->NewScript(factory->empty_string());
@@ -2878,7 +2833,7 @@
   set_empty_script(*script);
 
   Handle<PropertyCell> cell = factory->NewPropertyCell();
-  cell->set_value(Smi::FromInt(Isolate::kArrayProtectorValid));
+  cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
   set_array_protector(*cell);
 
   cell = factory->NewPropertyCell();
@@ -2886,29 +2841,34 @@
   set_empty_property_cell(*cell);
 
   cell = factory->NewPropertyCell();
-  cell->set_value(Smi::FromInt(Isolate::kArrayProtectorValid));
+  cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
   set_has_instance_protector(*cell);
 
   Handle<Cell> is_concat_spreadable_cell = factory->NewCell(
-      handle(Smi::FromInt(Isolate::kArrayProtectorValid), isolate()));
+      handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
   set_is_concat_spreadable_protector(*is_concat_spreadable_cell);
 
   Handle<Cell> species_cell = factory->NewCell(
-      handle(Smi::FromInt(Isolate::kArrayProtectorValid), isolate()));
+      handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
   set_species_protector(*species_cell);
 
   cell = factory->NewPropertyCell();
-  cell->set_value(Smi::FromInt(Isolate::kArrayProtectorValid));
+  cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
   set_string_length_protector(*cell);
 
+  Handle<Cell> fast_array_iteration_cell = factory->NewCell(
+      handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
+  set_fast_array_iteration_protector(*fast_array_iteration_cell);
+
+  Handle<Cell> array_iterator_cell = factory->NewCell(
+      handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
+  set_array_iterator_protector(*array_iterator_cell);
+
   set_serialized_templates(empty_fixed_array());
 
-  set_weak_stack_trace_list(Smi::FromInt(0));
+  set_weak_stack_trace_list(Smi::kZero);
 
-  set_noscript_shared_function_infos(Smi::FromInt(0));
-
-  // Initialize keyed lookup cache.
-  isolate_->keyed_lookup_cache()->Clear();
+  set_noscript_shared_function_infos(Smi::kZero);
 
   // Initialize context slot cache.
   isolate_->context_slot_cache()->Clear();
@@ -2963,12 +2923,13 @@
   // Compute the size of the number string cache based on the max newspace size.
   // The number string cache has a minimum size based on twice the initial cache
   // size to ensure that it is bigger after being made 'full size'.
-  int number_string_cache_size = max_semi_space_size_ / 512;
-  number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
-                                 Min(0x4000, number_string_cache_size));
+  size_t number_string_cache_size = max_semi_space_size_ / 512;
+  number_string_cache_size =
+      Max(static_cast<size_t>(kInitialNumberStringCacheSize * 2),
+          Min<size_t>(0x4000u, number_string_cache_size));
   // There is a string and a number per entry so the length is twice the number
   // of entries.
-  return number_string_cache_size * 2;
+  return static_cast<int>(number_string_cache_size * 2);
 }
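For illustration, the clamping rule above in standalone form. This is a sketch, not the V8 function: the value of kInitialNumberStringCacheSize is assumed to be 256 here, while the divide-by-512 and the 0x4000 cap come straight from the hunk.

#include <algorithm>
#include <cstddef>
#include <cstdio>

int NumberStringCacheLength(size_t max_semi_space_size_bytes) {
  const size_t kInitialNumberStringCacheSize = 256;  // assumed value
  size_t entries = max_semi_space_size_bytes / 512;
  // Clamp between twice the initial size and 0x4000 entries.
  entries = std::max(kInitialNumberStringCacheSize * 2,
                     std::min<size_t>(0x4000u, entries));
  // One string and one number per entry, so the length is twice the count.
  return static_cast<int>(entries * 2);
}

int main() {
  // 8 MB / 512 = 16384 = 0x4000, so the cap binds exactly: length 32768.
  std::printf("%d\n", NumberStringCacheLength(8u * 1024 * 1024));
}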
 
 
@@ -3307,7 +3268,7 @@
 
   result->set_map_no_write_barrier(MapForFixedTypedArray(array_type));
   FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(result);
-  elements->set_base_pointer(Smi::FromInt(0), SKIP_WRITE_BARRIER);
+  elements->set_base_pointer(Smi::kZero, SKIP_WRITE_BARRIER);
   elements->set_external_pointer(external_pointer, SKIP_WRITE_BARRIER);
   elements->set_length(length);
   return elements;
@@ -3391,7 +3352,7 @@
   DCHECK(!memory_allocator()->code_range()->valid() ||
          memory_allocator()->code_range()->contains(code->address()) ||
          object_size <= code_space()->AreaSize());
-  code->set_gc_metadata(Smi::FromInt(0));
+  code->set_gc_metadata(Smi::kZero);
   code->set_ic_age(global_ic_age_);
   return code;
 }
@@ -3488,7 +3449,7 @@
   // TODO(1240798): Initialize the object's body using valid initial values
   // according to the object's initial map.  For example, if the map's
   // instance type is JS_ARRAY_TYPE, the length field should be initialized
-  // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
+  // to a number (e.g. Smi::kZero) and the elements initialized to a
   // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
   // verification code has to cope with (temporarily) invalid objects.  See
   // for example, JSArray::JSArrayVerify).
@@ -4035,13 +3996,7 @@
   result->set_map_no_write_barrier(symbol_map());
 
   // Generate a random hash value.
-  int hash;
-  int attempts = 0;
-  do {
-    hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
-    attempts++;
-  } while (hash == 0 && attempts < 30);
-  if (hash == 0) hash = 1;  // never return 0
+  int hash = isolate()->GenerateIdentityHash(Name::kHashBitMask);
 
   Symbol::cast(result)
       ->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
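The deleted loop above is what Isolate::GenerateIdentityHash centralizes: draw random bits under a mask, retry a bounded number of times on 0, and force a nonzero fallback (a hash of 0 means "not yet computed"). A standalone sketch of the same semantics, with a made-up mask and std::mt19937 standing in for the isolate's random number generator:

#include <cstdio>
#include <random>

int GenerateIdentityHash(std::mt19937* rng, int mask) {
  int hash;
  int attempts = 0;
  do {
    hash = static_cast<int>((*rng)() & static_cast<uint32_t>(mask));
    attempts++;
  } while (hash == 0 && attempts < 30);
  if (hash == 0) hash = 1;  // never return 0
  return hash;
}

int main() {
  std::mt19937 rng(42);
  const int kMask = (1 << 21) - 1;  // stand-in for Name::kHashBitMask
  std::printf("%d\n", GenerateIdentityHash(&rng, kMask));  // nonzero
}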
@@ -4164,16 +4119,16 @@
 
 
 bool Heap::HasHighFragmentation() {
-  intptr_t used = PromotedSpaceSizeOfObjects();
-  intptr_t committed = CommittedOldGenerationMemory();
+  size_t used = PromotedSpaceSizeOfObjects();
+  size_t committed = CommittedOldGenerationMemory();
   return HasHighFragmentation(used, committed);
 }
 
-
-bool Heap::HasHighFragmentation(intptr_t used, intptr_t committed) {
-  const intptr_t kSlack = 16 * MB;
+bool Heap::HasHighFragmentation(size_t used, size_t committed) {
+  const size_t kSlack = 16 * MB;
   // Fragmentation is high if committed > 2 * used + kSlack.
   // Rewrite the expression to avoid overflow.
+  DCHECK_GE(committed, used);
   return committed - used > used + kSlack;
 }
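A note on the rewrite above: committed > 2 * used + kSlack can overflow when used is large, while committed - used > used + kSlack is equivalent whenever committed >= used (which the new DCHECK asserts) and keeps the intermediate values in range. Standalone sketch:

#include <cassert>
#include <cstddef>
#include <cstdio>

bool HasHighFragmentation(size_t used, size_t committed) {
  const size_t kSlack = 16u * 1024 * 1024;  // 16 MB, as in the hunk
  assert(committed >= used);                // mirrors the DCHECK_GE above
  return committed - used > used + kSlack;
}

int main() {
  const size_t MB = 1024 * 1024;
  std::printf("%d\n", HasHighFragmentation(100 * MB, 250 * MB));  // 1
  std::printf("%d\n", HasHighFragmentation(100 * MB, 200 * MB));  // 0
}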
 
@@ -4228,8 +4183,7 @@
        (!incremental_marking()->finalize_marking_completed() &&
         MarkingDequesAreEmpty()))) {
     FinalizeIncrementalMarking(gc_reason);
-  } else if (incremental_marking()->IsComplete() ||
-             (mark_compact_collector()->marking_deque()->IsEmpty())) {
+  } else if (incremental_marking()->IsComplete() || MarkingDequesAreEmpty()) {
     CollectAllGarbage(current_gc_flags_, gc_reason);
   }
 }
@@ -4456,7 +4410,8 @@
     if (isolate()->concurrent_recompilation_enabled()) {
       // The optimizing compiler may be unnecessarily holding on to memory.
       DisallowHeapAllocation no_recursive_gc;
-      isolate()->optimizing_compile_dispatcher()->Flush();
+      isolate()->optimizing_compile_dispatcher()->Flush(
+          OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
     }
   }
   if (memory_pressure_level_.Value() == MemoryPressureLevel::kCritical) {
@@ -4783,51 +4738,44 @@
   }
 }
 
-void Heap::IteratePromotedObjectPointers(HeapObject* object, Address start,
-                                         Address end, bool record_slots,
-                                         ObjectSlotCallback callback) {
-  Address slot_address = start;
-  Page* page = Page::FromAddress(start);
-
-  while (slot_address < end) {
-    Object** slot = reinterpret_cast<Object**>(slot_address);
-    Object* target = *slot;
-    if (target->IsHeapObject()) {
-      if (Heap::InFromSpace(target)) {
-        callback(reinterpret_cast<HeapObject**>(slot),
-                 HeapObject::cast(target));
-        Object* new_target = *slot;
-        if (InNewSpace(new_target)) {
-          SLOW_DCHECK(Heap::InToSpace(new_target));
-          SLOW_DCHECK(new_target->IsHeapObject());
-          RememberedSet<OLD_TO_NEW>::Insert(page, slot_address);
-        }
-        SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_target));
-      } else if (record_slots &&
-                 MarkCompactCollector::IsOnEvacuationCandidate(target)) {
-        mark_compact_collector()->RecordSlot(object, slot, target);
-      }
-    }
-    slot_address += kPointerSize;
-  }
-}
-
-class IteratePromotedObjectsVisitor final : public ObjectVisitor {
+class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
  public:
-  IteratePromotedObjectsVisitor(Heap* heap, HeapObject* target,
-                                bool record_slots, ObjectSlotCallback callback)
-      : heap_(heap),
-        target_(target),
-        record_slots_(record_slots),
-        callback_(callback) {}
+  IterateAndScavengePromotedObjectsVisitor(Heap* heap, HeapObject* target,
+                                           bool record_slots)
+      : heap_(heap), target_(target), record_slots_(record_slots) {}
 
-  V8_INLINE void VisitPointers(Object** start, Object** end) override {
-    heap_->IteratePromotedObjectPointers(
-        target_, reinterpret_cast<Address>(start),
-        reinterpret_cast<Address>(end), record_slots_, callback_);
+  inline void VisitPointers(Object** start, Object** end) override {
+    Address slot_address = reinterpret_cast<Address>(start);
+    Page* page = Page::FromAddress(slot_address);
+
+    while (slot_address < reinterpret_cast<Address>(end)) {
+      Object** slot = reinterpret_cast<Object**>(slot_address);
+      Object* target = *slot;
+
+      if (target->IsHeapObject()) {
+        if (heap_->InFromSpace(target)) {
+          Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(slot),
+                                    HeapObject::cast(target));
+          target = *slot;
+          if (heap_->InNewSpace(target)) {
+            SLOW_DCHECK(heap_->InToSpace(target));
+            SLOW_DCHECK(target->IsHeapObject());
+            RememberedSet<OLD_TO_NEW>::Insert(page, slot_address);
+          }
+          SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(
+              HeapObject::cast(target)));
+        } else if (record_slots_ &&
+                   MarkCompactCollector::IsOnEvacuationCandidate(
+                       HeapObject::cast(target))) {
+          heap_->mark_compact_collector()->RecordSlot(target_, slot, target);
+        }
+      }
+
+      slot_address += kPointerSize;
+    }
   }
 
-  V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {
+  inline void VisitCodeEntry(Address code_entry_slot) override {
     // Black allocation requires us to process objects referenced by
     // promoted objects.
     if (heap_->incremental_marking()->black_allocation()) {
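The visitor above inlines the old IteratePromotedObjectPointers loop: for each pointer slot of a promoted object, scavenge from-space targets in place, then re-record in the OLD_TO_NEW remembered set any slot that still points into new space afterwards. A toy, runnable reduction of that control flow; the Obj type, the two-state space model, and the odd/even promotion rule are all made up:

#include <cstdio>
#include <vector>

struct Obj {
  int payload;
  bool in_new_space;  // stands in for the from-/to-space checks
};

// Stand-in for Scavenger::ScavengeObject: arbitrarily promotes objects with
// odd payloads and leaves the rest in new space.
void Scavenge(Obj** slot) {
  if ((*slot)->payload % 2 != 0) (*slot)->in_new_space = false;
}

int main() {
  Obj a{1, true}, b{2, true};
  Obj* slots[] = {&a, &b, nullptr};
  std::vector<Obj**> old_to_new;  // stand-in for the OLD_TO_NEW set

  for (Obj** slot = slots; slot != slots + 3; ++slot) {
    if (*slot == nullptr) continue;  // not a heap object
    if ((*slot)->in_new_space) {     // analogous to InFromSpace(target)
      Scavenge(slot);                // copies or promotes, updates *slot
      if ((*slot)->in_new_space) {   // still young: remember this slot
        old_to_new.push_back(slot);
      }
    }
  }
  std::printf("recorded %zu old-to-new slots\n", old_to_new.size());  // 1
}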
@@ -4840,12 +4788,10 @@
   Heap* heap_;
   HeapObject* target_;
   bool record_slots_;
-  ObjectSlotCallback callback_;
 };
 
-void Heap::IteratePromotedObject(HeapObject* target, int size,
-                                 bool was_marked_black,
-                                 ObjectSlotCallback callback) {
+void Heap::IterateAndScavengePromotedObject(HeapObject* target, int size,
+                                            bool was_marked_black) {
   // We are not collecting slots on new space objects during mutation
   // thus we have to scan for pointers to evacuation candidates when we
   // promote objects. But we should not record any slots in non-black
@@ -4858,8 +4804,14 @@
     record_slots = Marking::IsBlack(mark_bit);
   }
 
-  IteratePromotedObjectsVisitor visitor(this, target, record_slots, callback);
-  target->IterateBody(target->map()->instance_type(), size, &visitor);
+  IterateAndScavengePromotedObjectsVisitor visitor(this, target, record_slots);
+  if (target->IsJSFunction()) {
+    // JSFunctions reachable through kNextFunctionLinkOffset are weak. Slots for
+    // these links are recorded during processing of weak lists.
+    JSFunction::BodyDescriptorWeakCode::IterateBody(target, size, &visitor);
+  } else {
+    target->IterateBody(target->map()->instance_type(), size, &visitor);
+  }
 
   // When black allocation is on, we have to visit not already marked black
   // objects (in new space) promoted to black pages to keep their references
@@ -5031,31 +4983,31 @@
 // TODO(1236194): Since the heap size is configurable on the command line
 // and through the API, we should gracefully handle the case that the heap
 // size is not big enough to fit all the initial objects.
-bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
-                         int max_executable_size, size_t code_range_size) {
+bool Heap::ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
+                         size_t max_executable_size, size_t code_range_size) {
   if (HasBeenSetUp()) return false;
 
   // Overwrite default configuration.
-  if (max_semi_space_size > 0) {
+  if (max_semi_space_size != 0) {
     max_semi_space_size_ = max_semi_space_size * MB;
   }
-  if (max_old_space_size > 0) {
-    max_old_generation_size_ = static_cast<intptr_t>(max_old_space_size) * MB;
+  if (max_old_space_size != 0) {
+    max_old_generation_size_ = max_old_space_size * MB;
   }
-  if (max_executable_size > 0) {
-    max_executable_size_ = static_cast<intptr_t>(max_executable_size) * MB;
+  if (max_executable_size != 0) {
+    max_executable_size_ = max_executable_size * MB;
   }
 
   // If max space size flags are specified overwrite the configuration.
   if (FLAG_max_semi_space_size > 0) {
-    max_semi_space_size_ = FLAG_max_semi_space_size * MB;
+    max_semi_space_size_ = static_cast<size_t>(FLAG_max_semi_space_size) * MB;
   }
   if (FLAG_max_old_space_size > 0) {
     max_old_generation_size_ =
-        static_cast<intptr_t>(FLAG_max_old_space_size) * MB;
+        static_cast<size_t>(FLAG_max_old_space_size) * MB;
   }
   if (FLAG_max_executable_size > 0) {
-    max_executable_size_ = static_cast<intptr_t>(FLAG_max_executable_size) * MB;
+    max_executable_size_ = static_cast<size_t>(FLAG_max_executable_size) * MB;
   }
 
   if (Page::kPageSize > MB) {
@@ -5072,17 +5024,18 @@
 
   // The new space size must be a power of two to support single-bit testing
   // for containment.
-  max_semi_space_size_ =
-      base::bits::RoundUpToPowerOfTwo32(max_semi_space_size_);
+  max_semi_space_size_ = base::bits::RoundUpToPowerOfTwo32(
+      static_cast<uint32_t>(max_semi_space_size_));
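The power-of-two rounding above matters because, as the comment says, new-space containment is tested with a single bit. For nonzero 32-bit inputs, base::bits::RoundUpToPowerOfTwo32 behaves like the classic bit-smearing idiom sketched here:

#include <cstdint>
#include <cstdio>

uint32_t RoundUpToPowerOfTwo32(uint32_t v) {
  v--;           // so that exact powers of two map to themselves
  v |= v >> 1;   // smear the highest set bit downwards...
  v |= v >> 2;
  v |= v >> 4;
  v |= v >> 8;
  v |= v >> 16;
  return v + 1;  // ...then step up to the next power of two
}

int main() {
  std::printf("%u %u %u\n", RoundUpToPowerOfTwo32(1), RoundUpToPowerOfTwo32(3),
              RoundUpToPowerOfTwo32(5u * 1024 * 1024));  // 1 4 8388608
}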
 
   if (FLAG_min_semi_space_size > 0) {
-    int initial_semispace_size = FLAG_min_semi_space_size * MB;
+    size_t initial_semispace_size =
+        static_cast<size_t>(FLAG_min_semi_space_size) * MB;
     if (initial_semispace_size > max_semi_space_size_) {
       initial_semispace_size_ = max_semi_space_size_;
       if (FLAG_trace_gc) {
         PrintIsolate(isolate_,
                      "Min semi-space size cannot be more than the maximum "
-                     "semi-space size of %d MB\n",
+                     "semi-space size of %" PRIuS " MB\n",
                      max_semi_space_size_ / MB);
       }
     } else {
@@ -5100,7 +5053,7 @@
   // The old generation is paged and needs at least one page for each space.
   int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
   max_old_generation_size_ =
-      Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
+      Max(static_cast<size_t>(paged_space_count * Page::kPageSize),
           max_old_generation_size_);
 
   // The max executable size must be less than or equal to the max old
@@ -5199,16 +5152,15 @@
   }
 }
 
-
-intptr_t Heap::PromotedSpaceSizeOfObjects() {
+size_t Heap::PromotedSpaceSizeOfObjects() {
   return old_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
          map_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
 }
 
-
-int64_t Heap::PromotedExternalMemorySize() {
+uint64_t Heap::PromotedExternalMemorySize() {
   if (external_memory_ <= external_memory_at_last_mark_compact_) return 0;
-  return external_memory_ - external_memory_at_last_mark_compact_;
+  return static_cast<uint64_t>(external_memory_ -
+                               external_memory_at_last_mark_compact_);
 }
 
 
@@ -5276,29 +5228,29 @@
   return factor;
 }
 
-
-intptr_t Heap::CalculateOldGenerationAllocationLimit(double factor,
-                                                     intptr_t old_gen_size) {
+size_t Heap::CalculateOldGenerationAllocationLimit(double factor,
+                                                   size_t old_gen_size) {
   CHECK(factor > 1.0);
   CHECK(old_gen_size > 0);
-  intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
-  limit = Max(limit, old_gen_size + MinimumAllocationLimitGrowingStep());
+  uint64_t limit = static_cast<uint64_t>(old_gen_size * factor);
+  limit = Max(limit, static_cast<uint64_t>(old_gen_size) +
+                         MinimumAllocationLimitGrowingStep());
   limit += new_space_->Capacity();
-  intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
-  return Min(limit, halfway_to_the_max);
+  uint64_t halfway_to_the_max =
+      (static_cast<uint64_t>(old_gen_size) + max_old_generation_size_) / 2;
+  return static_cast<size_t>(Min(limit, halfway_to_the_max));
 }
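A worked example of the limit computation above, with made-up sizes: grow the old generation by the factor, but by at least the minimum growing step, add the new-space capacity, and never exceed the half-way point to the configured maximum. The 8 MB step matches what MinimumAllocationLimitGrowingStep just below computes in the regular case; the other numbers are arbitrary.

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t MB = 1024 * 1024;
  uint64_t old_gen_size = 100 * MB;
  uint64_t max_old_generation_size = 700 * MB;
  uint64_t new_space_capacity = 16 * MB;
  uint64_t min_growing_step = 8 * MB;

  double factor = 1.5;
  uint64_t limit = static_cast<uint64_t>(old_gen_size * factor);    // 150 MB
  limit = std::max(limit, old_gen_size + min_growing_step);         // 150 MB
  limit += new_space_capacity;                                      // 166 MB
  uint64_t halfway = (old_gen_size + max_old_generation_size) / 2;  // 400 MB
  limit = std::min(limit, halfway);
  std::printf("limit = %llu MB\n",
              static_cast<unsigned long long>(limit / MB));  // 166
}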
 
-intptr_t Heap::MinimumAllocationLimitGrowingStep() {
-  const double kRegularAllocationLimitGrowingStep = 8;
-  const double kLowMemoryAllocationLimitGrowingStep = 2;
-  intptr_t limit = (Page::kPageSize > MB ? Page::kPageSize : MB);
+size_t Heap::MinimumAllocationLimitGrowingStep() {
+  const size_t kRegularAllocationLimitGrowingStep = 8;
+  const size_t kLowMemoryAllocationLimitGrowingStep = 2;
+  size_t limit = (Page::kPageSize > MB ? Page::kPageSize : MB);
   return limit * (ShouldOptimizeForMemoryUsage()
                       ? kLowMemoryAllocationLimitGrowingStep
                       : kRegularAllocationLimitGrowingStep);
 }
 
-void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
-                                           double gc_speed,
+void Heap::SetOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
                                            double mutator_speed) {
   double factor = HeapGrowingFactor(gc_speed, mutator_speed);
 
@@ -5331,24 +5283,23 @@
       CalculateOldGenerationAllocationLimit(factor, old_gen_size);
 
   if (FLAG_trace_gc_verbose) {
-    isolate_->PrintWithTimestamp("Grow: old size: %" V8PRIdPTR
-                                 " KB, new limit: %" V8PRIdPTR " KB (%.1f)\n",
-                                 old_gen_size / KB,
-                                 old_generation_allocation_limit_ / KB, factor);
+    isolate_->PrintWithTimestamp(
+        "Grow: old size: %" PRIuS " KB, new limit: %" PRIuS " KB (%.1f)\n",
+        old_gen_size / KB, old_generation_allocation_limit_ / KB, factor);
   }
 }
 
-void Heap::DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
+void Heap::DampenOldGenerationAllocationLimit(size_t old_gen_size,
                                               double gc_speed,
                                               double mutator_speed) {
   double factor = HeapGrowingFactor(gc_speed, mutator_speed);
-  intptr_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
+  size_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
   if (limit < old_generation_allocation_limit_) {
     if (FLAG_trace_gc_verbose) {
       isolate_->PrintWithTimestamp(
-          "Dampen: old size: %" V8PRIdPTR " KB, old limit: %" V8PRIdPTR
+          "Dampen: old size: %" PRIuS " KB, old limit: %" PRIuS
           " KB, "
-          "new limit: %" V8PRIdPTR " KB (%.1f)\n",
+          "new limit: %" PRIuS " KB (%.1f)\n",
           old_gen_size / KB, old_generation_allocation_limit_ / KB, limit / KB,
           factor);
     }
@@ -5361,12 +5312,16 @@
 // major GC. It happens when the old generation allocation limit is reached and
 // - either we need to optimize for memory usage,
 // - or the incremental marking is not in progress and we cannot start it.
-bool Heap::ShouldExpandOldGenerationOnAllocationFailure() {
+bool Heap::ShouldExpandOldGenerationOnSlowAllocation() {
   if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
   // We reached the old generation allocation limit.
 
   if (ShouldOptimizeForMemoryUsage()) return false;
 
+  if (incremental_marking()->NeedsFinalization()) {
+    return !AllocationLimitOvershotByLargeMargin();
+  }
+
   if (incremental_marking()->IsStopped() &&
       IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) {
     // We cannot start incremental marking.
@@ -5382,7 +5337,8 @@
 // The kHardLimit means that incremental marking should be started immediately.
 Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
   if (!incremental_marking()->CanBeActivated() ||
-      PromotedSpaceSizeOfObjects() < IncrementalMarking::kActivationThreshold) {
+      PromotedSpaceSizeOfObjects() <=
+          IncrementalMarking::kActivationThreshold) {
     // Incremental marking is disabled or it is too early to start.
     return IncrementalMarkingLimit::kNoLimit;
   }
@@ -5392,13 +5348,13 @@
     // start marking immediately.
     return IncrementalMarkingLimit::kHardLimit;
   }
-  intptr_t old_generation_space_available = OldGenerationSpaceAvailable();
+  size_t old_generation_space_available = OldGenerationSpaceAvailable();
   if (old_generation_space_available > new_space_->Capacity()) {
     return IncrementalMarkingLimit::kNoLimit;
   }
   // We are close to the allocation limit.
   // Choose between the hard and the soft limits.
-  if (old_generation_space_available <= 0 || ShouldOptimizeForMemoryUsage()) {
+  if (old_generation_space_available == 0 || ShouldOptimizeForMemoryUsage()) {
     return IncrementalMarkingLimit::kHardLimit;
   }
   return IncrementalMarkingLimit::kSoftLimit;
@@ -5433,8 +5389,7 @@
 
 static void InitializeGCOnce() {
   Scavenger::Initialize();
-  StaticScavengeVisitor<DEFAULT_PROMOTION>::Initialize();
-  StaticScavengeVisitor<PROMOTE_MARKED>::Initialize();
+  StaticScavengeVisitor::Initialize();
   MarkCompactCollector::Initialize();
 }
 
@@ -5517,7 +5472,7 @@
   mark_compact_collector_ = new MarkCompactCollector(this);
   gc_idle_time_handler_ = new GCIdleTimeHandler();
   memory_reducer_ = new MemoryReducer(this);
-  if (FLAG_track_gc_object_stats) {
+  if (V8_UNLIKELY(FLAG_gc_stats)) {
     live_object_stats_ = new ObjectStats(this);
     dead_object_stats_ = new ObjectStats(this);
   }
@@ -5569,8 +5524,8 @@
 }
 
 void Heap::ClearStackLimits() {
-  roots_[kStackLimitRootIndex] = Smi::FromInt(0);
-  roots_[kRealStackLimitRootIndex] = Smi::FromInt(0);
+  roots_[kStackLimitRootIndex] = Smi::kZero;
+  roots_[kRealStackLimitRootIndex] = Smi::kZero;
 }
 
 void Heap::PrintAlloctionsHash() {
@@ -5596,8 +5551,7 @@
 }
 
 void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
-  DCHECK_NOT_NULL(tracer);
-  CHECK_NULL(embedder_heap_tracer_);
+  DCHECK_EQ(gc_state_, HeapState::NOT_IN_GC);
   embedder_heap_tracer_ = tracer;
 }
 
@@ -5755,9 +5709,6 @@
 
   delete memory_allocator_;
   memory_allocator_ = nullptr;
-
-  delete embedder_reference_reporter_;
-  embedder_reference_reporter_ = nullptr;
 }
 
 
@@ -5950,11 +5901,10 @@
 
 void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
   if (!InNewSpace(object)) {
-    store_buffer()->MoveEntriesToRememberedSet();
     Address slot_addr = reinterpret_cast<Address>(slot);
     Page* page = Page::FromAddress(slot_addr);
     DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
-    RememberedSet<OLD_TO_NEW>::Remove(page, slot_addr);
+    store_buffer()->DeleteEntry(slot_addr);
     RememberedSet<OLD_TO_OLD>::Remove(page, slot_addr);
   }
 }
@@ -5962,10 +5912,10 @@
 void Heap::ClearRecordedSlotRange(Address start, Address end) {
   Page* page = Page::FromAddress(start);
   if (!page->InNewSpace()) {
-    store_buffer()->MoveEntriesToRememberedSet();
     DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
-    RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end);
-    RememberedSet<OLD_TO_OLD>::RemoveRange(page, start, end);
+    store_buffer()->DeleteEntry(start, end);
+    RememberedSet<OLD_TO_OLD>::RemoveRange(page, start, end,
+                                           SlotSet::FREE_EMPTY_BUCKETS);
   }
 }
 
diff --git a/src/heap/heap.h b/src/heap/heap.h
index cce467f..013cd9a 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -48,8 +48,6 @@
   V(Map, one_byte_string_map, OneByteStringMap)                                \
   V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap)       \
   V(Map, scope_info_map, ScopeInfoMap)                                         \
-  V(Map, module_info_entry_map, ModuleInfoEntryMap)                            \
-  V(Map, module_info_map, ModuleInfoMap)                                       \
   V(Map, shared_function_info_map, SharedFunctionInfoMap)                      \
   V(Map, code_map, CodeMap)                                                    \
   V(Map, function_context_map, FunctionContextMap)                             \
@@ -62,13 +60,13 @@
   V(FixedArray, empty_literals_array, EmptyLiteralsArray)                      \
   V(FixedArray, empty_type_feedback_vector, EmptyTypeFeedbackVector)           \
   V(FixedArray, empty_fixed_array, EmptyFixedArray)                            \
-  V(ScopeInfo, empty_scope_info, EmptyScopeInfo)                               \
-  V(FixedArray, cleared_optimized_code_map, ClearedOptimizedCodeMap)           \
   V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray)             \
   /* Entries beyond the first 32                                            */ \
   /* The roots above this line should be boring from a GC point of view.    */ \
   /* This means they are never in new space and never on a page that is     */ \
   /* being compacted.                                                       */ \
+  /* Empty scope info */                                                       \
+  V(ScopeInfo, empty_scope_info, EmptyScopeInfo)                               \
   /* Oddballs */                                                               \
   V(Oddball, no_interceptor_result_sentinel, NoInterceptorResultSentinel)      \
   V(Oddball, arguments_marker, ArgumentsMarker)                                \
@@ -92,9 +90,9 @@
   V(Map, unseeded_number_dictionary_map, UnseededNumberDictionaryMap)          \
   V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap)            \
   V(Map, message_object_map, JSMessageObjectMap)                               \
-  V(Map, neander_map, NeanderMap)                                              \
   V(Map, external_map, ExternalMap)                                            \
   V(Map, bytecode_array_map, BytecodeArrayMap)                                 \
+  V(Map, module_info_map, ModuleInfoMap)                                       \
   /* String maps */                                                            \
   V(Map, native_source_string_map, NativeSourceStringMap)                      \
   V(Map, string_map, StringMap)                                                \
@@ -168,6 +166,8 @@
   V(PropertyCell, has_instance_protector, HasInstanceProtector)                \
   V(Cell, species_protector, SpeciesProtector)                                 \
   V(PropertyCell, string_length_protector, StringLengthProtector)              \
+  V(Cell, fast_array_iteration_protector, FastArrayIterationProtector)         \
+  V(Cell, array_iterator_protector, ArrayIteratorProtector)                    \
   /* Special numbers */                                                        \
   V(HeapNumber, nan_value, NanValue)                                           \
   V(HeapNumber, hole_nan_value, HoleNanValue)                                  \
@@ -278,7 +278,6 @@
   V(FixedArrayMap)                      \
   V(CodeMap)                            \
   V(ScopeInfoMap)                       \
-  V(ModuleInfoEntryMap)                 \
   V(ModuleInfoMap)                      \
   V(FixedCOWArrayMap)                   \
   V(FixedDoubleArrayMap)                \
@@ -307,7 +306,6 @@
   V(ArgumentsMarkerMap)                 \
   V(JSMessageObjectMap)                 \
   V(ForeignMap)                         \
-  V(NeanderMap)                         \
   V(NanValue)                           \
   V(InfinityValue)                      \
   V(MinusZeroValue)                     \
@@ -342,8 +340,6 @@
 
 typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
 
-enum PromotionMode { PROMOTE_MARKED, DEFAULT_PROMOTION };
-
 enum ArrayStorageAllocationMode {
   DONT_INITIALIZE_ARRAY_ELEMENTS,
   INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
@@ -626,8 +622,8 @@
   static const int kTraceRingBufferSize = 512;
   static const int kStacktraceBufferSize = 512;
 
-  static const double kMinHeapGrowingFactor;
-  static const double kMaxHeapGrowingFactor;
+  V8_EXPORT_PRIVATE static const double kMinHeapGrowingFactor;
+  V8_EXPORT_PRIVATE static const double kMaxHeapGrowingFactor;
   static const double kMaxHeapGrowingFactorMemoryConstrained;
   static const double kMaxHeapGrowingFactorIdle;
   static const double kConservativeHeapGrowingFactor;
@@ -690,7 +686,28 @@
 #endif
   }
 
-  static double HeapGrowingFactor(double gc_speed, double mutator_speed);
+  static inline bool IsYoungGenerationCollector(GarbageCollector collector) {
+    return collector == SCAVENGER || collector == MINOR_MARK_COMPACTOR;
+  }
+
+  static inline GarbageCollector YoungGenerationCollector() {
+    return (FLAG_minor_mc) ? MINOR_MARK_COMPACTOR : SCAVENGER;
+  }
+
+  static inline const char* CollectorName(GarbageCollector collector) {
+    switch (collector) {
+      case SCAVENGER:
+        return "Scavenger";
+      case MARK_COMPACTOR:
+        return "Mark-Compact";
+      case MINOR_MARK_COMPACTOR:
+        return "Minor Mark-Compact";
+    }
+    return "Unknown collector";
+  }
+
+  V8_EXPORT_PRIVATE static double HeapGrowingFactor(double gc_speed,
+                                                    double mutator_speed);
 
   // Copy block of memory from src to dst. Size of block should be aligned
   // by pointer size.
@@ -835,11 +852,8 @@
 
   // An object should be promoted if the object has survived a
   // scavenge operation.
-  template <PromotionMode promotion_mode>
   inline bool ShouldBePromoted(Address old_address, int object_size);
 
-  inline PromotionMode CurrentPromotionMode();
-
   void ClearNormalizedMapCaches();
 
   void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
@@ -916,7 +930,7 @@
 
   bool HasLowAllocationRate();
   bool HasHighFragmentation();
-  bool HasHighFragmentation(intptr_t used, intptr_t committed);
+  bool HasHighFragmentation(size_t used, size_t committed);
 
   void ActivateMemoryReducerIfNeeded();
 
@@ -940,8 +954,8 @@
 
   // Configure heap size in MB before setup. Return false if the heap has been
   // set up already.
-  bool ConfigureHeap(int max_semi_space_size, int max_old_space_size,
-                     int max_executable_size, size_t code_range_size);
+  bool ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
+                     size_t max_executable_size, size_t code_range_size);
   bool ConfigureHeapDefault();
 
   // Prepares the heap, setting up memory areas that are needed in the isolate
@@ -952,6 +966,9 @@
   // Returns whether it succeeded.
   bool CreateHeapObjects();
 
+  // Create ObjectStats if live_object_stats_ or dead_object_stats_ are nullptr.
+  V8_INLINE void CreateObjectStats();
+
   // Destroys all memory allocated by the heap.
   void TearDown();
 
@@ -1023,6 +1040,14 @@
   Handle<Object> root_handle(RootListIndex index) {
     return Handle<Object>(&roots_[index]);
   }
+  template <typename T>
+  bool IsRootHandle(Handle<T> handle, RootListIndex* index) const {
+    Object** const handle_location = bit_cast<Object**>(handle.address());
+    if (handle_location >= &roots_[kRootListLength]) return false;
+    if (handle_location < &roots_[0]) return false;
+    *index = static_cast<RootListIndex>(handle_location - &roots_[0]);
+    return true;
+  }
 
   // Generated code can embed this address to get access to the roots.
   Object** roots_array_start() { return roots_; }
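IsRootHandle above is a containment test on the roots table: a handle refers to a root exactly when its location lies inside [&roots_[0], &roots_[kRootListLength]), and the root index is then plain pointer arithmetic. A standalone sketch of the idiom, with int slots instead of Object* (ordering comparisons between pointers into unrelated objects are unspecified in standard C++, so treat the negative case as illustrative only):

#include <cstdio>

const int kRootListLength = 8;
int roots_[kRootListLength];
int elsewhere_;

bool IsRootLocation(int* location, int* index) {
  if (location >= &roots_[kRootListLength]) return false;  // one past the end
  if (location < &roots_[0]) return false;
  *index = static_cast<int>(location - &roots_[0]);
  return true;
}

int main() {
  int index = -1;
  std::printf("%d index=%d\n", IsRootLocation(&roots_[3], &index), index);  // 1 index=3
  std::printf("%d\n", IsRootLocation(&elsewhere_, &index));  // 0 in practice
}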
@@ -1127,13 +1152,8 @@
   void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
 
   // Iterate pointers of promoted objects.
-  void IteratePromotedObject(HeapObject* target, int size,
-                             bool was_marked_black,
-                             ObjectSlotCallback callback);
-
-  void IteratePromotedObjectPointers(HeapObject* object, Address start,
-                                     Address end, bool record_slots,
-                                     ObjectSlotCallback callback);
+  void IterateAndScavengePromotedObject(HeapObject* target, int size,
+                                        bool was_marked_black);
 
   // ===========================================================================
   // Store buffer API. =========================================================
@@ -1199,10 +1219,6 @@
 
   EmbedderHeapTracer* embedder_heap_tracer() { return embedder_heap_tracer_; }
 
-  EmbedderReachableReferenceReporter* embedder_reachable_reference_reporter() {
-    return embedder_reference_reporter_;
-  }
-
   size_t wrappers_to_trace() { return wrappers_to_trace_.size(); }
 
   // ===========================================================================
@@ -1274,20 +1290,20 @@
   // ===========================================================================
 
   // Returns the maximum amount of memory reserved for the heap.
-  intptr_t MaxReserved() {
+  size_t MaxReserved() {
     return 2 * max_semi_space_size_ + max_old_generation_size_;
   }
-  int MaxSemiSpaceSize() { return max_semi_space_size_; }
-  int InitialSemiSpaceSize() { return initial_semispace_size_; }
-  intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
-  intptr_t MaxExecutableSize() { return max_executable_size_; }
+  size_t MaxSemiSpaceSize() { return max_semi_space_size_; }
+  size_t InitialSemiSpaceSize() { return initial_semispace_size_; }
+  size_t MaxOldGenerationSize() { return max_old_generation_size_; }
+  size_t MaxExecutableSize() { return max_executable_size_; }
 
   // Returns the capacity of the heap in bytes w/o growing. Heap grows when
   // more spaces are needed until it reaches the limit.
-  intptr_t Capacity();
+  size_t Capacity();
 
   // Returns the capacity of the old generation.
-  intptr_t OldGenerationCapacity();
+  size_t OldGenerationCapacity();
 
   // Returns the amount of memory currently committed for the heap.
   size_t CommittedMemory();
@@ -1311,28 +1327,26 @@
   // Returns the available bytes in space w/o growing.
   // Heap doesn't guarantee that it can allocate an object that requires
   // all available bytes. Check MaxHeapObjectSize() instead.
-  intptr_t Available();
+  size_t Available();
 
   // Returns the size of all objects residing in the heap.
-  intptr_t SizeOfObjects();
+  size_t SizeOfObjects();
 
   void UpdateSurvivalStatistics(int start_new_space_size);
 
-  inline void IncrementPromotedObjectsSize(intptr_t object_size) {
-    DCHECK_GE(object_size, 0);
+  inline void IncrementPromotedObjectsSize(size_t object_size) {
     promoted_objects_size_ += object_size;
   }
-  inline intptr_t promoted_objects_size() { return promoted_objects_size_; }
+  inline size_t promoted_objects_size() { return promoted_objects_size_; }
 
-  inline void IncrementSemiSpaceCopiedObjectSize(intptr_t object_size) {
-    DCHECK_GE(object_size, 0);
+  inline void IncrementSemiSpaceCopiedObjectSize(size_t object_size) {
     semi_space_copied_object_size_ += object_size;
   }
-  inline intptr_t semi_space_copied_object_size() {
+  inline size_t semi_space_copied_object_size() {
     return semi_space_copied_object_size_;
   }
 
-  inline intptr_t SurvivedNewSpaceObjectSize() {
+  inline size_t SurvivedNewSpaceObjectSize() {
     return promoted_objects_size_ + semi_space_copied_object_size_;
   }
 
@@ -1342,20 +1356,13 @@
 
   inline void IncrementNodesPromoted() { nodes_promoted_++; }
 
-  inline void IncrementYoungSurvivorsCounter(intptr_t survived) {
-    DCHECK_GE(survived, 0);
+  inline void IncrementYoungSurvivorsCounter(size_t survived) {
     survived_last_scavenge_ = survived;
     survived_since_last_expansion_ += survived;
   }
 
-  inline intptr_t PromotedTotalSize() {
-    int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
-    if (total > std::numeric_limits<intptr_t>::max()) {
-      // TODO(erikcorry): Use uintptr_t everywhere we do heap size calculations.
-      return std::numeric_limits<intptr_t>::max();
-    }
-    if (total < 0) return 0;
-    return static_cast<intptr_t>(total);
+  inline uint64_t PromotedTotalSize() {
+    return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
   }
 
   inline void UpdateNewSpaceAllocationCounter();
@@ -1389,7 +1396,7 @@
   int gc_count() const { return gc_count_; }
 
   // Returns the size of objects residing in non new spaces.
-  intptr_t PromotedSpaceSizeOfObjects();
+  size_t PromotedSpaceSizeOfObjects();
 
   double total_regexp_code_generated() { return total_regexp_code_generated_; }
   void IncreaseTotalRegexpCodeGenerated(int size) {
@@ -1439,6 +1446,10 @@
   // ArrayBuffer tracking. =====================================================
   // ===========================================================================
 
+  // TODO(gc): API usability: encapsulate mutation of JSArrayBuffer::is_external
+  // in the registration/unregistration APIs. Consider dropping the "New" from
+  // "RegisterNewArrayBuffer" because one can re-register a previously
+  // unregistered buffer, too, and the name is confusing.
   void RegisterNewArrayBuffer(JSArrayBuffer* buffer);
   void UnregisterArrayBuffer(JSArrayBuffer* buffer);
 
@@ -1770,6 +1781,8 @@
 
   // Performs a major collection in the whole heap.
   void MarkCompact();
+  // Performs a minor collection of just the young generation.
+  void MinorMarkCompact();
 
   // Code to be run before and after mark-compact.
   void MarkCompactPrologue();
@@ -1778,8 +1791,7 @@
   // Performs a minor collection in new generation.
   void Scavenge();
 
-  Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front,
-                     PromotionMode promotion_mode);
+  Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
 
   void UpdateNewSpaceReferencesInExternalStringTable(
       ExternalStringTableUpdaterCallback updater_func);
@@ -1797,8 +1809,27 @@
   // GC statistics. ============================================================
   // ===========================================================================
 
-  inline intptr_t OldGenerationSpaceAvailable() {
-    return old_generation_allocation_limit_ - PromotedTotalSize();
+  inline size_t OldGenerationSpaceAvailable() {
+    if (old_generation_allocation_limit_ <= PromotedTotalSize()) return 0;
+    return old_generation_allocation_limit_ -
+           static_cast<size_t>(PromotedTotalSize());
+  }
+
+  // We allow incremental marking to overshoot the allocation limit for
+  // performance reasons. If the overshoot is too large then we are more
+  // eager to finalize incremental marking.
+  inline bool AllocationLimitOvershotByLargeMargin() {
+    // This guards against too eager finalization in small heaps.
+    // The number is chosen based on v8.browsing_mobile on Nexus 7v2.
+    size_t kMarginForSmallHeaps = 32u * MB;
+    if (old_generation_allocation_limit_ >= PromotedTotalSize()) return false;
+    uint64_t overshoot = PromotedTotalSize() - old_generation_allocation_limit_;
+    // Overshoot margin is 50% of allocation limit or half-way to the max heap
+    // with special handling of small heaps.
+    uint64_t margin =
+        Min(Max(old_generation_allocation_limit_ / 2, kMarginForSmallHeaps),
+            (max_old_generation_size_ - old_generation_allocation_limit_) / 2);
+    return overshoot >= margin;
   }
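With made-up numbers, the margin rule in AllocationLimitOvershotByLargeMargin works out as sketched below; note the same saturating style as OldGenerationSpaceAvailable above, where an unsigned subtraction is guarded by a comparison first.

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t MB = 1024 * 1024;
  uint64_t limit = 128 * MB;           // old_generation_allocation_limit_
  uint64_t max_size = 512 * MB;        // max_old_generation_size_
  uint64_t promoted_total = 200 * MB;  // PromotedTotalSize()
  const uint64_t kMarginForSmallHeaps = 32 * MB;

  if (limit >= promoted_total) {  // guard before unsigned subtraction
    std::puts("no overshoot");
    return 0;
  }
  uint64_t overshoot = promoted_total - limit;                // 72 MB
  uint64_t margin = std::min(std::max(limit / 2, kMarginForSmallHeaps),
                             (max_size - limit) / 2);         // 64 MB
  std::printf("large overshoot: %d\n", overshoot >= margin);  // 1
}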
 
   void UpdateTotalGCTime(double duration);
@@ -1811,23 +1842,21 @@
 
   // Decrease the allocation limit if the new limit based on the given
   // parameters is lower than the current limit.
-  void DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
-                                          double gc_speed,
+  void DampenOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
                                           double mutator_speed);
 
-
   // Calculates the allocation limit based on a given growing factor and a
   // given old generation size.
-  intptr_t CalculateOldGenerationAllocationLimit(double factor,
-                                                 intptr_t old_gen_size);
+  size_t CalculateOldGenerationAllocationLimit(double factor,
+                                               size_t old_gen_size);
 
   // Sets the allocation limit to trigger the next full garbage collection.
-  void SetOldGenerationAllocationLimit(intptr_t old_gen_size, double gc_speed,
+  void SetOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
                                        double mutator_speed);
 
-  intptr_t MinimumAllocationLimitGrowingStep();
+  size_t MinimumAllocationLimitGrowingStep();
 
-  intptr_t old_generation_allocation_limit() const {
+  size_t old_generation_allocation_limit() const {
     return old_generation_allocation_limit_;
   }
 
@@ -1838,7 +1867,11 @@
     return (OldGenerationCapacity() + size) < MaxOldGenerationSize();
   }
 
-  bool ShouldExpandOldGenerationOnAllocationFailure();
+  bool IsCloseToOutOfMemory(size_t slack) {
+    return OldGenerationCapacity() + slack >= MaxOldGenerationSize();
+  }
+
+  bool ShouldExpandOldGenerationOnSlowAllocation();
 
   enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };
   IncrementalMarkingLimit IncrementalMarkingLimitReached();
@@ -2092,20 +2125,20 @@
   Object* roots_[kRootListLength];
 
   size_t code_range_size_;
-  int max_semi_space_size_;
-  int initial_semispace_size_;
-  intptr_t max_old_generation_size_;
-  intptr_t initial_old_generation_size_;
+  size_t max_semi_space_size_;
+  size_t initial_semispace_size_;
+  size_t max_old_generation_size_;
+  size_t initial_old_generation_size_;
   bool old_generation_size_configured_;
-  intptr_t max_executable_size_;
+  size_t max_executable_size_;
   size_t maximum_committed_;
 
   // For keeping track of how much data has survived
   // scavenge since last new space expansion.
-  intptr_t survived_since_last_expansion_;
+  size_t survived_since_last_expansion_;
 
   // ... and since the last scavenge.
-  intptr_t survived_last_scavenge_;
+  size_t survived_last_scavenge_;
 
   // This is not the depth of nested AlwaysAllocateScope's but rather a single
   // count, as scopes can be acquired from multiple tasks (read: threads).
@@ -2137,7 +2170,7 @@
   Address new_space_top_after_last_gc_;
 
   // Returns the amount of external memory registered since last global gc.
-  int64_t PromotedExternalMemorySize();
+  uint64_t PromotedExternalMemorySize();
 
   // How many "runtime allocations" happened.
   uint32_t allocations_count_;
@@ -2166,7 +2199,7 @@
   // is checked when we have already decided to do a GC to help determine
   // which collector to invoke, before expanding a paged space in the old
   // generation and on every allocation in large object space.
-  intptr_t old_generation_allocation_limit_;
+  size_t old_generation_allocation_limit_;
 
   // Indicates that inline bump-pointer allocation has been globally disabled
   // for all spaces. This is used to disable allocations in generated code.
@@ -2196,11 +2229,11 @@
 
   GCTracer* tracer_;
 
-  intptr_t promoted_objects_size_;
+  size_t promoted_objects_size_;
   double promotion_ratio_;
   double promotion_rate_;
-  intptr_t semi_space_copied_object_size_;
-  intptr_t previous_semi_space_copied_object_size_;
+  size_t semi_space_copied_object_size_;
+  size_t previous_semi_space_copied_object_size_;
   double semi_space_copied_rate_;
   int nodes_died_in_new_space_;
   int nodes_copied_in_new_space_;
@@ -2306,11 +2339,11 @@
   int heap_iterator_depth_;
 
   EmbedderHeapTracer* embedder_heap_tracer_;
-  EmbedderReachableReferenceReporter* embedder_reference_reporter_;
   std::vector<std::pair<void*, void*>> wrappers_to_trace_;
 
   // Used for testing purposes.
   bool force_oom_;
+  bool delay_sweeper_tasks_for_testing_;
 
   // Classes in "heap" can be friends.
   friend class AlwaysAllocateScope;
@@ -2320,7 +2353,6 @@
   friend class IdleScavengeObserver;
   friend class IncrementalMarking;
   friend class IncrementalMarkingJob;
-  friend class IteratePromotedObjectsVisitor;
   friend class LargeObjectSpace;
   friend class MarkCompactCollector;
   friend class MarkCompactMarkingVisitor;
@@ -2629,18 +2661,6 @@
   DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
 };
 
-class TracePossibleWrapperReporter : public EmbedderReachableReferenceReporter {
- public:
-  explicit TracePossibleWrapperReporter(Heap* heap) : heap_(heap) {}
-  void ReportExternalReference(Value* object) override {
-    heap_->RegisterExternallyReferencedObject(
-        reinterpret_cast<Object**>(object));
-  }
-
- private:
-  Heap* heap_;
-};
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/heap/incremental-marking.cc b/src/heap/incremental-marking.cc
index 579228c..4b1d771 100644
--- a/src/heap/incremental-marking.cc
+++ b/src/heap/incremental-marking.cc
@@ -519,17 +519,15 @@
         "[IncrementalMarking] Start marking\n");
   }
 
-  is_compacting_ = !FLAG_never_compact &&
-                   heap_->mark_compact_collector()->StartCompaction(
-                       MarkCompactCollector::INCREMENTAL_COMPACTION);
+  is_compacting_ =
+      !FLAG_never_compact && heap_->mark_compact_collector()->StartCompaction();
 
   state_ = MARKING;
 
   if (heap_->UsingEmbedderHeapTracer()) {
     TRACE_GC(heap()->tracer(),
              GCTracer::Scope::MC_INCREMENTAL_WRAPPER_PROLOGUE);
-    heap_->embedder_heap_tracer()->TracePrologue(
-        heap_->embedder_reachable_reference_reporter());
+    heap_->embedder_heap_tracer()->TracePrologue();
   }
 
   RecordWriteStub::Mode mode = is_compacting_
@@ -538,8 +536,7 @@
 
   PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
 
-  heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize(
-      MarkCompactCollector::kMaxMarkingDequeSize);
+  heap_->mark_compact_collector()->marking_deque()->StartUsing();
 
   ActivateIncrementalWriteBarrier();
 
@@ -587,9 +584,6 @@
 }
 
 void IncrementalMarking::AbortBlackAllocation() {
-  for (Page* page : *heap()->old_space()) {
-    page->ReleaseBlackAreaEndMarkerMap();
-  }
   if (FLAG_trace_incremental_marking) {
     heap()->isolate()->PrintWithTimestamp(
         "[IncrementalMarking] Black allocation aborted\n");
@@ -628,9 +622,9 @@
 
   Object* the_hole_value = heap()->the_hole_value();
   Object* weak_cell_obj = heap()->encountered_weak_cells();
-  Object* weak_cell_head = Smi::FromInt(0);
+  Object* weak_cell_head = Smi::kZero;
   WeakCell* prev_weak_cell_obj = NULL;
-  while (weak_cell_obj != Smi::FromInt(0)) {
+  while (weak_cell_obj != Smi::kZero) {
     WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
     // We do not insert cleared weak cells into the list, so the value
     // cannot be a Smi here.
@@ -648,7 +642,7 @@
       weak_cell_obj = weak_cell->next();
       weak_cell->clear_next(the_hole_value);
     } else {
-      if (weak_cell_head == Smi::FromInt(0)) {
+      if (weak_cell_head == Smi::kZero) {
         weak_cell_head = weak_cell;
       }
       prev_weak_cell_obj = weak_cell;
@@ -1053,7 +1047,7 @@
   DCHECK(state_ == SWEEPING);
   if (heap_->mark_compact_collector()->sweeping_in_progress() &&
       (!FLAG_concurrent_sweeping ||
-       heap_->mark_compact_collector()->sweeper().IsSweepingCompleted())) {
+       !heap_->mark_compact_collector()->sweeper().AreSweeperTasksRunning())) {
     heap_->mark_compact_collector()->EnsureSweepingCompleted();
   }
   if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
@@ -1075,6 +1069,13 @@
   // target step count are chosen based on benchmarks.
   const int kRampUpIntervalMs = 300;
   const size_t kTargetStepCount = 128;
+  const size_t kTargetStepCountAtOOM = 16;
+  size_t oom_slack = heap()->new_space()->Capacity() + 64 * MB;
+
+  if (heap()->IsCloseToOutOfMemory(oom_slack)) {
+    return heap()->PromotedSpaceSizeOfObjects() / kTargetStepCountAtOOM;
+  }
+
   size_t step_size = Max(initial_old_generation_size_ / kTargetStepCount,
                          IncrementalMarking::kAllocatedThreshold);
   double time_passed_ms =
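The new early-out above shrinks the marking schedule near OOM: instead of sizing steps from the initial old generation and a target of 128 steps, it sizes them from the currently promoted size and only 16 steps, so marking finishes in fewer, larger increments. A sketch with made-up sizes; the two step-count constants mirror the hunk:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t MB = 1024 * 1024;
  const size_t kTargetStepCount = 128;
  const size_t kTargetStepCountAtOOM = 16;
  size_t promoted_size = 256 * MB;  // PromotedSpaceSizeOfObjects()
  size_t initial_old_generation_size = 192 * MB;
  bool close_to_oom = true;  // heap()->IsCloseToOutOfMemory(oom_slack)

  size_t step_size = close_to_oom
                         ? promoted_size / kTargetStepCountAtOOM  // 16 MB
                         : initial_old_generation_size / kTargetStepCount;
  std::printf("step = %zu MB\n", step_size / MB);
}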
diff --git a/src/heap/incremental-marking.h b/src/heap/incremental-marking.h
index c2290c4..7ce0ae2 100644
--- a/src/heap/incremental-marking.h
+++ b/src/heap/incremental-marking.h
@@ -66,6 +66,11 @@
     return request_type_ == FINALIZATION && !finalize_marking_completed_;
   }
 
+  inline bool NeedsFinalization() {
+    return IsMarking() &&
+           (request_type_ == FINALIZATION || request_type_ == COMPLETE_MARKING);
+  }
+
   GCRequestType request_type() const { return request_type_; }
 
   void reset_request_type() { request_type_ = NONE; }
diff --git a/src/heap/mark-compact-inl.h b/src/heap/mark-compact-inl.h
index fe71fb1..784a76f 100644
--- a/src/heap/mark-compact-inl.h
+++ b/src/heap/mark-compact-inl.h
@@ -163,12 +163,14 @@
         current_cell_ = *it_.CurrentCell();
       }
 
+      Map* map = nullptr;
       if (current_cell_ & second_bit_index) {
         // We found a black object. If the black object is within a black area,
         // make sure that we skip all set bits in the black area until the
         // object ends.
         HeapObject* black_object = HeapObject::FromAddress(addr);
-        Address end = addr + black_object->Size() - kPointerSize;
+        map = base::NoBarrierAtomicValue<Map*>::FromAddress(addr)->Value();
+        Address end = addr + black_object->SizeFromMap(map) - kPointerSize;
         // One word filler objects do not borrow the second mark bit. We have
         // to jump over the advancing and clearing part.
         // Note that we know that we are at a one word filler when
@@ -198,9 +200,9 @@
 
       // We found a live object.
       if (object != nullptr) {
-        if (object->IsFiller()) {
-          // Black areas together with slack tracking may result in black filler
-          // objects. We filter these objects out in the iterator.
+        if (map != nullptr && map == heap()->one_pointer_filler_map()) {
+          // Black areas together with slack tracking may result in black one
+          // word filler objects. We filter these objects out in the iterator.
           object = nullptr;
         } else {
           break;
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 7e5ef96..88e6983 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -25,6 +25,7 @@
 #include "src/heap/spaces-inl.h"
 #include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
+#include "src/tracing/tracing-category-observer.h"
 #include "src/utils-inl.h"
 #include "src/v8.h"
 
@@ -58,8 +59,7 @@
       compacting_(false),
       black_allocation_(false),
       have_code_to_deoptimize_(false),
-      marking_deque_memory_(NULL),
-      marking_deque_memory_committed_(0),
+      marking_deque_(heap),
       code_flusher_(nullptr),
       sweeper_(heap) {
 }
@@ -240,9 +240,7 @@
   DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
   DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
   DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
-  EnsureMarkingDequeIsReserved();
-  EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
+  marking_deque()->SetUp();
 
   if (FLAG_flush_code) {
     code_flusher_ = new CodeFlusher(isolate());
@@ -255,7 +253,7 @@
 
 void MarkCompactCollector::TearDown() {
   AbortCompaction();
-  delete marking_deque_memory_;
+  marking_deque()->TearDown();
   delete code_flusher_;
 }
 
@@ -276,8 +274,7 @@
          static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
 }
 
-
-bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
+bool MarkCompactCollector::StartCompaction() {
   if (!compacting_) {
     DCHECK(evacuation_candidates_.length() == 0);
 
@@ -293,33 +290,12 @@
       TraceFragmentation(heap()->map_space());
     }
 
-    heap()->old_space()->EvictEvacuationCandidatesFromLinearAllocationArea();
-    heap()->code_space()->EvictEvacuationCandidatesFromLinearAllocationArea();
-
     compacting_ = evacuation_candidates_.length() > 0;
   }
 
   return compacting_;
 }
 
-void MarkCompactCollector::ClearInvalidRememberedSetSlots() {
-  {
-    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
-    RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(heap());
-  }
-// There is not need to filter the old to old set because
-// it is completely cleared after the mark-compact GC.
-// The slots that become invalid due to runtime transitions are
-// cleared eagerly immediately after the transition.
-
-#ifdef VERIFY_HEAP
-  if (FLAG_verify_heap) {
-    RememberedSet<OLD_TO_NEW>::VerifyValidSlots(heap());
-    RememberedSet<OLD_TO_OLD>::VerifyValidSlots(heap());
-  }
-#endif
-}
-
 void MarkCompactCollector::CollectGarbage() {
   // Make sure that Prepare() has been called. The individual steps below will
   // update the state as they proceed.
@@ -339,7 +315,7 @@
   }
 #endif
 
-  SweepSpaces();
+  StartSweepSpaces();
 
   EvacuateNewSpaceAndCandidates();
 
@@ -469,20 +445,18 @@
     std::sort(sweeping_list_[space].begin(), sweeping_list_[space].end(),
               [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); });
   });
-  if (FLAG_concurrent_sweeping) {
-    ForAllSweepingSpaces([this](AllocationSpace space) {
-      if (space == NEW_SPACE) return;
-      StartSweepingHelper(space);
-    });
-  }
 }
 
-void MarkCompactCollector::Sweeper::StartSweepingHelper(
-    AllocationSpace space_to_start) {
-  num_sweeping_tasks_.Increment(1);
-  V8::GetCurrentPlatform()->CallOnBackgroundThread(
-      new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start),
-      v8::Platform::kShortRunningTask);
+void MarkCompactCollector::Sweeper::StartSweeperTasks() {
+  if (FLAG_concurrent_sweeping && sweeping_in_progress_) {
+    ForAllSweepingSpaces([this](AllocationSpace space) {
+      if (space == NEW_SPACE) return;
+      num_sweeping_tasks_.Increment(1);
+      V8::GetCurrentPlatform()->CallOnBackgroundThread(
+          new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space),
+          v8::Platform::kShortRunningTask);
+    });
+  }
 }
 
 void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
@@ -499,7 +473,8 @@
 }
 
 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
-  if (FLAG_concurrent_sweeping && !sweeper().IsSweepingCompleted()) {
+  if (FLAG_concurrent_sweeping &&
+      !sweeper().IsSweepingCompleted(space->identity())) {
     sweeper().ParallelSweepSpace(space->identity(), 0);
     space->RefillFreeList();
   }
@@ -519,10 +494,11 @@
 
   // If sweeping is not completed or not running at all, we try to complete it
   // here.
-  if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
-    ForAllSweepingSpaces(
-        [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
-  }
+  ForAllSweepingSpaces([this](AllocationSpace space) {
+    if (!FLAG_concurrent_sweeping || !this->IsSweepingCompleted(space)) {
+      ParallelSweepSpace(space, 0);
+    }
+  });
 
   if (FLAG_concurrent_sweeping) {
     while (num_sweeping_tasks_.Value() > 0) {
@@ -537,13 +513,12 @@
     }
     DCHECK(sweeping_list_[space].empty());
   });
-  late_pages_ = false;
   sweeping_in_progress_ = false;
 }
 
 void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() {
   if (!sweeping_in_progress_) return;
-  if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
+  if (!FLAG_concurrent_sweeping || !IsSweepingCompleted(NEW_SPACE)) {
     for (Page* p : *heap_->new_space()) {
       SweepOrWaitUntilSweepingCompleted(p);
     }
@@ -565,13 +540,20 @@
 #endif
 }
 
-bool MarkCompactCollector::Sweeper::IsSweepingCompleted() {
+bool MarkCompactCollector::Sweeper::AreSweeperTasksRunning() {
   DCHECK(FLAG_concurrent_sweeping);
   while (pending_sweeper_tasks_semaphore_.WaitFor(
       base::TimeDelta::FromSeconds(0))) {
     num_sweeping_tasks_.Increment(-1);
   }
-  return num_sweeping_tasks_.Value() == 0;
+  return num_sweeping_tasks_.Value() != 0;
+}
+
+bool MarkCompactCollector::Sweeper::IsSweepingCompleted(AllocationSpace space) {
+  DCHECK(FLAG_concurrent_sweeping);
+  if (AreSweeperTasksRunning()) return false;
+  base::LockGuard<base::Mutex> guard(&mutex_);
+  return sweeping_list_[space].empty();
 }
 
 const char* AllocationSpaceName(AllocationSpace space) {
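
Review note: the AreSweeperTasksRunning()/IsSweepingCompleted(space) split above makes "done" a per-space property: no task may still be running, and that space's work list must be drained. A freestanding sketch of the same protocol, assuming std:: primitives in place of V8's base:: types:

    #include <atomic>
    #include <deque>
    #include <mutex>

    struct SweeperState {
      std::atomic<int> running_tasks{0};
      std::mutex mutex;
      std::deque<int> pages[3];  // one work list per space (illustrative)

      // Each background task decrements running_tasks when it finishes.
      bool AreTasksRunning() { return running_tasks.load() != 0; }

      // A space is swept only when no tasks run and its list is empty.
      bool IsSweepingCompleted(int space) {
        if (AreTasksRunning()) return false;
        std::lock_guard<std::mutex> guard(mutex);
        return pages[space].empty();
      }
    };
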
@@ -593,22 +575,21 @@
   return NULL;
 }
 
-
 void MarkCompactCollector::ComputeEvacuationHeuristics(
-    int area_size, int* target_fragmentation_percent,
-    int* max_evacuated_bytes) {
+    size_t area_size, int* target_fragmentation_percent,
+    size_t* max_evacuated_bytes) {
   // For memory reducing and optimize for memory mode we directly define both
   // constants.
   const int kTargetFragmentationPercentForReduceMemory = 20;
-  const int kMaxEvacuatedBytesForReduceMemory = 12 * MB;
+  const size_t kMaxEvacuatedBytesForReduceMemory = 12 * MB;
   const int kTargetFragmentationPercentForOptimizeMemory = 20;
-  const int kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
+  const size_t kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
 
   // For regular mode (which is latency critical) we define less aggressive
   // defaults to start and switch to a trace-based (using compaction speed)
   // approach as soon as we have enough samples.
   const int kTargetFragmentationPercent = 70;
-  const int kMaxEvacuatedBytes = 4 * MB;
+  const size_t kMaxEvacuatedBytes = 4 * MB;
   // Time to take for a single area (=payload of page). Used as soon as there
   // exist enough compaction speed samples.
   const float kTargetMsPerArea = .5;
@@ -647,15 +628,22 @@
   DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
 
   int number_of_pages = space->CountTotalPages();
-  int area_size = space->AreaSize();
+  size_t area_size = space->AreaSize();
 
   // Pairs of (live_bytes_in_page, page).
-  typedef std::pair<int, Page*> LiveBytesPagePair;
+  typedef std::pair<size_t, Page*> LiveBytesPagePair;
   std::vector<LiveBytesPagePair> pages;
   pages.reserve(number_of_pages);
 
+  DCHECK(!sweeping_in_progress());
+  DCHECK(!FLAG_concurrent_sweeping ||
+         sweeper().IsSweepingCompleted(space->identity()));
+  Page* owner_of_linear_allocation_area =
+      space->top() == space->limit()
+          ? nullptr
+          : Page::FromAllocationAreaAddress(space->top());
   for (Page* p : *space) {
-    if (p->NeverEvacuate()) continue;
+    if (p->NeverEvacuate() || p == owner_of_linear_allocation_area) continue;
     // Invariant: Evacuation candidates are just created when marking is
     // started. This means that sweeping has finished. Furthermore, at the end
     // of a GC all evacuation candidates are cleared and their slot buffers are
@@ -669,7 +657,7 @@
   }
 
   int candidate_count = 0;
-  int total_live_bytes = 0;
+  size_t total_live_bytes = 0;
 
   const bool reduce_memory = heap()->ShouldReduceMemory();
   if (FLAG_manual_evacuation_candidates_selection) {
@@ -705,12 +693,12 @@
     // them starting with the page with the most free memory, adding them to the
     // set of evacuation candidates as long as both conditions (fragmentation
     // and quota) hold.
-    int max_evacuated_bytes;
+    size_t max_evacuated_bytes;
     int target_fragmentation_percent;
     ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
                                 &max_evacuated_bytes);
 
-    const intptr_t free_bytes_threshold =
+    const size_t free_bytes_threshold =
         target_fragmentation_percent * (area_size / 100);
 
     // Sort pages from the most free to the least free, then select
@@ -723,8 +711,9 @@
                 return a.first < b.first;
               });
     for (size_t i = 0; i < pages.size(); i++) {
-      int live_bytes = pages[i].first;
-      int free_bytes = area_size - live_bytes;
+      size_t live_bytes = pages[i].first;
+      DCHECK_GE(area_size, live_bytes);
+      size_t free_bytes = area_size - live_bytes;
       if (FLAG_always_compact ||
           ((free_bytes >= free_bytes_threshold) &&
            ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
@@ -733,10 +722,10 @@
       }
       if (FLAG_trace_fragmentation_verbose) {
         PrintIsolate(isolate(),
-                     "compaction-selection-page: space=%s free_bytes_page=%d "
-                     "fragmentation_limit_kb=%" V8PRIdPTR
-                     " fragmentation_limit_percent=%d sum_compaction_kb=%d "
-                     "compaction_limit_kb=%d\n",
+                     "compaction-selection-page: space=%s free_bytes_page=%zu "
+                     "fragmentation_limit_kb=%" PRIuS
+                     " fragmentation_limit_percent=%d sum_compaction_kb=%zu "
+                     "compaction_limit_kb=%zu\n",
                      AllocationSpaceName(space->identity()), free_bytes / KB,
                      free_bytes_threshold / KB, target_fragmentation_percent,
                      total_live_bytes / KB, max_evacuated_bytes / KB);
@@ -744,7 +733,8 @@
     }
     // How many pages we will allocate for the evacuated objects
     // in the worst case: ceil(total_live_bytes / area_size)
-    int estimated_new_pages = (total_live_bytes + area_size - 1) / area_size;
+    int estimated_new_pages =
+        static_cast<int>((total_live_bytes + area_size - 1) / area_size);
     DCHECK_LE(estimated_new_pages, candidate_count);
     int estimated_released_pages = candidate_count - estimated_new_pages;
     // Avoid (compact -> expand) cycles.
@@ -759,7 +749,7 @@
   if (FLAG_trace_fragmentation) {
     PrintIsolate(isolate(),
                  "compaction-selection: space=%s reduce_memory=%d pages=%d "
-                 "total_live_bytes=%d\n",
+                 "total_live_bytes=%zu\n",
                  AllocationSpaceName(space->identity()), reduce_memory,
                  candidate_count, total_live_bytes / KB);
   }
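
To make the selection math above concrete: with the regular-mode defaults (target fragmentation 70%, 4 MB evacuation budget), a page whose payload is 500 KB needs at least 70 * (500 KB / 100) = 350 KB free to qualify. A self-contained sketch; the 500 KB area size and 120 KB live bytes are illustrative figures, not V8 constants:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t KB = 1024, MB = 1024 * KB;
      const size_t area_size = 500 * KB;           // illustrative page payload
      const int target_fragmentation_percent = 70; // regular-mode default
      const size_t max_evacuated_bytes = 4 * MB;   // regular-mode default
      const size_t threshold = target_fragmentation_percent * (area_size / 100);
      const size_t live_bytes = 120 * KB;
      const size_t free_bytes = area_size - live_bytes;
      // Selected if fragmented enough and the evacuation budget still has
      // room (total_live_bytes accumulates over previously chosen pages).
      size_t total_live_bytes = 0;
      bool selected = free_bytes >= threshold &&
                      total_live_bytes + live_bytes <= max_evacuated_bytes;
      std::printf("threshold=%zuKB free=%zuKB selected=%d\n",
                  threshold / KB, free_bytes / KB, selected);  // 350, 380, 1
      return 0;
    }
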
@@ -794,6 +784,10 @@
     EnsureSweepingCompleted();
   }
 
+  if (heap()->incremental_marking()->IsSweeping()) {
+    heap()->incremental_marking()->Stop();
+  }
+
   // If concurrent unmapping tasks are still running, we should wait for
   // them here.
   heap()->memory_allocator()->unmapper()->WaitUntilCompleted();
@@ -810,14 +804,14 @@
     if (heap_->UsingEmbedderHeapTracer()) {
       heap_->embedder_heap_tracer()->AbortTracing();
     }
+    marking_deque()->Clear();
     was_marked_incrementally_ = false;
   }
 
   if (!was_marked_incrementally_) {
     if (heap_->UsingEmbedderHeapTracer()) {
       TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_PROLOGUE);
-      heap_->embedder_heap_tracer()->TracePrologue(
-          heap_->embedder_reachable_reference_reporter());
+      heap_->embedder_heap_tracer()->TracePrologue();
     }
   }
 
@@ -828,7 +822,7 @@
   // Don't start compaction if we are in the middle of incremental
   // marking cycle. We did not collect any slots.
   if (!FLAG_never_compact && !was_marked_incrementally_) {
-    StartCompaction(NON_INCREMENTAL_COMPACTION);
+    StartCompaction();
   }
 
   PagedSpaces spaces(heap());
@@ -849,10 +843,8 @@
 void MarkCompactCollector::Finish() {
   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
 
-  if (sweeper().contains_late_pages() && FLAG_concurrent_sweeping) {
-    // If we added some more pages during MC, we need to start at least one
-    // more task as all other tasks might already be finished.
-    sweeper().StartSweepingHelper(OLD_SPACE);
+  if (!heap()->delay_sweeper_tasks_for_testing_) {
+    sweeper().StartSweeperTasks();
   }
 
   // The hashing of weak_object_to_code_table is no longer valid.
@@ -1647,7 +1639,7 @@
       DCHECK_OBJECT_SIZE(size);
       DCHECK(IsAligned(size, kPointerSize));
       heap_->CopyBlock(dst_addr, src_addr, size);
-      if ((mode == kProfiled) && FLAG_ignition && dst->IsBytecodeArray()) {
+      if ((mode == kProfiled) && dst->IsBytecodeArray()) {
         PROFILE(heap_->isolate(),
                 CodeMoveEvent(AbstractCode::cast(src), dst_addr));
       }
@@ -1720,7 +1712,7 @@
                                                local_pretenuring_feedback_);
     int size = object->Size();
     HeapObject* target_object = nullptr;
-    if (heap_->ShouldBePromoted<DEFAULT_PROMOTION>(object->address(), size) &&
+    if (heap_->ShouldBePromoted(object->address(), size) &&
         TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
                           &target_object)) {
       promoted_size_ += size;
@@ -1841,41 +1833,48 @@
   base::HashMap* local_pretenuring_feedback_;
 };
 
+template <PageEvacuationMode mode>
 class MarkCompactCollector::EvacuateNewSpacePageVisitor final
     : public MarkCompactCollector::HeapObjectVisitor {
  public:
-  explicit EvacuateNewSpacePageVisitor(Heap* heap)
-      : heap_(heap), promoted_size_(0), semispace_copied_size_(0) {}
+  explicit EvacuateNewSpacePageVisitor(
+      Heap* heap, base::HashMap* local_pretenuring_feedback)
+      : heap_(heap),
+        moved_bytes_(0),
+        local_pretenuring_feedback_(local_pretenuring_feedback) {}
 
-  static void MoveToOldSpace(Page* page, PagedSpace* owner) {
-    page->Unlink();
-    Page* new_page = Page::ConvertNewToOld(page, owner);
-    new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
-  }
-
-  static void MoveToToSpace(Page* page) {
-    page->heap()->new_space()->MovePageFromSpaceToSpace(page);
-    page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
+  static void Move(Page* page) {
+    switch (mode) {
+      case NEW_TO_NEW:
+        page->heap()->new_space()->MovePageFromSpaceToSpace(page);
+        page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
+        break;
+      case NEW_TO_OLD: {
+        page->Unlink();
+        Page* new_page = Page::ConvertNewToOld(page);
+        new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
+        break;
+      }
+    }
   }
 
   inline bool Visit(HeapObject* object) {
-    RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
-    object->IterateBodyFast(&visitor);
-    promoted_size_ += object->Size();
+    heap_->UpdateAllocationSite<Heap::kCached>(object,
+                                               local_pretenuring_feedback_);
+    if (mode == NEW_TO_OLD) {
+      RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
+      object->IterateBodyFast(&visitor);
+    }
     return true;
   }
 
-  intptr_t promoted_size() { return promoted_size_; }
-  intptr_t semispace_copied_size() { return semispace_copied_size_; }
-
-  void account_semispace_copied(intptr_t copied) {
-    semispace_copied_size_ += copied;
-  }
+  intptr_t moved_bytes() { return moved_bytes_; }
+  void account_moved_bytes(intptr_t bytes) { moved_bytes_ += bytes; }
 
  private:
   Heap* heap_;
-  intptr_t promoted_size_;
-  intptr_t semispace_copied_size_;
+  intptr_t moved_bytes_;
+  base::HashMap* local_pretenuring_feedback_;
 };
 
 class MarkCompactCollector::EvacuateOldSpaceVisitor final
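
Review note: making EvacuateNewSpacePageVisitor a template over PageEvacuationMode collapses the old MoveToOldSpace/MoveToToSpace pair into one Move() whose switch is resolved per instantiation; the dead arm disappears at compile time. The shape of the pattern, reduced to a sketch:

    enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };

    template <PageEvacuationMode mode>
    struct PageMover {
      static const char* Move() {
        // 'mode' is a compile-time constant, so each instantiation keeps
        // exactly one arm of this switch.
        switch (mode) {
          case NEW_TO_NEW:
            return "moved within new space";
          case NEW_TO_OLD:
            return "promoted to old space";
        }
        return nullptr;
      }
    };
    // Usage: PageMover<NEW_TO_OLD>::Move();
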
@@ -2121,85 +2120,87 @@
   }
 }
 
-
-void MarkCompactCollector::EnsureMarkingDequeIsReserved() {
-  DCHECK(!marking_deque()->in_use());
-  if (marking_deque_memory_ == NULL) {
-    marking_deque_memory_ = new base::VirtualMemory(kMaxMarkingDequeSize);
-    marking_deque_memory_committed_ = 0;
-  }
-  if (marking_deque_memory_ == NULL) {
-    V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsReserved");
+void MarkingDeque::SetUp() {
+  backing_store_ = new base::VirtualMemory(kMaxSize);
+  backing_store_committed_size_ = 0;
+  if (backing_store_ == nullptr) {
+    V8::FatalProcessOutOfMemory("MarkingDeque::SetUp");
   }
 }
 
+void MarkingDeque::TearDown() {
+  delete backing_store_;
+}
 
-void MarkCompactCollector::EnsureMarkingDequeIsCommitted(size_t max_size) {
-  // If the marking deque is too small, we try to allocate a bigger one.
-  // If that fails, make do with a smaller one.
-  CHECK(!marking_deque()->in_use());
-  for (size_t size = max_size; size >= kMinMarkingDequeSize; size >>= 1) {
-    base::VirtualMemory* memory = marking_deque_memory_;
-    size_t currently_committed = marking_deque_memory_committed_;
-
-    if (currently_committed == size) return;
-
-    if (currently_committed > size) {
-      bool success = marking_deque_memory_->Uncommit(
-          reinterpret_cast<Address>(marking_deque_memory_->address()) + size,
-          currently_committed - size);
-      if (success) {
-        marking_deque_memory_committed_ = size;
-        return;
-      }
-      UNREACHABLE();
-    }
-
-    bool success = memory->Commit(
-        reinterpret_cast<Address>(memory->address()) + currently_committed,
-        size - currently_committed,
-        false);  // Not executable.
-    if (success) {
-      marking_deque_memory_committed_ = size;
-      return;
-    }
+void MarkingDeque::StartUsing() {
+  base::LockGuard<base::Mutex> guard(&mutex_);
+  if (in_use_) {
+    // This can happen in mark-compact GC if the incremental marker already
+    // started using the marking deque.
+    return;
   }
-  V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsCommitted");
-}
-
-
-void MarkCompactCollector::InitializeMarkingDeque() {
-  DCHECK(!marking_deque()->in_use());
-  DCHECK(marking_deque_memory_committed_ > 0);
-  Address addr = static_cast<Address>(marking_deque_memory_->address());
-  size_t size = marking_deque_memory_committed_;
-  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
-  marking_deque()->Initialize(addr, addr + size);
-}
-
-
-void MarkingDeque::Initialize(Address low, Address high) {
-  DCHECK(!in_use_);
-  HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
-  HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
-  array_ = obj_low;
-  mask_ = base::bits::RoundDownToPowerOfTwo32(
-              static_cast<uint32_t>(obj_high - obj_low)) -
-          1;
+  in_use_ = true;
+  EnsureCommitted();
+  array_ = reinterpret_cast<HeapObject**>(backing_store_->address());
+  size_t size = FLAG_force_marking_deque_overflows
+                    ? 64 * kPointerSize
+                    : backing_store_committed_size_;
+  DCHECK(
+      base::bits::IsPowerOfTwo32(static_cast<uint32_t>(size / kPointerSize)));
+  mask_ = static_cast<int>((size / kPointerSize) - 1);
   top_ = bottom_ = 0;
   overflowed_ = false;
-  in_use_ = true;
 }
 
-
-void MarkingDeque::Uninitialize(bool aborting) {
-  if (!aborting) {
-    DCHECK(IsEmpty());
-    DCHECK(!overflowed_);
-  }
-  DCHECK(in_use_);
-  top_ = bottom_ = 0xdecbad;
+void MarkingDeque::StopUsing() {
+  base::LockGuard<base::Mutex> guard(&mutex_);
+  DCHECK(IsEmpty());
+  DCHECK(!overflowed_);
+  top_ = bottom_ = mask_ = 0;
   in_use_ = false;
+  if (FLAG_concurrent_sweeping) {
+    StartUncommitTask();
+  } else {
+    Uncommit();
+  }
+}
+
+void MarkingDeque::Clear() {
+  DCHECK(in_use_);
+  top_ = bottom_ = 0;
+  overflowed_ = false;
+}
+
+void MarkingDeque::Uncommit() {
+  DCHECK(!in_use_);
+  bool success = backing_store_->Uncommit(backing_store_->address(),
+                                          backing_store_committed_size_);
+  backing_store_committed_size_ = 0;
+  CHECK(success);
+}
+
+void MarkingDeque::EnsureCommitted() {
+  DCHECK(in_use_);
+  if (backing_store_committed_size_ > 0) return;
+
+  for (size_t size = kMaxSize; size >= kMinSize; size /= 2) {
+    if (backing_store_->Commit(backing_store_->address(), size, false)) {
+      backing_store_committed_size_ = size;
+      break;
+    }
+  }
+  if (backing_store_committed_size_ == 0) {
+    V8::FatalProcessOutOfMemory("MarkingDeque::EnsureCommitted");
+  }
+}
+
+void MarkingDeque::StartUncommitTask() {
+  if (!uncommit_task_pending_) {
+    uncommit_task_pending_ = true;
+    UncommitTask* task = new UncommitTask(heap_->isolate(), this);
+    V8::GetCurrentPlatform()->CallOnBackgroundThread(
+        task, v8::Platform::kShortRunningTask);
+  }
 }
 
 class MarkCompactCollector::ObjectStatsVisitor
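
The new MarkingDeque now owns its backing store outright: SetUp() reserves the maximal 4 MB of address space once, StartUsing() commits physical memory on demand, and StopUsing() gives it back (via a background task when concurrent sweeping is on). A compressed sketch of that reserve/commit/uncommit lifecycle, assuming POSIX mmap in place of base::VirtualMemory and omitting error handling:

    #include <sys/mman.h>
    #include <cstddef>

    class LazyBuffer {
     public:
      explicit LazyBuffer(size_t max) : max_(max) {
        // Reserve address space only; no physical pages yet.
        base_ = mmap(nullptr, max_, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      }
      void StartUsing() {  // commit on first use
        mprotect(base_, max_, PROT_READ | PROT_WRITE);
      }
      void StopUsing() {   // return physical pages, keep the reservation
        madvise(base_, max_, MADV_DONTNEED);
        mprotect(base_, max_, PROT_NONE);
      }
      ~LazyBuffer() { munmap(base_, max_); }

     private:
      void* base_;
      size_t max_;
    };
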
@@ -2242,17 +2243,21 @@
 }
 
 void MarkCompactCollector::RecordObjectStats() {
-  if (FLAG_track_gc_object_stats) {
+  if (V8_UNLIKELY(FLAG_gc_stats)) {
+    heap()->CreateObjectStats();
     ObjectStatsVisitor visitor(heap(), heap()->live_object_stats_,
                                heap()->dead_object_stats_);
     VisitAllObjects(&visitor);
-    std::stringstream live, dead;
-    heap()->live_object_stats_->Dump(live);
-    heap()->dead_object_stats_->Dump(dead);
-    TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
-                         "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
-                         "live", TRACE_STR_COPY(live.str().c_str()), "dead",
-                         TRACE_STR_COPY(dead.str().c_str()));
+    if (V8_UNLIKELY(FLAG_gc_stats &
+                    v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
+      std::stringstream live, dead;
+      heap()->live_object_stats_->Dump(live);
+      heap()->dead_object_stats_->Dump(dead);
+      TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
+                           "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
+                           "live", TRACE_STR_COPY(live.str().c_str()), "dead",
+                           TRACE_STR_COPY(dead.str().c_str()));
+    }
     if (FLAG_trace_gc_object_stats) {
       heap()->live_object_stats_->PrintJSON("live");
       heap()->dead_object_stats_->PrintJSON("dead");
@@ -2275,11 +2280,7 @@
     if (was_marked_incrementally_) {
       incremental_marking->Finalize();
     } else {
-      // Abort any pending incremental activities e.g. incremental sweeping.
-      incremental_marking->Stop();
-      if (marking_deque()->in_use()) {
-        marking_deque()->Uninitialize(true);
-      }
+      CHECK(incremental_marking->IsStopped());
     }
   }
 
@@ -2288,8 +2289,7 @@
   state_ = MARK_LIVE_OBJECTS;
 #endif
 
-  EnsureMarkingDequeIsCommittedAndInitialize(
-      MarkCompactCollector::kMaxMarkingDequeSize);
+  marking_deque()->StartUsing();
 
   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
@@ -2410,8 +2410,6 @@
   MarkDependentCodeForDeoptimization(dependent_code_list);
 
   ClearWeakCollections();
-
-  ClearInvalidRememberedSetSlots();
 }
 
 
@@ -2480,7 +2478,7 @@
     Object* non_live_map_list) {
   Object* the_hole_value = heap()->the_hole_value();
   Object* weak_cell_obj = non_live_map_list;
-  while (weak_cell_obj != Smi::FromInt(0)) {
+  while (weak_cell_obj != Smi::kZero) {
     WeakCell* weak_cell = WeakCell::cast(weak_cell_obj);
     Map* map = Map::cast(weak_cell->value());
     DCHECK(Marking::IsWhite(ObjectMarking::MarkBitFrom(map)));
@@ -2504,7 +2502,7 @@
   // A previously existing simple transition (stored in a WeakCell) is going
   // to be cleared. Clear the useless cell pointer, and take ownership
   // of the descriptor array.
-  map->set_raw_transitions(Smi::FromInt(0));
+  map->set_raw_transitions(Smi::kZero);
   int number_of_own_descriptors = map->NumberOfOwnDescriptors();
   DescriptorArray* descriptors = map->instance_descriptors();
   if (descriptors == dead_transition->instance_descriptors() &&
@@ -2519,7 +2517,7 @@
 void MarkCompactCollector::ClearFullMapTransitions() {
   HeapObject* undefined = heap()->undefined_value();
   Object* obj = heap()->encountered_transition_arrays();
-  while (obj != Smi::FromInt(0)) {
+  while (obj != Smi::kZero) {
     TransitionArray* array = TransitionArray::cast(obj);
     int num_transitions = array->number_of_entries();
     DCHECK_EQ(TransitionArray::NumberOfTransitions(array), num_transitions);
@@ -2539,7 +2537,7 @@
     obj = array->next_link();
     array->set_next_link(undefined, SKIP_WRITE_BARRIER);
   }
-  heap()->set_encountered_transition_arrays(Smi::FromInt(0));
+  heap()->set_encountered_transition_arrays(Smi::kZero);
 }
 
 
@@ -2643,7 +2641,7 @@
 
 void MarkCompactCollector::ProcessWeakCollections() {
   Object* weak_collection_obj = heap()->encountered_weak_collections();
-  while (weak_collection_obj != Smi::FromInt(0)) {
+  while (weak_collection_obj != Smi::kZero) {
     JSWeakCollection* weak_collection =
         reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
     DCHECK(MarkCompactCollector::IsMarked(weak_collection));
@@ -2669,7 +2667,7 @@
 void MarkCompactCollector::ClearWeakCollections() {
   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
   Object* weak_collection_obj = heap()->encountered_weak_collections();
-  while (weak_collection_obj != Smi::FromInt(0)) {
+  while (weak_collection_obj != Smi::kZero) {
     JSWeakCollection* weak_collection =
         reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
     DCHECK(MarkCompactCollector::IsMarked(weak_collection));
@@ -2685,19 +2683,19 @@
     weak_collection_obj = weak_collection->next();
     weak_collection->set_next(heap()->undefined_value());
   }
-  heap()->set_encountered_weak_collections(Smi::FromInt(0));
+  heap()->set_encountered_weak_collections(Smi::kZero);
 }
 
 
 void MarkCompactCollector::AbortWeakCollections() {
   Object* weak_collection_obj = heap()->encountered_weak_collections();
-  while (weak_collection_obj != Smi::FromInt(0)) {
+  while (weak_collection_obj != Smi::kZero) {
     JSWeakCollection* weak_collection =
         reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
     weak_collection_obj = weak_collection->next();
     weak_collection->set_next(heap()->undefined_value());
   }
-  heap()->set_encountered_weak_collections(Smi::FromInt(0));
+  heap()->set_encountered_weak_collections(Smi::kZero);
 }
 
 
@@ -2709,8 +2707,8 @@
   Object* the_hole_value = heap->the_hole_value();
   DependentCode* dependent_code_head =
       DependentCode::cast(heap->empty_fixed_array());
-  Object* non_live_map_head = Smi::FromInt(0);
-  while (weak_cell_obj != Smi::FromInt(0)) {
+  Object* non_live_map_head = Smi::kZero;
+  while (weak_cell_obj != Smi::kZero) {
     WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
     Object* next_weak_cell = weak_cell->next();
     bool clear_value = true;
@@ -2770,7 +2768,7 @@
     }
     weak_cell_obj = next_weak_cell;
   }
-  heap->set_encountered_weak_cells(Smi::FromInt(0));
+  heap->set_encountered_weak_cells(Smi::kZero);
   *non_live_map_list = non_live_map_head;
   *dependent_code_list = dependent_code_head;
 }
@@ -2779,24 +2777,24 @@
 void MarkCompactCollector::AbortWeakCells() {
   Object* the_hole_value = heap()->the_hole_value();
   Object* weak_cell_obj = heap()->encountered_weak_cells();
-  while (weak_cell_obj != Smi::FromInt(0)) {
+  while (weak_cell_obj != Smi::kZero) {
     WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
     weak_cell_obj = weak_cell->next();
     weak_cell->clear_next(the_hole_value);
   }
-  heap()->set_encountered_weak_cells(Smi::FromInt(0));
+  heap()->set_encountered_weak_cells(Smi::kZero);
 }
 
 
 void MarkCompactCollector::AbortTransitionArrays() {
   HeapObject* undefined = heap()->undefined_value();
   Object* obj = heap()->encountered_transition_arrays();
-  while (obj != Smi::FromInt(0)) {
+  while (obj != Smi::kZero) {
     TransitionArray* array = TransitionArray::cast(obj);
     obj = array->next_link();
     array->set_next_link(undefined, SKIP_WRITE_BARRIER);
   }
-  heap()->set_encountered_transition_arrays(Smi::FromInt(0));
+  heap()->set_encountered_transition_arrays(Smi::kZero);
 }
 
 void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
@@ -2889,128 +2887,6 @@
   return String::cast(*p);
 }
 
-bool MarkCompactCollector::IsSlotInBlackObject(MemoryChunk* p, Address slot) {
-  Space* owner = p->owner();
-  DCHECK(owner != heap_->lo_space() && owner != nullptr);
-  USE(owner);
-
-  // We may be part of a black area.
-  if (Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(slot))) {
-    return true;
-  }
-
-  uint32_t mark_bit_index = p->AddressToMarkbitIndex(slot);
-  unsigned int cell_index = mark_bit_index >> Bitmap::kBitsPerCellLog2;
-  MarkBit::CellType index_mask = 1u << Bitmap::IndexInCell(mark_bit_index);
-  MarkBit::CellType* cells = p->markbits()->cells();
-  Address base_address = p->area_start();
-  unsigned int base_address_cell_index = Bitmap::IndexToCell(
-      Bitmap::CellAlignIndex(p->AddressToMarkbitIndex(base_address)));
-
-  // Check if the slot points to the start of an object. This can happen e.g.
-  // when we left trim a fixed array. Such slots are invalid and we can remove
-  // them.
-  if (index_mask > 1) {
-    if ((cells[cell_index] & index_mask) != 0 &&
-        (cells[cell_index] & (index_mask >> 1)) == 0) {
-      return false;
-    }
-  } else {
-    // Left trimming moves the mark bits so we cannot be in the very first cell.
-    DCHECK(cell_index != base_address_cell_index);
-    if ((cells[cell_index] & index_mask) != 0 &&
-        (cells[cell_index - 1] & (1u << Bitmap::kBitIndexMask)) == 0) {
-      return false;
-    }
-  }
-
-  // Check if the object is in the current cell.
-  MarkBit::CellType slot_mask;
-  if ((cells[cell_index] == 0) ||
-      (base::bits::CountTrailingZeros32(cells[cell_index]) >
-       base::bits::CountTrailingZeros32(cells[cell_index] | index_mask))) {
-    // If we are already in the first cell, there is no live object.
-    if (cell_index == base_address_cell_index) return false;
-
-    // If not, find a cell in a preceding cell slot that has a mark bit set.
-    do {
-      cell_index--;
-    } while (cell_index > base_address_cell_index && cells[cell_index] == 0);
-
-    // The slot must be in a dead object if there are no preceding cells that
-    // have mark bits set.
-    if (cells[cell_index] == 0) {
-      return false;
-    }
-
-    // The object is in a preceding cell. Set the mask to find any object.
-    slot_mask = ~0u;
-  } else {
-    // We are interested in object mark bits right before the slot.
-    slot_mask = index_mask + (index_mask - 1);
-  }
-
-  MarkBit::CellType current_cell = cells[cell_index];
-  CHECK(current_cell != 0);
-
-  // Find the last live object in the cell.
-  unsigned int leading_zeros =
-      base::bits::CountLeadingZeros32(current_cell & slot_mask);
-  CHECK(leading_zeros != Bitmap::kBitsPerCell);
-  int offset = static_cast<int>(Bitmap::kBitIndexMask - leading_zeros) - 1;
-
-  base_address += (cell_index - base_address_cell_index) *
-                  Bitmap::kBitsPerCell * kPointerSize;
-  Address address = base_address + offset * kPointerSize;
-
-  // If the found mark bit is part of a black area, the slot cannot be part
-  // of a live object since it is not marked.
-  if (p->IsBlackAreaEndMarker(address + kPointerSize)) return false;
-
-  HeapObject* object = HeapObject::FromAddress(address);
-  CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
-  CHECK(object->address() < reinterpret_cast<Address>(slot));
-  if ((object->address() + kPointerSize) <= slot &&
-      (object->address() + object->Size()) > slot) {
-    // If the slot is within the last found object in the cell, the slot is
-    // in a live object.
-    // Slots pointing to the first word of an object are invalid and removed.
-    // This can happen when we move the object header while left trimming.
-    return true;
-  }
-  return false;
-}
-
-HeapObject* MarkCompactCollector::FindBlackObjectBySlotSlow(Address slot) {
-  Page* p = Page::FromAddress(slot);
-  Space* owner = p->owner();
-  if (owner == heap_->lo_space() || owner == nullptr) {
-    Object* large_object = heap_->lo_space()->FindObject(slot);
-    // This object has to exist, otherwise we would not have recorded a slot
-    // for it.
-    CHECK(large_object->IsHeapObject());
-    HeapObject* large_heap_object = HeapObject::cast(large_object);
-
-    if (IsMarked(large_heap_object)) {
-      return large_heap_object;
-    }
-    return nullptr;
-  }
-
-  LiveObjectIterator<kBlackObjects> it(p);
-  HeapObject* object = nullptr;
-  while ((object = it.Next()) != nullptr) {
-    int size = object->Size();
-    if (object->address() > slot) return nullptr;
-    if (object->address() <= slot && slot < (object->address() + size)) {
-      return object;
-    }
-  }
-
-  return nullptr;
-}
-
-
 void MarkCompactCollector::EvacuateNewSpacePrologue() {
   NewSpace* new_space = heap()->new_space();
   // Append the list of new space pages to be processed.
@@ -3055,7 +2931,11 @@
         local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
         new_space_visitor_(collector->heap(), &compaction_spaces_,
                            &local_pretenuring_feedback_),
-        new_space_page_visitor(collector->heap()),
+        new_to_new_page_visitor_(collector->heap(),
+                                 &local_pretenuring_feedback_),
+        new_to_old_page_visitor_(collector->heap(),
+                                 &local_pretenuring_feedback_),
+
         old_space_visitor_(collector->heap(), &compaction_spaces_),
         duration_(0.0),
         bytes_compacted_(0) {}
@@ -3086,7 +2966,10 @@
 
   // Visitors for the corresponding spaces.
   EvacuateNewSpaceVisitor new_space_visitor_;
-  EvacuateNewSpacePageVisitor new_space_page_visitor;
+  EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_NEW>
+      new_to_new_page_visitor_;
+  EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_OLD>
+      new_to_old_page_visitor_;
   EvacuateOldSpaceVisitor old_space_visitor_;
 
   // Book keeping info.
@@ -3107,20 +2990,23 @@
       case kObjectsNewToOld:
         success = collector_->VisitLiveObjects(page, &new_space_visitor_,
                                                kClearMarkbits);
+        DCHECK(success);
         ArrayBufferTracker::ProcessBuffers(
             page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
-        DCHECK(success);
         break;
       case kPageNewToOld:
-        success = collector_->VisitLiveObjects(page, &new_space_page_visitor,
+        success = collector_->VisitLiveObjects(page, &new_to_old_page_visitor_,
                                                kKeepMarking);
-        // ArrayBufferTracker will be updated during sweeping.
         DCHECK(success);
+        new_to_old_page_visitor_.account_moved_bytes(page->LiveBytes());
+        // ArrayBufferTracker will be updated during sweeping.
         break;
       case kPageNewToNew:
-        new_space_page_visitor.account_semispace_copied(page->LiveBytes());
+        success = collector_->VisitLiveObjects(page, &new_to_new_page_visitor_,
+                                               kKeepMarking);
+        DCHECK(success);
+        new_to_new_page_visitor_.account_moved_bytes(page->LiveBytes());
         // ArrayBufferTracker will be updated during sweeping.
-        success = true;
         break;
       case kObjectsOldToOld:
         success = collector_->VisitLiveObjects(page, &old_space_visitor_,
@@ -3145,8 +3031,6 @@
               page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
         }
         break;
-      default:
-        UNREACHABLE();
     }
   }
   ReportCompactionProgress(evacuation_time, saved_live_bytes);
@@ -3172,15 +3056,15 @@
       compaction_spaces_.Get(CODE_SPACE));
   heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
   heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
-                                       new_space_page_visitor.promoted_size());
+                                       new_to_old_page_visitor_.moved_bytes());
   heap()->IncrementSemiSpaceCopiedObjectSize(
       new_space_visitor_.semispace_copied_size() +
-      new_space_page_visitor.semispace_copied_size());
+      new_to_new_page_visitor_.moved_bytes());
   heap()->IncrementYoungSurvivorsCounter(
       new_space_visitor_.promoted_size() +
       new_space_visitor_.semispace_copied_size() +
-      new_space_page_visitor.promoted_size() +
-      new_space_page_visitor.semispace_copied_size());
+      new_to_old_page_visitor_.moved_bytes() +
+      new_to_new_page_visitor_.moved_bytes());
   heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
 }
 
@@ -3192,17 +3076,15 @@
   //
   // The number of parallel compaction tasks is limited by:
   // - #evacuation pages
-  // - (#cores - 1)
+  // - #cores
   const double kTargetCompactionTimeInMs = .5;
-  const int kNumSweepingTasks = 3;
 
   double compaction_speed =
       heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
 
   const int available_cores = Max(
       1, static_cast<int>(
-             V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()) -
-             kNumSweepingTasks - 1);
+             V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
   int tasks;
   if (compaction_speed > 0) {
     tasks = 1 + static_cast<int>(live_bytes / compaction_speed /
@@ -3279,9 +3161,9 @@
         (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
         !page->Contains(age_mark)) {
       if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
-        EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space());
+        EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
       } else {
-        EvacuateNewSpacePageVisitor::MoveToToSpace(page);
+        EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
       }
     }
 
@@ -3335,6 +3217,18 @@
   }
 };
 
+MarkCompactCollector::Sweeper::ClearOldToNewSlotsMode
+MarkCompactCollector::Sweeper::GetClearOldToNewSlotsMode(Page* p) {
+  AllocationSpace identity = p->owner()->identity();
+  if (p->old_to_new_slots() &&
+      (identity == OLD_SPACE || identity == MAP_SPACE)) {
+    return MarkCompactCollector::Sweeper::CLEAR_REGULAR_SLOTS;
+  } else if (p->typed_old_to_new_slots() && identity == CODE_SPACE) {
+    return MarkCompactCollector::Sweeper::CLEAR_TYPED_SLOTS;
+  }
+  return MarkCompactCollector::Sweeper::DO_NOT_CLEAR;
+}
+
 int MarkCompactCollector::Sweeper::RawSweep(
     Page* p, FreeListRebuildingMode free_list_mode,
     FreeSpaceTreatmentMode free_space_mode) {
@@ -3344,13 +3238,17 @@
          space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
   DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
 
+  // If there are old-to-new slots in that page, we have to filter out slots
+  // that are in dead memory, which is freed by the sweeper.
+  ClearOldToNewSlotsMode slots_clearing_mode = GetClearOldToNewSlotsMode(p);
+
+  // The free ranges map is used for filtering typed slots.
+  std::map<uint32_t, uint32_t> free_ranges;
+
   // Before we sweep objects on the page, we free dead array buffers which
   // requires valid mark bits.
   ArrayBufferTracker::FreeDead(p);
 
-  // We also release the black area markers here.
-  p->ReleaseBlackAreaEndMarkerMap();
-
   Address free_start = p->area_start();
   DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
 
@@ -3370,11 +3268,13 @@
 
   LiveObjectIterator<kBlackObjects> it(p);
   HeapObject* object = NULL;
+
   while ((object = it.Next()) != NULL) {
     DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
     Address free_end = object->address();
     if (free_end != free_start) {
-      int size = static_cast<int>(free_end - free_start);
+      CHECK_GT(free_end, free_start);
+      size_t size = static_cast<size_t>(free_end - free_start);
       if (free_space_mode == ZAP_FREE_SPACE) {
         memset(free_start, 0xcc, size);
       }
@@ -3383,9 +3283,18 @@
             free_start, size);
         max_freed_bytes = Max(freed_bytes, max_freed_bytes);
       } else {
-        p->heap()->CreateFillerObjectAt(free_start, size,
+        p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
                                         ClearRecordedSlots::kNo);
       }
+
+      if (slots_clearing_mode == CLEAR_REGULAR_SLOTS) {
+        RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, free_end,
+                                               SlotSet::KEEP_EMPTY_BUCKETS);
+      } else if (slots_clearing_mode == CLEAR_TYPED_SLOTS) {
+        free_ranges.insert(std::pair<uint32_t, uint32_t>(
+            static_cast<uint32_t>(free_start - p->address()),
+            static_cast<uint32_t>(free_end - p->address())));
+      }
     }
     Map* map = object->synchronized_map();
     int size = object->SizeFromMap(map);
@@ -3401,11 +3310,9 @@
     free_start = free_end + size;
   }
 
-  // Clear the mark bits of that page and reset live bytes count.
-  p->ClearLiveness();
-
   if (free_start != p->area_end()) {
-    int size = static_cast<int>(p->area_end() - free_start);
+    CHECK_GT(p->area_end(), free_start);
+    size_t size = static_cast<size_t>(p->area_end() - free_start);
     if (free_space_mode == ZAP_FREE_SPACE) {
       memset(free_start, 0xcc, size);
     }
@@ -3414,13 +3321,31 @@
           free_start, size);
       max_freed_bytes = Max(freed_bytes, max_freed_bytes);
     } else {
-      p->heap()->CreateFillerObjectAt(free_start, size,
+      p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
                                       ClearRecordedSlots::kNo);
     }
+
+    if (slots_clearing_mode == CLEAR_REGULAR_SLOTS) {
+      RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, p->area_end(),
+                                             SlotSet::KEEP_EMPTY_BUCKETS);
+    } else if (slots_clearing_mode == CLEAR_TYPED_SLOTS) {
+      free_ranges.insert(std::pair<uint32_t, uint32_t>(
+          static_cast<uint32_t>(free_start - p->address()),
+          static_cast<uint32_t>(p->area_end() - p->address())));
+    }
   }
+
+  // Clear invalid typed slots after collecting all free ranges.
+  if (slots_clearing_mode == CLEAR_TYPED_SLOTS) {
+    p->typed_old_to_new_slots()->RemoveInvaldSlots(free_ranges);
+  }
+
+  // Clear the mark bits of that page and reset live bytes count.
+  p->ClearLiveness();
+
   p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
   if (free_list_mode == IGNORE_FREE_LIST) return 0;
-  return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
+  return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
 }
 
 void MarkCompactCollector::InvalidateCode(Code* code) {
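
Review note: free_ranges above stores non-overlapping [start, end) page offsets keyed by start, so deciding whether a typed slot died reduces to one ordered-map lookup. RemoveInvaldSlots itself is declared elsewhere in the tree; this is a sketch of the underlying query, and the helper name is hypothetical:

    #include <cstdint>
    #include <map>

    // Non-overlapping [start, end) page offsets, keyed by start.
    using FreeRanges = std::map<uint32_t, uint32_t>;

    // Returns true if 'offset' falls inside any freed range.
    bool InFreeRange(const FreeRanges& ranges, uint32_t offset) {
      auto it = ranges.upper_bound(offset);  // first range starting after offset
      if (it == ranges.begin()) return false;
      --it;                                  // candidate range covering offset
      return offset >= it->first && offset < it->second;
    }
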
@@ -3480,7 +3405,8 @@
             page->AddressToMarkbitIndex(object->address()));
         if (page->old_to_new_slots() != nullptr) {
           page->old_to_new_slots()->RemoveRange(
-              0, static_cast<int>(object->address() - page->address()));
+              0, static_cast<int>(object->address() - page->address()),
+              SlotSet::PREFREE_EMPTY_BUCKETS);
         }
         if (page->typed_old_to_new_slots() != nullptr) {
           RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
@@ -3545,12 +3471,12 @@
     for (Page* p : newspace_evacuation_candidates_) {
       if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
         p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
-        sweeper().AddLatePage(p->owner()->identity(), p);
+        sweeper().AddPage(p->owner()->identity(), p);
       } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
         p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
         p->ForAllFreeListCategories(
             [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
-        sweeper().AddLatePage(p->owner()->identity(), p);
+        sweeper().AddPage(p->owner()->identity(), p);
       }
     }
     newspace_evacuation_candidates_.Rewind(0);
@@ -3562,7 +3488,7 @@
       SkipList* list = p->skip_list();
       if (list != NULL) list->Clear();
       if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
-        sweeper().AddLatePage(p->owner()->identity(), p);
+        sweeper().AddPage(p->owner()->identity(), p);
         p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
       }
     }
@@ -3631,34 +3557,48 @@
 
   static SlotCallbackResult CheckAndUpdateOldToNewSlot(Heap* heap,
                                                        Address slot_address) {
-    Object** slot = reinterpret_cast<Object**>(slot_address);
-    if (heap->InFromSpace(*slot)) {
-      HeapObject* heap_object = reinterpret_cast<HeapObject*>(*slot);
+    // There may be concurrent action on slots in dead objects. Concurrent
+    // sweeper threads may overwrite the slot content with a free space object.
+    // Moreover, the pointed-to object may also get concurrently overwritten
+    // with a free space object. The sweeper always gets priority performing
+    // these writes.
+    base::NoBarrierAtomicValue<Object*>* slot =
+        base::NoBarrierAtomicValue<Object*>::FromAddress(slot_address);
+    Object* slot_reference = slot->Value();
+    if (heap->InFromSpace(slot_reference)) {
+      HeapObject* heap_object = reinterpret_cast<HeapObject*>(slot_reference);
       DCHECK(heap_object->IsHeapObject());
       MapWord map_word = heap_object->map_word();
       // There could still be stale pointers in large object space, map space,
       // and old space for pages that have been promoted.
       if (map_word.IsForwardingAddress()) {
-        // Update the corresponding slot.
-        *slot = map_word.ToForwardingAddress();
+        // A sweeper thread may concurrently write a size value which looks like
+        // a forwarding pointer. We have to ignore these values.
+        if (map_word.ToRawValue() < Page::kPageSize) {
+          return REMOVE_SLOT;
+        }
+        // Update the corresponding slot only if the slot content did not
+        // change in the meantime. This may happen when a concurrent sweeper
+        // thread stored a free space object at that memory location.
+        slot->TrySetValue(slot_reference, map_word.ToForwardingAddress());
       }
       // If the object was in from space before and is after executing the
       // callback in to space, the object is still live.
       // Unfortunately, we do not know about the slot. It could be in a
       // just freed free space object.
-      if (heap->InToSpace(*slot)) {
+      if (heap->InToSpace(slot->Value())) {
         return KEEP_SLOT;
       }
-    } else if (heap->InToSpace(*slot)) {
+    } else if (heap->InToSpace(slot_reference)) {
       // Slots can point to "to" space if the page has been moved, or if the
       // slot has been recorded multiple times in the remembered set. Since
       // there is no forwarding information present we need to check the
       // markbits to determine liveness.
-      if (Marking::IsBlack(
-              ObjectMarking::MarkBitFrom(reinterpret_cast<HeapObject*>(*slot))))
+      if (Marking::IsBlack(ObjectMarking::MarkBitFrom(
+              reinterpret_cast<HeapObject*>(slot_reference))))
         return KEEP_SLOT;
     } else {
-      DCHECK(!heap->InNewSpace(*slot));
+      DCHECK(!heap->InNewSpace(slot_reference));
     }
     return REMOVE_SLOT;
   }
@@ -3666,9 +3606,11 @@
 
 int NumberOfPointerUpdateTasks(int pages) {
   if (!FLAG_parallel_pointer_update) return 1;
-  const int kMaxTasks = 4;
+  const int available_cores = Max(
+      1, static_cast<int>(
+             V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
   const int kPagesPerTask = 4;
-  return Min(kMaxTasks, (pages + kPagesPerTask - 1) / kPagesPerTask);
+  return Min(available_cores, (pages + kPagesPerTask - 1) / kPagesPerTask);
 }
 
 template <PointerDirection direction>
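
The replacement heuristic is ceil(pages / kPagesPerTask), clamped to the actual thread-pool size rather than a hard-coded 4 tasks. For instance, 10 pages yield (10 + 3) / 4 = 3 tasks on an 8-core pool but only 2 on a 2-core pool; in miniature:

    #include <algorithm>

    int NumberOfTasks(int pages, int available_cores) {
      const int kPagesPerTask = 4;
      return std::min(available_cores,
                      (pages + kPagesPerTask - 1) / kPagesPerTask);  // ceil div
    }
    // NumberOfTasks(10, 8) == 3; NumberOfTasks(10, 2) == 2.
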
@@ -3813,24 +3755,21 @@
 int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
                                                      AllocationSpace identity) {
   int max_freed = 0;
-  if (page->mutex()->TryLock()) {
+  {
+    base::LockGuard<base::Mutex> guard(page->mutex());
     // If this page was already swept in the meantime, we can return here.
-    if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) {
-      page->mutex()->Unlock();
-      return 0;
-    }
+    if (page->SweepingDone()) return 0;
+    DCHECK_EQ(Page::kSweepingPending,
+              page->concurrent_sweeping_state().Value());
     page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
     const Sweeper::FreeSpaceTreatmentMode free_space_mode =
         Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
     if (identity == NEW_SPACE) {
       RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
-    } else if (identity == OLD_SPACE) {
-      max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
-    } else if (identity == CODE_SPACE) {
-      max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
     } else {
       max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
     }
+    DCHECK(page->SweepingDone());
 
     // After finishing sweeping of a page we clean up its remembered set.
     if (page->typed_old_to_new_slots()) {
@@ -3839,35 +3778,26 @@
     if (page->old_to_new_slots()) {
       page->old_to_new_slots()->FreeToBeFreedBuckets();
     }
+  }
 
-    {
-      base::LockGuard<base::Mutex> guard(&mutex_);
-      swept_list_[identity].Add(page);
-    }
-    page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
-    page->mutex()->Unlock();
+  {
+    base::LockGuard<base::Mutex> guard(&mutex_);
+    swept_list_[identity].Add(page);
   }
   return max_freed;
 }
 
 void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) {
-  DCHECK(!sweeping_in_progress_);
+  DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
   PrepareToBeSweptPage(space, page);
   sweeping_list_[space].push_back(page);
 }
 
-void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space,
-                                                Page* page) {
-  DCHECK(sweeping_in_progress_);
-  PrepareToBeSweptPage(space, page);
-  late_pages_ = true;
-  AddSweepingPageSafe(space, page);
-}
-
 void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space,
                                                          Page* page) {
   page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
-  int to_sweep = page->area_size() - page->LiveBytes();
+  DCHECK_GE(page->area_size(), static_cast<size_t>(page->LiveBytes()));
+  size_t to_sweep = page->area_size() - page->LiveBytes();
   if (space != NEW_SPACE)
     heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
 }
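
A note on the int-to-size_t migration running through this patch: unsigned subtraction wraps silently, so the new DCHECK_GE guards are what keep differences like area_size - LiveBytes() meaningful. In miniature (assert stands in for DCHECK_GE):

    #include <cassert>
    #include <cstddef>

    size_t DeadBytes(size_t area_size, size_t live_bytes) {
      assert(area_size >= live_bytes);  // without this, a larger 'live_bytes'
                                        // would wrap to a huge positive value
      return area_size - live_bytes;
    }
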
@@ -3903,7 +3833,6 @@
     if (p->IsEvacuationCandidate()) {
       // Will be processed in EvacuateNewSpaceAndCandidates.
       DCHECK(evacuation_candidates_.length() > 0);
-      DCHECK(!p->HasBlackAreas());
       continue;
     }
 
@@ -3943,8 +3872,7 @@
   }
 }
 
-
-void MarkCompactCollector::SweepSpaces() {
+void MarkCompactCollector::StartSweepSpaces() {
   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
 #ifdef DEBUG
   state_ = SWEEP_SPACES;
diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h
index 2cbb369..de18207 100644
--- a/src/heap/mark-compact.h
+++ b/src/heap/mark-compact.h
@@ -8,6 +8,8 @@
 #include <deque>
 
 #include "src/base/bits.h"
+#include "src/base/platform/condition-variable.h"
+#include "src/cancelable-task.h"
 #include "src/heap/marking.h"
 #include "src/heap/spaces.h"
 #include "src/heap/store-buffer.h"
@@ -52,16 +54,26 @@
 // Marking deque for tracing live objects.
 class MarkingDeque {
  public:
-  MarkingDeque()
-      : array_(NULL),
+  explicit MarkingDeque(Heap* heap)
+      : backing_store_(nullptr),
+        backing_store_committed_size_(0),
+        array_(nullptr),
         top_(0),
         bottom_(0),
         mask_(0),
         overflowed_(false),
-        in_use_(false) {}
+        in_use_(false),
+        uncommit_task_pending_(false),
+        heap_(heap) {}
 
-  void Initialize(Address low, Address high);
-  void Uninitialize(bool aborting = false);
+  void SetUp();
+  void TearDown();
+
+  // Ensures that the marking deque is committed and will stay committed until
+  // StopUsing() is called.
+  void StartUsing();
+  void StopUsing();
+  void Clear();
 
   inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
 
@@ -69,8 +81,6 @@
 
   bool overflowed() const { return overflowed_; }
 
-  bool in_use() const { return in_use_; }
-
   void ClearOverflowed() { overflowed_ = false; }
 
   void SetOverflowed() { overflowed_ = true; }
@@ -118,6 +128,43 @@
   void set_top(int top) { top_ = top; }
 
  private:
+  // This task uncommits the marking_deque backing store if
+  // marking_deque->in_use_ is false.
+  class UncommitTask : public CancelableTask {
+   public:
+    explicit UncommitTask(Isolate* isolate, MarkingDeque* marking_deque)
+        : CancelableTask(isolate), marking_deque_(marking_deque) {}
+
+   private:
+    // CancelableTask override.
+    void RunInternal() override {
+      base::LockGuard<base::Mutex> guard(&marking_deque_->mutex_);
+      if (!marking_deque_->in_use_) {
+        marking_deque_->Uncommit();
+      }
+      marking_deque_->uncommit_task_pending_ = false;
+    }
+
+    MarkingDeque* marking_deque_;
+    DISALLOW_COPY_AND_ASSIGN(UncommitTask);
+  };
+
+  static const size_t kMaxSize = 4 * MB;
+  static const size_t kMinSize = 256 * KB;
+
+  // Must be called with mutex lock.
+  void EnsureCommitted();
+
+  // Must be called with mutex lock.
+  void Uncommit();
+
+  // Must be called with mutex lock.
+  void StartUncommitTask();
+
+  base::Mutex mutex_;
+
+  base::VirtualMemory* backing_store_;
+  size_t backing_store_committed_size_;
   HeapObject** array_;
   // array_[(top - 1) & mask_] is the top element in the deque.  The Deque is
   // empty when top_ == bottom_.  It is full when top_ + 1 == bottom
@@ -126,7 +173,11 @@
   int bottom_;
   int mask_;
   bool overflowed_;
+  // in_use_ == true after taking the mutex lock implies that the marking deque
+  // committed and will stay committed at least until in_use_ == false.
   bool in_use_;
+  bool uncommit_task_pending_;
+  Heap* heap_;
 
   DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
 };
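
Because the committed size is always a power of two, mask_ = capacity - 1 lets every index wrap with a single AND, which is exactly what IsFull() and the top_/bottom_ comment above rely on. A minimal sketch of the indexing scheme, with a fixed 8-slot buffer standing in for the committed backing store:

    struct RingDeque {
      static const int kCapacity = 8;  // must be a power of two
      void* slots[kCapacity];
      int top = 0, bottom = 0;
      int mask = kCapacity - 1;

      bool IsEmpty() const { return top == bottom; }
      bool IsFull() const { return ((top + 1) & mask) == bottom; }

      bool Push(void* value) {
        if (IsFull()) return false;  // caller would set the overflow flag
        slots[top] = value;
        top = (top + 1) & mask;
        return true;
      }
      void* Pop() {                  // LIFO end, matching the comment above
        if (IsEmpty()) return nullptr;
        top = (top + kCapacity - 1) & mask;  // wrap-safe decrement
        return slots[top];
      }
    };
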
@@ -270,12 +321,16 @@
   HeapObject* Next();
 
  private:
+  inline Heap* heap() { return chunk_->heap(); }
+
   MemoryChunk* chunk_;
   MarkBitCellIterator it_;
   Address cell_base_;
   MarkBit::CellType current_cell_;
 };
 
+enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
+
 // -------------------------------------------------------------------------
 // Mark-Compact collector
 class MarkCompactCollector {
@@ -288,6 +343,11 @@
 
     enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
     enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
+    enum ClearOldToNewSlotsMode {
+      DO_NOT_CLEAR,
+      CLEAR_REGULAR_SLOTS,
+      CLEAR_TYPED_SLOTS
+    };
 
     typedef std::deque<Page*> SweepingList;
     typedef List<Page*> SweptList;
@@ -299,24 +359,25 @@
         : heap_(heap),
           pending_sweeper_tasks_semaphore_(0),
           sweeping_in_progress_(false),
-          late_pages_(false),
           num_sweeping_tasks_(0) {}
 
     bool sweeping_in_progress() { return sweeping_in_progress_; }
-    bool contains_late_pages() { return late_pages_; }
 
     void AddPage(AllocationSpace space, Page* page);
-    void AddLatePage(AllocationSpace space, Page* page);
 
     int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
                            int max_pages = 0);
     int ParallelSweepPage(Page* page, AllocationSpace identity);
 
+    // After calling this function sweeping is considered to be in progress
+    // and the main thread can sweep lazily, but the background sweeper tasks
+    // are not running yet.
     void StartSweeping();
-    void StartSweepingHelper(AllocationSpace space_to_start);
+    void StartSweeperTasks();
     void EnsureCompleted();
     void EnsureNewSpaceCompleted();
-    bool IsSweepingCompleted();
+    bool AreSweeperTasksRunning();
+    bool IsSweepingCompleted(AllocationSpace space);
     void SweepOrWaitUntilSweepingCompleted(Page* page);
 
     void AddSweptPageSafe(PagedSpace* space, Page* page);
@@ -325,6 +386,8 @@
    private:
     static const int kAllocationSpaces = LAST_PAGED_SPACE + 1;
 
+    static ClearOldToNewSlotsMode GetClearOldToNewSlotsMode(Page* p);
+
     template <typename Callback>
     void ForAllSweepingSpaces(Callback callback) {
       for (int i = 0; i < kAllocationSpaces; i++) {
@@ -343,7 +406,6 @@
     SweptList swept_list_[kAllocationSpaces];
     SweepingList sweeping_list_[kAllocationSpaces];
     bool sweeping_in_progress_;
-    bool late_pages_;
     base::AtomicNumber<intptr_t> num_sweeping_tasks_;
   };
 
@@ -369,9 +431,7 @@
   // Performs a global garbage collection.
   void CollectGarbage();
 
-  enum CompactionMode { INCREMENTAL_COMPACTION, NON_INCREMENTAL_COMPACTION };
-
-  bool StartCompaction(CompactionMode mode);
+  bool StartCompaction();
 
   void AbortCompaction();
 
@@ -412,7 +472,7 @@
         ->ShouldSkipEvacuationSlotRecording();
   }
 
-  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
+  static inline bool IsOnEvacuationCandidate(HeapObject* obj) {
     return Page::FromAddress(reinterpret_cast<Address>(obj))
         ->IsEvacuationCandidate();
   }
@@ -463,34 +523,10 @@
 
   MarkingDeque* marking_deque() { return &marking_deque_; }
 
-  static const size_t kMaxMarkingDequeSize = 4 * MB;
-  static const size_t kMinMarkingDequeSize = 256 * KB;
-
-  void EnsureMarkingDequeIsCommittedAndInitialize(size_t max_size) {
-    if (!marking_deque()->in_use()) {
-      EnsureMarkingDequeIsCommitted(max_size);
-      InitializeMarkingDeque();
-    }
-  }
-
-  void EnsureMarkingDequeIsCommitted(size_t max_size);
-  void EnsureMarkingDequeIsReserved();
-
-  void InitializeMarkingDeque();
-
-  // The following two methods can just be called after marking, when the
-  // whole transitive closure is known. They must be called before sweeping
-  // when mark bits are still intact.
-  bool IsSlotInBlackObject(MemoryChunk* p, Address slot);
-  HeapObject* FindBlackObjectBySlotSlow(Address slot);
-
-  // Removes all the slots in the slot buffers that are within the given
-  // address range.
-  void RemoveObjectSlots(Address start_slot, Address end_slot);
-
   Sweeper& sweeper() { return sweeper_; }
 
  private:
+  template <PageEvacuationMode mode>
   class EvacuateNewSpacePageVisitor;
   class EvacuateNewSpaceVisitor;
   class EvacuateOldSpaceVisitor;
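
Templating EvacuateNewSpacePageVisitor over PageEvacuationMode bakes the NEW_TO_NEW/NEW_TO_OLD decision in at compile time, so each instantiation carries no runtime mode check on its hot path. A standalone toy (not V8 code) showing the dispatch pattern:

```cpp
// Toy version of a visitor templated on an evacuation mode. Because `mode`
// is a compile-time constant, the compiler folds the branch away in each
// instantiation.
enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };

template <PageEvacuationMode mode>
struct EvacuatePageVisitorSketch {
  void ProcessPage() {
    if (mode == NEW_TO_OLD) {
      // ... promote the whole page into old space ...
    } else {
      // ... keep the page in new space ...
    }
  }
};

// Two distinct types, selected wherever the collector picks the mode:
//   EvacuatePageVisitorSketch<NEW_TO_OLD> promote_visitor;
//   EvacuatePageVisitorSketch<NEW_TO_NEW> keep_visitor;
```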
@@ -502,11 +538,10 @@
   explicit MarkCompactCollector(Heap* heap);
 
   bool WillBeDeoptimized(Code* code);
-  void ClearInvalidRememberedSetSlots();
 
-  void ComputeEvacuationHeuristics(int area_size,
+  void ComputeEvacuationHeuristics(size_t area_size,
                                    int* target_fragmentation_percent,
-                                   int* max_evacuated_bytes);
+                                   size_t* max_evacuated_bytes);
 
   void VisitAllObjects(HeapObjectVisitor* visitor);
 
@@ -644,21 +679,10 @@
 
   void AbortTransitionArrays();
 
-  // -----------------------------------------------------------------------
-  // Phase 2: Sweeping to clear mark bits and free non-live objects for
-  // a non-compacting collection.
-  //
-  //  Before: Live objects are marked and non-live objects are unmarked.
-  //
-  //   After: Live objects are unmarked, non-live regions have been added to
-  //          their space's free list. Active eden semispace is compacted by
-  //          evacuation.
-  //
-
-  // If we are not compacting the heap, we simply sweep the spaces except
-  // for the large object space, clearing mark bits and adding unmarked
-  // regions to each space's free list.
-  void SweepSpaces();
+  // Starts sweeping of spaces by contributing on the main thread and setting
+  // up other pages for sweeping. Does not start sweeper tasks.
+  void StartSweepSpaces();
+  void StartSweepSpace(PagedSpace* space);
 
   void EvacuateNewSpacePrologue();
 
@@ -681,9 +705,6 @@
 
   void ReleaseEvacuationCandidates();
 
-  // Starts sweeping of a space by contributing on the main thread and setting
-  // up other pages for sweeping.
-  void StartSweepSpace(PagedSpace* space);
 
 #ifdef DEBUG
   friend class MarkObjectVisitor;
@@ -726,8 +747,6 @@
 
   bool have_code_to_deoptimize_;
 
-  base::VirtualMemory* marking_deque_memory_;
-  size_t marking_deque_memory_committed_;
   MarkingDeque marking_deque_;
 
   CodeFlusher* code_flusher_;
diff --git a/src/heap/memory-reducer.cc b/src/heap/memory-reducer.cc
index ba9010e..2aed4c7 100644
--- a/src/heap/memory-reducer.cc
+++ b/src/heap/memory-reducer.cc
@@ -24,27 +24,26 @@
 
 
 void MemoryReducer::TimerTask::RunInternal() {
-  const double kJsCallsPerMsThreshold = 0.5;
   Heap* heap = memory_reducer_->heap();
   Event event;
   double time_ms = heap->MonotonicallyIncreasingTimeInMs();
   heap->tracer()->SampleAllocation(time_ms, heap->NewSpaceAllocationCounter(),
                                    heap->OldGenerationAllocationCounter());
-  double js_call_rate = memory_reducer_->SampleAndGetJsCallsPerMs(time_ms);
   bool low_allocation_rate = heap->HasLowAllocationRate();
-  bool is_idle = js_call_rate < kJsCallsPerMsThreshold && low_allocation_rate;
   bool optimize_for_memory = heap->ShouldOptimizeForMemoryUsage();
   if (FLAG_trace_gc_verbose) {
-    PrintIsolate(heap->isolate(), "Memory reducer: call rate %.3lf, %s, %s\n",
-                 js_call_rate, low_allocation_rate ? "low alloc" : "high alloc",
-                 optimize_for_memory ? "background" : "foreground");
+    heap->isolate()->PrintWithTimestamp(
+        "Memory reducer: %s, %s\n",
+        low_allocation_rate ? "low alloc" : "high alloc",
+        optimize_for_memory ? "background" : "foreground");
   }
   event.type = kTimer;
   event.time_ms = time_ms;
   // The memory reducer will start incremental marking if
   // 1) mutator is likely idle: allocation rate is low.
   // 2) mutator is in background: optimize for memory flag is set.
-  event.should_start_incremental_gc = is_idle || optimize_for_memory;
+  event.should_start_incremental_gc =
+      low_allocation_rate || optimize_for_memory;
   event.can_start_incremental_gc =
       heap->incremental_marking()->IsStopped() &&
       (heap->incremental_marking()->CanBeActivated() || optimize_for_memory);
@@ -52,16 +51,6 @@
 }
 
 
-double MemoryReducer::SampleAndGetJsCallsPerMs(double time_ms) {
-  unsigned int counter = heap()->isolate()->js_calls_from_api_counter();
-  unsigned int call_delta = counter - js_calls_counter_;
-  double time_delta_ms = time_ms - js_calls_sample_time_ms_;
-  js_calls_counter_ = counter;
-  js_calls_sample_time_ms_ = time_ms;
-  return time_delta_ms > 0 ? call_delta / time_delta_ms : 0;
-}
-
-
 void MemoryReducer::NotifyTimer(const Event& event) {
   DCHECK_EQ(kTimer, event.type);
   DCHECK_EQ(kWait, state_.action);
@@ -70,8 +59,8 @@
     DCHECK(heap()->incremental_marking()->IsStopped());
     DCHECK(FLAG_incremental_marking);
     if (FLAG_trace_gc_verbose) {
-      PrintIsolate(heap()->isolate(), "Memory reducer: started GC #%d\n",
-                   state_.started_gcs);
+      heap()->isolate()->PrintWithTimestamp("Memory reducer: started GC #%d\n",
+                                            state_.started_gcs);
     }
     heap()->StartIdleIncrementalMarking(
         GarbageCollectionReason::kMemoryReducer);
@@ -93,8 +82,9 @@
     // Re-schedule the timer.
     ScheduleTimer(event.time_ms, state_.next_gc_start_ms - event.time_ms);
     if (FLAG_trace_gc_verbose) {
-      PrintIsolate(heap()->isolate(), "Memory reducer: waiting for %.f ms\n",
-                   state_.next_gc_start_ms - event.time_ms);
+      heap()->isolate()->PrintWithTimestamp(
+          "Memory reducer: waiting for %.f ms\n",
+          state_.next_gc_start_ms - event.time_ms);
     }
   }
 }
@@ -110,9 +100,9 @@
   }
   if (old_action == kRun) {
     if (FLAG_trace_gc_verbose) {
-      PrintIsolate(heap()->isolate(), "Memory reducer: finished GC #%d (%s)\n",
-                   state_.started_gcs,
-                   state_.action == kWait ? "will do more" : "done");
+      heap()->isolate()->PrintWithTimestamp(
+          "Memory reducer: finished GC #%d (%s)\n", state_.started_gcs,
+          state_.action == kWait ? "will do more" : "done");
     }
   }
 }
@@ -194,8 +184,6 @@
 
 void MemoryReducer::ScheduleTimer(double time_ms, double delay_ms) {
   DCHECK(delay_ms > 0);
-  // Record the time and the js call counter.
-  SampleAndGetJsCallsPerMs(time_ms);
   // Leave some room for precision error in task scheduler.
   const double kSlackMs = 100;
   v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap()->isolate());
@@ -204,7 +192,6 @@
       isolate, timer_task, (delay_ms + kSlackMs) / 1000.0);
 }
 
-
 void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0); }
 
 }  // namespace internal
diff --git a/src/heap/memory-reducer.h b/src/heap/memory-reducer.h
index 0fe53e5..0421987 100644
--- a/src/heap/memory-reducer.h
+++ b/src/heap/memory-reducer.h
@@ -8,6 +8,7 @@
 #include "include/v8-platform.h"
 #include "src/base/macros.h"
 #include "src/cancelable-task.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -79,7 +80,7 @@
 // now_ms is the current time,
 // t' is t if the current event is not a GC event and is now_ms otherwise,
 // long_delay_ms, short_delay_ms, and watchdog_delay_ms are constants.
-class MemoryReducer {
+class V8_EXPORT_PRIVATE MemoryReducer {
  public:
   enum Action { kDone, kWait, kRun };
 
@@ -148,9 +149,6 @@
 
   static bool WatchdogGC(const State& state, const Event& event);
 
-  // Returns the rate of JS calls initiated from the API.
-  double SampleAndGetJsCallsPerMs(double time_ms);
-
   Heap* heap_;
   State state_;
   unsigned int js_calls_counter_;
diff --git a/src/heap/object-stats.cc b/src/heap/object-stats.cc
index 6e4b50e..ef5f657 100644
--- a/src/heap/object-stats.cc
+++ b/src/heap/object-stats.cc
@@ -52,55 +52,59 @@
   stream << "]";
 }
 
+void ObjectStats::PrintKeyAndId(const char* key, int gc_count) {
+  PrintF("\"isolate\": \"%p\", \"id\": %d, \"key\": \"%s\", ",
+         reinterpret_cast<void*>(isolate()), gc_count, key);
+}
+
+void ObjectStats::PrintInstanceTypeJSON(const char* key, int gc_count,
+                                        const char* name, int index) {
+  PrintF("{ ");
+  PrintKeyAndId(key, gc_count);
+  PrintF("\"type\": \"instance_type_data\", ");
+  PrintF("\"instance_type\": %d, ", index);
+  PrintF("\"instance_type_name\": \"%s\", ", name);
+  PrintF("\"overall\": %zu, ", object_sizes_[index]);
+  PrintF("\"count\": %zu, ", object_counts_[index]);
+  PrintF("\"over_allocated\": %zu, ", over_allocated_[index]);
+  PrintF("\"histogram\": ");
+  PrintJSONArray(size_histogram_[index], kNumberOfBuckets);
+  PrintF(",");
+  PrintF("\"over_allocated_histogram\": ");
+  PrintJSONArray(over_allocated_histogram_[index], kNumberOfBuckets);
+  PrintF(" }\n");
+}
+
 void ObjectStats::PrintJSON(const char* key) {
   double time = isolate()->time_millis_since_init();
   int gc_count = heap()->gc_count();
 
-#define PRINT_KEY_AND_ID()                                     \
-  PrintF("\"isolate\": \"%p\", \"id\": %d, \"key\": \"%s\", ", \
-         reinterpret_cast<void*>(isolate()), gc_count, key);
-
   // gc_descriptor
   PrintF("{ ");
-  PRINT_KEY_AND_ID();
+  PrintKeyAndId(key, gc_count);
   PrintF("\"type\": \"gc_descriptor\", \"time\": %f }\n", time);
   // bucket_sizes
   PrintF("{ ");
-  PRINT_KEY_AND_ID();
+  PrintKeyAndId(key, gc_count);
   PrintF("\"type\": \"bucket_sizes\", \"sizes\": [ ");
   for (int i = 0; i < kNumberOfBuckets; i++) {
     PrintF("%d", 1 << (kFirstBucketShift + i));
     if (i != (kNumberOfBuckets - 1)) PrintF(", ");
   }
   PrintF(" ] }\n");
-// instance_type_data
-#define PRINT_INSTANCE_TYPE_DATA(name, index)                         \
-  PrintF("{ ");                                                       \
-  PRINT_KEY_AND_ID();                                                 \
-  PrintF("\"type\": \"instance_type_data\", ");                       \
-  PrintF("\"instance_type\": %d, ", index);                           \
-  PrintF("\"instance_type_name\": \"%s\", ", name);                   \
-  PrintF("\"overall\": %zu, ", object_sizes_[index]);                 \
-  PrintF("\"count\": %zu, ", object_counts_[index]);                  \
-  PrintF("\"over_allocated\": %zu, ", over_allocated_[index]);        \
-  PrintF("\"histogram\": ");                                          \
-  PrintJSONArray(size_histogram_[index], kNumberOfBuckets);           \
-  PrintF(",");                                                        \
-  PrintF("\"over_allocated_histogram\": ");                           \
-  PrintJSONArray(over_allocated_histogram_[index], kNumberOfBuckets); \
-  PrintF(" }\n");
 
-#define INSTANCE_TYPE_WRAPPER(name) PRINT_INSTANCE_TYPE_DATA(#name, name)
-#define CODE_KIND_WRAPPER(name)            \
-  PRINT_INSTANCE_TYPE_DATA("*CODE_" #name, \
-                           FIRST_CODE_KIND_SUB_TYPE + Code::name)
-#define FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER(name) \
-  PRINT_INSTANCE_TYPE_DATA("*FIXED_ARRAY_" #name,   \
-                           FIRST_FIXED_ARRAY_SUB_TYPE + name)
-#define CODE_AGE_WRAPPER(name) \
-  PRINT_INSTANCE_TYPE_DATA(    \
-      "*CODE_AGE_" #name,      \
-      FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge)
+#define INSTANCE_TYPE_WRAPPER(name) \
+  PrintInstanceTypeJSON(key, gc_count, #name, name);
+#define CODE_KIND_WRAPPER(name)                        \
+  PrintInstanceTypeJSON(key, gc_count, "*CODE_" #name, \
+                        FIRST_CODE_KIND_SUB_TYPE + Code::name);
+#define FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER(name)           \
+  PrintInstanceTypeJSON(key, gc_count, "*FIXED_ARRAY_" #name, \
+                        FIRST_FIXED_ARRAY_SUB_TYPE + name);
+#define CODE_AGE_WRAPPER(name)           \
+  PrintInstanceTypeJSON(                 \
+      key, gc_count, "*CODE_AGE_" #name, \
+      FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge);
 
   INSTANCE_TYPE_LIST(INSTANCE_TYPE_WRAPPER)
   CODE_KIND_LIST(CODE_KIND_WRAPPER)
@@ -115,6 +119,20 @@
 #undef PRINT_KEY_AND_ID
 }
 
+void ObjectStats::DumpInstanceTypeData(std::stringstream& stream,
+                                       const char* name, int index) {
+  stream << "\"" << name << "\":{";
+  stream << "\"type\":" << static_cast<int>(index) << ",";
+  stream << "\"overall\":" << object_sizes_[index] << ",";
+  stream << "\"count\":" << object_counts_[index] << ",";
+  stream << "\"over_allocated\":" << over_allocated_[index] << ",";
+  stream << "\"histogram\":";
+  DumpJSONArray(stream, size_histogram_[index], kNumberOfBuckets);
+  stream << ",\"over_allocated_histogram\":";
+  DumpJSONArray(stream, over_allocated_histogram_[index], kNumberOfBuckets);
+  stream << "},";
+}
+
 void ObjectStats::Dump(std::stringstream& stream) {
   double time = isolate()->time_millis_since_init();
   int gc_count = heap()->gc_count();
@@ -131,29 +149,19 @@
   stream << "],";
   stream << "\"type_data\":{";
 
-#define PRINT_INSTANCE_TYPE_DATA(name, index)                                \
-  stream << "\"" << name << "\":{";                                          \
-  stream << "\"type\":" << static_cast<int>(index) << ",";                   \
-  stream << "\"overall\":" << object_sizes_[index] << ",";                   \
-  stream << "\"count\":" << object_counts_[index] << ",";                    \
-  stream << "\"over_allocated\":" << over_allocated_[index] << ",";          \
-  stream << "\"histogram\":";                                                \
-  DumpJSONArray(stream, size_histogram_[index], kNumberOfBuckets);           \
-  stream << ",\"over_allocated_histogram\":";                                \
-  DumpJSONArray(stream, over_allocated_histogram_[index], kNumberOfBuckets); \
-  stream << "},";
+#define INSTANCE_TYPE_WRAPPER(name) DumpInstanceTypeData(stream, #name, name);
+#define CODE_KIND_WRAPPER(name)                \
+  DumpInstanceTypeData(stream, "*CODE_" #name, \
+                       FIRST_CODE_KIND_SUB_TYPE + Code::name);
 
-#define INSTANCE_TYPE_WRAPPER(name) PRINT_INSTANCE_TYPE_DATA(#name, name)
-#define CODE_KIND_WRAPPER(name)            \
-  PRINT_INSTANCE_TYPE_DATA("*CODE_" #name, \
-                           FIRST_CODE_KIND_SUB_TYPE + Code::name)
-#define FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER(name) \
-  PRINT_INSTANCE_TYPE_DATA("*FIXED_ARRAY_" #name,   \
-                           FIRST_FIXED_ARRAY_SUB_TYPE + name)
-#define CODE_AGE_WRAPPER(name) \
-  PRINT_INSTANCE_TYPE_DATA(    \
-      "*CODE_AGE_" #name,      \
-      FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge)
+#define FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER(name)   \
+  DumpInstanceTypeData(stream, "*FIXED_ARRAY_" #name, \
+                       FIRST_FIXED_ARRAY_SUB_TYPE + name);
+
+#define CODE_AGE_WRAPPER(name)    \
+  DumpInstanceTypeData(           \
+      stream, "*CODE_AGE_" #name, \
+      FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge);
 
   INSTANCE_TYPE_LIST(INSTANCE_TYPE_WRAPPER);
   CODE_KIND_LIST(CODE_KIND_WRAPPER);
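
For orientation, each PrintInstanceTypeJSON call above emits a single line assembled from the PrintF format strings; with illustrative values it looks like:

```
{ "isolate": "0x5643f0c0", "id": 12, "key": "live", "type": "instance_type_data", "instance_type": 180, "instance_type_name": "JS_OBJECT_TYPE", "overall": 123456, "count": 789, "over_allocated": 1024, "histogram": [0, 3, 17, ...], "over_allocated_histogram": [0, 1, 4, ...] }
```

The "key" string and all numbers here are made up; only the field names and their ordering are taken from the code above.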
diff --git a/src/heap/object-stats.h b/src/heap/object-stats.h
index add5a12..7d0cfb5 100644
--- a/src/heap/object-stats.h
+++ b/src/heap/object-stats.h
@@ -75,6 +75,9 @@
           over_allocated;
       over_allocated_histogram_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]
                                [HistogramIndexFromSize(over_allocated)]++;
+      over_allocated_[InstanceType::FIXED_ARRAY_TYPE] += over_allocated;
+      over_allocated_histogram_[InstanceType::FIXED_ARRAY_TYPE]
+                               [HistogramIndexFromSize(over_allocated)]++;
     }
     return true;
   }
@@ -97,6 +100,14 @@
   static const int kLastBucket = 1 << kLastBucketShift;
   static const int kNumberOfBuckets = kLastBucketShift - kFirstBucketShift + 1;
 
+  void PrintKeyAndId(const char* key, int gc_count);
+  // The following functions are excluded from inlining to reduce the overall
+  // binary size of V8. On x64 this saves around 80KB.
+  V8_NOINLINE void PrintInstanceTypeJSON(const char* key, int gc_count,
+                                         const char* name, int index);
+  V8_NOINLINE void DumpInstanceTypeData(std::stringstream& stream,
+                                        const char* name, int index);
+
   int HistogramIndexFromSize(size_t size) {
     if (size == 0) return 0;
     int idx = static_cast<int>(base::ieee754::log2(static_cast<double>(size))) -
diff --git a/src/heap/objects-visiting-inl.h b/src/heap/objects-visiting-inl.h
index 252b2fe..f350256 100644
--- a/src/heap/objects-visiting-inl.h
+++ b/src/heap/objects-visiting-inl.h
@@ -84,7 +84,10 @@
 
   table_.Register(kVisitFreeSpace, &VisitFreeSpace);
 
-  table_.Register(kVisitJSWeakCollection, &JSObjectVisitor::Visit);
+  table_.Register(
+      kVisitJSWeakCollection,
+      &FlexibleBodyVisitor<StaticVisitor, JSWeakCollection::BodyDescriptor,
+                           int>::Visit);
 
   table_.Register(kVisitJSRegExp, &JSObjectVisitor::Visit);
 
diff --git a/src/heap/objects-visiting.cc b/src/heap/objects-visiting.cc
index 9393fcc..d4aa8b2 100644
--- a/src/heap/objects-visiting.cc
+++ b/src/heap/objects-visiting.cc
@@ -107,6 +107,8 @@
     case JS_ARGUMENTS_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_GENERATOR_OBJECT_TYPE:
+    case JS_FIXED_ARRAY_ITERATOR_TYPE:
+    case JS_MODULE_NAMESPACE_TYPE:
     case JS_VALUE_TYPE:
     case JS_DATE_TYPE:
     case JS_ARRAY_TYPE:
@@ -120,6 +122,43 @@
     case JS_SET_ITERATOR_TYPE:
     case JS_MAP_ITERATOR_TYPE:
     case JS_STRING_ITERATOR_TYPE:
+
+    case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
+    case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
+    case JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE:
+    case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE:
+
     case JS_PROMISE_TYPE:
     case JS_BOUND_FUNCTION_TYPE:
       return GetVisitorIdForSize(kVisitJSObject, kVisitJSObjectGeneric,
diff --git a/src/heap/page-parallel-job.h b/src/heap/page-parallel-job.h
index 440c440..ad1d9b3 100644
--- a/src/heap/page-parallel-job.h
+++ b/src/heap/page-parallel-job.h
@@ -103,7 +103,8 @@
     delete main_task;
     // Wait for background tasks.
     for (int i = 0; i < num_tasks_; i++) {
-      if (!cancelable_task_manager_->TryAbort(task_ids[i])) {
+      if (cancelable_task_manager_->TryAbort(task_ids[i]) !=
+          CancelableTaskManager::kTaskAborted) {
         pending_tasks_->Wait();
       }
     }
diff --git a/src/heap/remembered-set.cc b/src/heap/remembered-set.cc
deleted file mode 100644
index c5dab90..0000000
--- a/src/heap/remembered-set.cc
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/heap/remembered-set.h"
-#include "src/heap/heap-inl.h"
-#include "src/heap/heap.h"
-#include "src/heap/mark-compact.h"
-#include "src/heap/slot-set.h"
-#include "src/heap/spaces.h"
-#include "src/heap/store-buffer.h"
-#include "src/macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-template <PointerDirection direction>
-void RememberedSet<direction>::ClearInvalidSlots(Heap* heap) {
-  STATIC_ASSERT(direction == OLD_TO_NEW);
-  for (MemoryChunk* chunk : *heap->old_space()) {
-    SlotSet* slots = GetSlotSet(chunk);
-    if (slots != nullptr) {
-      slots->Iterate(
-          [heap, chunk](Address addr) {
-            Object** slot = reinterpret_cast<Object**>(addr);
-            return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
-          },
-          SlotSet::PREFREE_EMPTY_BUCKETS);
-    }
-  }
-  for (MemoryChunk* chunk : *heap->code_space()) {
-    TypedSlotSet* slots = GetTypedSlotSet(chunk);
-    if (slots != nullptr) {
-      slots->Iterate(
-          [heap, chunk](SlotType type, Address host_addr, Address addr) {
-            if (Marking::IsBlack(ObjectMarking::MarkBitFrom(host_addr))) {
-              return KEEP_SLOT;
-            } else {
-              return REMOVE_SLOT;
-            }
-          },
-          TypedSlotSet::PREFREE_EMPTY_CHUNKS);
-    }
-  }
-  for (MemoryChunk* chunk : *heap->map_space()) {
-    SlotSet* slots = GetSlotSet(chunk);
-    if (slots != nullptr) {
-      slots->Iterate(
-          [heap, chunk](Address addr) {
-            Object** slot = reinterpret_cast<Object**>(addr);
-            // TODO(mlippautz): In map space all allocations would ideally be
-            // map
-            // aligned. After establishing this invariant IsValidSlot could just
-            // refer to the containing object using alignment and check the mark
-            // bits.
-            return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
-          },
-          SlotSet::PREFREE_EMPTY_BUCKETS);
-    }
-  }
-}
-
-template <PointerDirection direction>
-void RememberedSet<direction>::VerifyValidSlots(Heap* heap) {
-  Iterate(heap, [heap](Address addr) {
-    HeapObject* obj =
-        heap->mark_compact_collector()->FindBlackObjectBySlotSlow(addr);
-    if (obj == nullptr) {
-      // The slot is in dead object.
-      MemoryChunk* chunk = MemoryChunk::FromAnyPointerAddress(heap, addr);
-      AllocationSpace owner = chunk->owner()->identity();
-      // The old to old remembered set should not have dead slots.
-      CHECK_NE(direction, OLD_TO_OLD);
-      // The old to new remembered set is allowed to have slots in dead
-      // objects only in map and large object space because these space
-      // cannot have raw untagged pointers.
-      CHECK(owner == MAP_SPACE || owner == LO_SPACE);
-    } else {
-      int offset = static_cast<int>(addr - obj->address());
-      CHECK(obj->IsValidSlot(offset));
-    }
-    return KEEP_SLOT;
-  });
-}
-
-template <PointerDirection direction>
-bool RememberedSet<direction>::IsValidSlot(Heap* heap, MemoryChunk* chunk,
-                                           Object** slot) {
-  STATIC_ASSERT(direction == OLD_TO_NEW);
-  Object* object = *slot;
-  if (!heap->InNewSpace(object)) {
-    return false;
-  }
-  HeapObject* heap_object = HeapObject::cast(object);
-  // If the target object is not black, the source slot must be part
-  // of a non-black (dead) object.
-  return Marking::IsBlack(ObjectMarking::MarkBitFrom(heap_object)) &&
-         heap->mark_compact_collector()->IsSlotInBlackObject(
-             chunk, reinterpret_cast<Address>(slot));
-}
-
-template void RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(Heap* heap);
-template void RememberedSet<OLD_TO_NEW>::VerifyValidSlots(Heap* heap);
-template void RememberedSet<OLD_TO_OLD>::VerifyValidSlots(Heap* heap);
-
-}  // namespace internal
-}  // namespace v8
diff --git a/src/heap/remembered-set.h b/src/heap/remembered-set.h
index 74791b9..a625b13 100644
--- a/src/heap/remembered-set.h
+++ b/src/heap/remembered-set.h
@@ -45,7 +45,8 @@
 
   // Given a page and a range of slots in that page, this function removes the
   // slots from the remembered set.
-  static void RemoveRange(MemoryChunk* chunk, Address start, Address end) {
+  static void RemoveRange(MemoryChunk* chunk, Address start, Address end,
+                          SlotSet::EmptyBucketMode mode) {
     SlotSet* slot_set = GetSlotSet(chunk);
     if (slot_set != nullptr) {
       uintptr_t start_offset = start - chunk->address();
@@ -53,7 +54,7 @@
       DCHECK_LT(start_offset, end_offset);
       if (end_offset < static_cast<uintptr_t>(Page::kPageSize)) {
         slot_set->RemoveRange(static_cast<int>(start_offset),
-                              static_cast<int>(end_offset));
+                              static_cast<int>(end_offset), mode);
       } else {
         // The large page has multiple slot sets.
        // Compute slot set indices for the range [start_offset, end_offset).
@@ -67,17 +68,17 @@
             end_offset - static_cast<uintptr_t>(end_chunk) * Page::kPageSize);
         if (start_chunk == end_chunk) {
           slot_set[start_chunk].RemoveRange(offset_in_start_chunk,
-                                            offset_in_end_chunk);
+                                            offset_in_end_chunk, mode);
         } else {
           // Clear all slots from start_offset to the end of first chunk.
           slot_set[start_chunk].RemoveRange(offset_in_start_chunk,
-                                            Page::kPageSize);
+                                            Page::kPageSize, mode);
           // Clear all slots in intermediate chunks.
           for (int i = start_chunk + 1; i < end_chunk; i++) {
-            slot_set[i].RemoveRange(0, Page::kPageSize);
+            slot_set[i].RemoveRange(0, Page::kPageSize, mode);
           }
           // Clear slots from the beginning of the last page to end_offset.
-          slot_set[end_chunk].RemoveRange(0, offset_in_end_chunk);
+          slot_set[end_chunk].RemoveRange(0, offset_in_end_chunk, mode);
         }
       }
     }
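
The chunk-index computation in this hunk is easy to check by hand. A worked example as comments, assuming a 512 KB page (0x80000 bytes; hedged, the real constant lives in spaces.h):

```cpp
// Illustrative arithmetic for the large-page path above, assuming
// Page::kPageSize == 0x80000 (512 KB). For a removal range
// [start_offset, end_offset) = [0x60000, 0x150000):
//   start_chunk           = 0x060000 / 0x80000 = 0   (first slot set)
//   end_chunk             = 0x150000 / 0x80000 = 2   (third slot set)
//   offset_in_start_chunk = 0x060000 - 0 * 0x80000 = 0x60000
//   offset_in_end_chunk   = 0x150000 - 2 * 0x80000 = 0x50000
// Since start_chunk != end_chunk, the code performs:
//   slot_set[0].RemoveRange(0x60000, Page::kPageSize, mode);
//   slot_set[1].RemoveRange(0,       Page::kPageSize, mode);  // whole chunk
//   slot_set[2].RemoveRange(0,       0x50000,         mode);
```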
@@ -201,9 +202,7 @@
   // slots that are not part of live objects anymore. This method must be
   // called after marking, when the whole transitive closure is known and
   // must be called before sweeping when mark bits are still intact.
-  static void ClearInvalidSlots(Heap* heap);
-
-  static void VerifyValidSlots(Heap* heap);
+  static void ClearInvalidTypedSlots(Heap* heap, MemoryChunk* chunk);
 
  private:
   static SlotSet* GetSlotSet(MemoryChunk* chunk) {
diff --git a/src/heap/scavenge-job.h b/src/heap/scavenge-job.h
index fadfccd..f7fbfc1 100644
--- a/src/heap/scavenge-job.h
+++ b/src/heap/scavenge-job.h
@@ -6,6 +6,7 @@
 #define V8_HEAP_SCAVENGE_JOB_H_
 
 #include "src/cancelable-task.h"
+#include "src/globals.h"
 #include "src/heap/gc-tracer.h"
 
 namespace v8 {
@@ -16,7 +17,7 @@
 
 
 // This class posts idle tasks and performs scavenges in the idle tasks.
-class ScavengeJob {
+class V8_EXPORT_PRIVATE ScavengeJob {
  public:
   class IdleTask : public CancelableIdleTask {
    public:
diff --git a/src/heap/scavenger-inl.h b/src/heap/scavenger-inl.h
index 9671f36..4cc215a 100644
--- a/src/heap/scavenger-inl.h
+++ b/src/heap/scavenger-inl.h
@@ -62,10 +62,8 @@
 }
 
 // static
-template <PromotionMode promotion_mode>
-void StaticScavengeVisitor<promotion_mode>::VisitPointer(Heap* heap,
-                                                         HeapObject* obj,
-                                                         Object** p) {
+void StaticScavengeVisitor::VisitPointer(Heap* heap, HeapObject* obj,
+                                         Object** p) {
   Object* object = *p;
   if (!heap->InNewSpace(object)) return;
   Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(p),
diff --git a/src/heap/scavenger.cc b/src/heap/scavenger.cc
index 59d0430..cad0e8a 100644
--- a/src/heap/scavenger.cc
+++ b/src/heap/scavenger.cc
@@ -22,7 +22,7 @@
 
 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
 
-template <MarksHandling marks_handling, PromotionMode promotion_mode,
+template <MarksHandling marks_handling,
           LoggingAndProfiling logging_and_profiling_mode>
 class ScavengingVisitor : public StaticVisitorBase {
  public:
@@ -185,8 +185,12 @@
     if (allocation.To(&target)) {
       MigrateObject(heap, object, target, object_size);
 
-      // Update slot to new target.
-      *slot = target;
+      // Update the slot to the new target using CAS. A concurrent sweeper
+      // thread may filter the slot concurrently.
+      HeapObject* old = *slot;
+      base::Release_CompareAndSwap(reinterpret_cast<base::AtomicWord*>(slot),
+                                   reinterpret_cast<base::AtomicWord>(old),
+                                   reinterpret_cast<base::AtomicWord>(target));
 
       if (object_contents == POINTER_OBJECT) {
         heap->promotion_queue()->insert(
@@ -206,8 +210,7 @@
     SLOW_DCHECK(object->Size() == object_size);
     Heap* heap = map->GetHeap();
 
-    if (!heap->ShouldBePromoted<promotion_mode>(object->address(),
-                                                object_size)) {
+    if (!heap->ShouldBePromoted(object->address(), object_size)) {
       // A semi-space copy may fail due to fragmentation. In that case, we
       // try to promote the object.
       if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
@@ -219,9 +222,7 @@
                                                   object_size)) {
       return;
     }
-    if (promotion_mode == PROMOTE_MARKED) {
-      FatalProcessOutOfMemory("Scavenger: promoting marked\n");
-    }
+
     // If promotion failed, we try to copy the object to the other semi-space
     if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
 
@@ -358,21 +359,19 @@
   static VisitorDispatchTable<ScavengingCallback> table_;
 };
 
-template <MarksHandling marks_handling, PromotionMode promotion_mode,
+template <MarksHandling marks_handling,
           LoggingAndProfiling logging_and_profiling_mode>
-VisitorDispatchTable<ScavengingCallback> ScavengingVisitor<
-    marks_handling, promotion_mode, logging_and_profiling_mode>::table_;
+VisitorDispatchTable<ScavengingCallback>
+    ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
 
 // static
 void Scavenger::Initialize() {
-  ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
+  ScavengingVisitor<TRANSFER_MARKS,
                     LOGGING_AND_PROFILING_DISABLED>::Initialize();
-  ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
-                    LOGGING_AND_PROFILING_DISABLED>::Initialize();
-  ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
+  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
+  ScavengingVisitor<TRANSFER_MARKS,
                     LOGGING_AND_PROFILING_ENABLED>::Initialize();
-  ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
-                    LOGGING_AND_PROFILING_ENABLED>::Initialize();
+  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
 }
 
 
@@ -397,21 +396,21 @@
   if (!heap()->incremental_marking()->IsMarking()) {
     if (!logging_and_profiling) {
       scavenging_visitors_table_.CopyFrom(
-          ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
+          ScavengingVisitor<IGNORE_MARKS,
                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
     } else {
       scavenging_visitors_table_.CopyFrom(
-          ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
+          ScavengingVisitor<IGNORE_MARKS,
                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
     }
   } else {
     if (!logging_and_profiling) {
       scavenging_visitors_table_.CopyFrom(
-          ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
+          ScavengingVisitor<TRANSFER_MARKS,
                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
     } else {
       scavenging_visitors_table_.CopyFrom(
-          ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
+          ScavengingVisitor<TRANSFER_MARKS,
                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
     }
 
diff --git a/src/heap/scavenger.h b/src/heap/scavenger.h
index f2213b8..54fe6ff 100644
--- a/src/heap/scavenger.h
+++ b/src/heap/scavenger.h
@@ -63,9 +63,8 @@
 
 // Helper class for turning the scavenger into an object visitor that is also
 // filtering out non-HeapObjects and objects which do not reside in new space.
-template <PromotionMode promotion_mode>
 class StaticScavengeVisitor
-    : public StaticNewSpaceVisitor<StaticScavengeVisitor<promotion_mode>> {
+    : public StaticNewSpaceVisitor<StaticScavengeVisitor> {
  public:
   static inline void VisitPointer(Heap* heap, HeapObject* object, Object** p);
 };
diff --git a/src/heap/slot-set.h b/src/heap/slot-set.h
index 017667b..da61052 100644
--- a/src/heap/slot-set.h
+++ b/src/heap/slot-set.h
@@ -5,6 +5,7 @@
 #ifndef V8_SLOT_SET_H
 #define V8_SLOT_SET_H
 
+#include <map>
 #include <stack>
 
 #include "src/allocation.h"
@@ -25,7 +26,13 @@
 // Each bucket is a bitmap with a bit corresponding to a single slot offset.
 class SlotSet : public Malloced {
  public:
-  enum IterationMode { PREFREE_EMPTY_BUCKETS, KEEP_EMPTY_BUCKETS };
+  enum EmptyBucketMode {
+    FREE_EMPTY_BUCKETS,     // An empty bucket will be deallocated immediately.
+    PREFREE_EMPTY_BUCKETS,  // An empty bucket will be unlinked from the slot
+                            // set, but deallocated on demand by a sweeper
+                            // thread.
+    KEEP_EMPTY_BUCKETS      // An empty bucket will be kept.
+  };
 
   SlotSet() {
     for (int i = 0; i < kBuckets; i++) {
@@ -76,7 +83,7 @@
 
   // The slot offsets specify a range of slots at addresses:
   // [page_start_ + start_offset ... page_start_ + end_offset).
-  void RemoveRange(int start_offset, int end_offset) {
+  void RemoveRange(int start_offset, int end_offset, EmptyBucketMode mode) {
     CHECK_LE(end_offset, 1 << kPageSizeBits);
     DCHECK_LE(start_offset, end_offset);
     int start_bucket, start_cell, start_bit;
@@ -93,12 +100,10 @@
     int current_cell = start_cell;
     ClearCell(current_bucket, current_cell, ~start_mask);
     current_cell++;
+    base::AtomicValue<uint32_t>* bucket_ptr = bucket[current_bucket].Value();
     if (current_bucket < end_bucket) {
-      if (bucket[current_bucket].Value() != nullptr) {
-        while (current_cell < kCellsPerBucket) {
-          bucket[current_bucket].Value()[current_cell].SetValue(0);
-          current_cell++;
-        }
+      if (bucket_ptr != nullptr) {
+        ClearBucket(bucket_ptr, current_cell, kCellsPerBucket);
       }
       // The rest of the current bucket is cleared.
       // Move on to the next bucket.
@@ -108,17 +113,27 @@
     DCHECK(current_bucket == end_bucket ||
            (current_bucket < end_bucket && current_cell == 0));
     while (current_bucket < end_bucket) {
-      ReleaseBucket(current_bucket);
+      if (mode == PREFREE_EMPTY_BUCKETS) {
+        PreFreeEmptyBucket(current_bucket);
+      } else if (mode == FREE_EMPTY_BUCKETS) {
+        ReleaseBucket(current_bucket);
+      } else {
+        DCHECK(mode == KEEP_EMPTY_BUCKETS);
+        bucket_ptr = bucket[current_bucket].Value();
+        if (bucket_ptr) {
+          ClearBucket(bucket_ptr, 0, kCellsPerBucket);
+        }
+      }
       current_bucket++;
     }
     // All buckets between start_bucket and end_bucket are cleared.
     DCHECK(current_bucket == end_bucket && current_cell <= end_cell);
-    if (current_bucket == kBuckets ||
-        bucket[current_bucket].Value() == nullptr) {
+    // Check the bucket index before dereferencing: reading bucket[kBuckets]
+    // would be out of bounds when the range ends at the page boundary.
+    if (current_bucket == kBuckets) return;
+    bucket_ptr = bucket[current_bucket].Value();
+    if (bucket_ptr == nullptr) {
       return;
     }
     while (current_cell < end_cell) {
-      bucket[current_bucket].Value()[current_cell].SetValue(0);
+      bucket_ptr[current_cell].SetValue(0);
       current_cell++;
     }
     // All cells between start_cell and end_cell are cleared.
@@ -148,19 +163,19 @@
   //    else return REMOVE_SLOT;
   // });
   template <typename Callback>
-  int Iterate(Callback callback, IterationMode mode) {
+  int Iterate(Callback callback, EmptyBucketMode mode) {
     int new_count = 0;
     for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
-      if (bucket[bucket_index].Value() != nullptr) {
+      base::AtomicValue<uint32_t>* current_bucket =
+          bucket[bucket_index].Value();
+      if (current_bucket != nullptr) {
         int in_bucket_count = 0;
-        base::AtomicValue<uint32_t>* current_bucket =
-            bucket[bucket_index].Value();
         int cell_offset = bucket_index * kBitsPerBucket;
         for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
           if (current_bucket[i].Value()) {
             uint32_t cell = current_bucket[i].Value();
             uint32_t old_cell = cell;
-            uint32_t new_cell = cell;
+            uint32_t mask = 0;
             while (cell) {
               int bit_offset = base::bits::CountTrailingZeros32(cell);
               uint32_t bit_mask = 1u << bit_offset;
@@ -168,10 +183,11 @@
               if (callback(page_start_ + slot) == KEEP_SLOT) {
                 ++in_bucket_count;
               } else {
-                new_cell ^= bit_mask;
+                mask |= bit_mask;
               }
               cell ^= bit_mask;
             }
+            uint32_t new_cell = old_cell & ~mask;
             if (old_cell != new_cell) {
               while (!current_bucket[i].TrySetValue(old_cell, new_cell)) {
                 // If TrySetValue fails, the cell must have changed. We just
@@ -180,17 +196,13 @@
                 // method will only be called on the main thread and filtering
                 // threads will only remove slots.
                 old_cell = current_bucket[i].Value();
-                new_cell &= old_cell;
+                new_cell = old_cell & ~mask;
               }
             }
           }
         }
         if (mode == PREFREE_EMPTY_BUCKETS && in_bucket_count == 0) {
-          base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
-          base::AtomicValue<uint32_t>* bucket_ptr =
-              bucket[bucket_index].Value();
-          to_be_freed_buckets_.push(bucket_ptr);
-          bucket[bucket_index].SetValue(nullptr);
+          PreFreeEmptyBucket(bucket_index);
         }
         new_count += in_bucket_count;
       }
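
The switch from mutating new_cell directly to accumulating removals in `mask` changes what the CAS retry recomputes. A worked example:

```cpp
// Worked example of the mask accumulation above (illustrative values):
//   old_cell = 0b1011   (slots 0, 1 and 3 recorded)
//   callback returns REMOVE_SLOT for slot 1  ->  mask = 0b0010
//   new_cell = old_cell & ~mask = 0b1001
// If TrySetValue fails because a concurrent filtering thread cleared
// slot 3 meanwhile (the cell is now 0b0011), the retry recomputes from
// the freshly read value:
//   new_cell = 0b0011 & ~0b0010 = 0b0001
// Keeping the removals in `mask` makes each retry a pure function of the
// current cell contents rather than of an earlier snapshot.
```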
@@ -226,6 +238,26 @@
     return result;
   }
 
+  void ClearBucket(base::AtomicValue<uint32_t>* bucket, int start_cell,
+                   int end_cell) {
+    DCHECK_GE(start_cell, 0);
+    DCHECK_LE(end_cell, kCellsPerBucket);
+    int current_cell = start_cell;
+    while (current_cell < end_cell) {
+      bucket[current_cell].SetValue(0);
+      current_cell++;
+    }
+  }
+
+  void PreFreeEmptyBucket(int bucket_index) {
+    base::AtomicValue<uint32_t>* bucket_ptr = bucket[bucket_index].Value();
+    if (bucket_ptr != nullptr) {
+      base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
+      to_be_freed_buckets_.push(bucket_ptr);
+      bucket[bucket_index].SetValue(nullptr);
+    }
+  }
+
   void ReleaseBucket(int bucket_index) {
     DeleteArray<base::AtomicValue<uint32_t>>(bucket[bucket_index].Value());
     bucket[bucket_index].SetValue(nullptr);
@@ -429,6 +461,28 @@
     }
   }
 
+  void RemoveInvaldSlots(std::map<uint32_t, uint32_t>& invalid_ranges) {
+    Chunk* chunk = chunk_.Value();
+    while (chunk != nullptr) {
+      TypedSlot* buffer = chunk->buffer.Value();
+      int count = chunk->count.Value();
+      for (int i = 0; i < count; i++) {
+        uint32_t host_offset = buffer[i].host_offset();
+        std::map<uint32_t, uint32_t>::iterator upper_bound =
+            invalid_ranges.upper_bound(host_offset);
+        if (upper_bound == invalid_ranges.begin()) continue;
+        // upper_bound points to the invalid range after the given slot. Hence,
+        // we have to go to the previous element.
+        upper_bound--;
+        DCHECK_LE(upper_bound->first, host_offset);
+        if (upper_bound->second > host_offset) {
+          buffer[i].Clear();
+        }
+      }
+      chunk = chunk->next.Value();
+    }
+  }
+
  private:
   static const int kInitialBufferSize = 100;
   static const int kMaxBufferSize = 16 * KB;
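
Combining the new EmptyBucketMode argument with the Iterate contract documented earlier in the file (return KEEP_SLOT to retain a slot, REMOVE_SLOT to drop it), a filtering pass might look like the following sketch. The surrounding function is hypothetical; the pattern matches the one in the deleted remembered-set.cc above:

```cpp
// Sketch of a caller-side filtering pass over a SlotSet: keep slots that
// still point into new space, drop the rest, and let a sweeper thread
// deallocate buckets that become empty.
int FilterSlots(SlotSet* slots, Heap* heap) {
  return slots->Iterate(
      [heap](Address addr) {
        Object** slot = reinterpret_cast<Object**>(addr);
        return heap->InNewSpace(*slot) ? KEEP_SLOT : REMOVE_SLOT;
      },
      SlotSet::PREFREE_EMPTY_BUCKETS);
}
```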
diff --git a/src/heap/spaces-inl.h b/src/heap/spaces-inl.h
index 314d22f..f3f9215 100644
--- a/src/heap/spaces-inl.h
+++ b/src/heap/spaces-inl.h
@@ -203,14 +203,15 @@
   return page;
 }
 
-Page* Page::ConvertNewToOld(Page* old_page, PagedSpace* new_owner) {
+Page* Page::ConvertNewToOld(Page* old_page) {
+  OldSpace* old_space = old_page->heap()->old_space();
   DCHECK(old_page->InNewSpace());
-  old_page->set_owner(new_owner);
+  old_page->set_owner(old_space);
   old_page->SetFlags(0, ~0);
-  new_owner->AccountCommitted(old_page->size());
+  old_space->AccountCommitted(old_page->size());
   Page* new_page = Page::Initialize<kDoNotFreeMemory>(
-      old_page->heap(), old_page, NOT_EXECUTABLE, new_owner);
-  new_page->InsertAfter(new_owner->anchor()->prev_page());
+      old_page->heap(), old_page, NOT_EXECUTABLE, old_space);
+  new_page->InsertAfter(old_space->anchor()->prev_page());
   return new_page;
 }
 
@@ -279,6 +280,7 @@
     added += category->available();
     category->Relink();
   });
+  DCHECK_EQ(page->AvailableInFreeList(), page->available_in_free_list());
   return added;
 }
 
@@ -597,8 +599,7 @@
   return static_cast<LargePage*>(chunk);
 }
 
-
-intptr_t LargeObjectSpace::Available() {
+size_t LargeObjectSpace::Available() {
   return ObjectSizeFor(heap()->memory_allocator()->Available());
 }
 
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index c2043ed..e0e6d12 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -107,7 +107,7 @@
   }
 
   const size_t reserved_area =
-      kReservedCodeRangePages * base::OS::CommitPageSize();
+      kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
   if (requested < (kMaximalCodeRangeSize - reserved_area))
     requested += reserved_area;
 
@@ -294,8 +294,8 @@
       highest_ever_allocated_(reinterpret_cast<void*>(0)),
       unmapper_(this) {}
 
-bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable,
-                            intptr_t code_range_size) {
+bool MemoryAllocator::SetUp(size_t capacity, size_t capacity_executable,
+                            size_t code_range_size) {
   capacity_ = RoundUp(capacity, Page::kPageSize);
   capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
   DCHECK_GE(capacity_, capacity_executable_);
@@ -304,23 +304,17 @@
   size_executable_ = 0;
 
   code_range_ = new CodeRange(isolate_);
-  if (!code_range_->SetUp(static_cast<size_t>(code_range_size))) return false;
+  if (!code_range_->SetUp(code_range_size)) return false;
 
   return true;
 }
 
 
 void MemoryAllocator::TearDown() {
-  unmapper()->WaitUntilCompleted();
-
-  MemoryChunk* chunk = nullptr;
-  while ((chunk = unmapper()->TryGetPooledMemoryChunkSafe()) != nullptr) {
-    FreeMemory(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize,
-               NOT_EXECUTABLE);
-  }
+  unmapper()->TearDown();
 
   // Check that spaces were torn down before MemoryAllocator.
-  DCHECK_EQ(size_.Value(), 0);
+  DCHECK_EQ(size_.Value(), 0u);
   // TODO(gc) this will be true again when we fix FreeMemory.
   // DCHECK(size_executable_ == 0);
   capacity_ = 0;
@@ -384,6 +378,13 @@
   }
 }
 
+void MemoryAllocator::Unmapper::TearDown() {
+  WaitUntilCompleted();
+  ReconsiderDelayedChunks();
+  CHECK(delayed_regular_chunks_.empty());
+  PerformFreeMemoryOnQueuedChunks();
+}
+
 void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
   std::list<MemoryChunk*> delayed_chunks(std::move(delayed_regular_chunks_));
   // Move constructed, so the permanent list should be empty.
@@ -395,11 +396,12 @@
 
 bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
   MarkCompactCollector* mc = isolate_->heap()->mark_compact_collector();
-  // We cannot free memory chunks in new space while the sweeper is running
-  // since a sweeper thread might be stuck right before trying to lock the
-  // corresponding page.
-  return !chunk->InNewSpace() || (mc == nullptr) || !FLAG_concurrent_sweeping ||
-         mc->sweeper().IsSweepingCompleted();
+  // We cannot free a memory chunk in new space while the sweeper is running
+  // because the memory chunk can be in the queue of a sweeper task.
+  // Chunks in old generation are unmapped if they are empty.
+  DCHECK(chunk->InNewSpace() || chunk->SweepingDone());
+  return !chunk->InNewSpace() || mc == nullptr || !FLAG_concurrent_sweeping ||
+         mc->sweeper().IsSweepingCompleted(NEW_SPACE);
 }
 
 bool MemoryAllocator::CommitMemory(Address base, size_t size,
@@ -478,6 +480,7 @@
     // Failed to commit the body. Release the mapping and any partially
     // committed regions inside it.
     reservation.Release();
+    size_.Decrement(reserve_size);
     return NULL;
   }
 
@@ -513,7 +516,6 @@
   chunk->typed_old_to_new_slots_.SetValue(nullptr);
   chunk->typed_old_to_old_slots_ = nullptr;
   chunk->skip_list_ = nullptr;
-  chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
   chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
@@ -525,7 +527,6 @@
   chunk->set_next_chunk(nullptr);
   chunk->set_prev_chunk(nullptr);
   chunk->local_tracker_ = nullptr;
-  chunk->black_area_end_marker_map_ = nullptr;
 
   DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
 
@@ -547,9 +548,9 @@
       IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
   size_t header_size = area_start() - address() - guard_size;
   size_t commit_size =
-      RoundUp(header_size + requested, base::OS::CommitPageSize());
+      RoundUp(header_size + requested, MemoryAllocator::GetCommitPageSize());
   size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
-                                  base::OS::CommitPageSize());
+                                  MemoryAllocator::GetCommitPageSize());
 
   if (commit_size > committed_size) {
     // Commit size should be less or equal than the reserved size.
@@ -617,8 +618,8 @@
 }
 
 void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
-  DCHECK_GE(bytes_to_shrink, static_cast<size_t>(base::OS::CommitPageSize()));
-  DCHECK_EQ(0, bytes_to_shrink % base::OS::CommitPageSize());
+  DCHECK_GE(bytes_to_shrink, static_cast<size_t>(GetCommitPageSize()));
+  DCHECK_EQ(0u, bytes_to_shrink % GetCommitPageSize());
   Address free_start = chunk->area_end_ - bytes_to_shrink;
   // Don't adjust the size of the page. The area is just uncommitted but not
   // released.
@@ -628,22 +629,22 @@
     if (chunk->reservation_.IsReserved())
       chunk->reservation_.Guard(chunk->area_end_);
     else
-      base::OS::Guard(chunk->area_end_, base::OS::CommitPageSize());
+      base::OS::Guard(chunk->area_end_, GetCommitPageSize());
   }
 }
 
-MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
-                                            intptr_t commit_area_size,
+MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
+                                            size_t commit_area_size,
                                             Executability executable,
                                             Space* owner) {
-  DCHECK(commit_area_size <= reserve_area_size);
+  DCHECK_LE(commit_area_size, reserve_area_size);
 
   size_t chunk_size;
   Heap* heap = isolate_->heap();
-  Address base = NULL;
+  Address base = nullptr;
   base::VirtualMemory reservation;
-  Address area_start = NULL;
-  Address area_end = NULL;
+  Address area_start = nullptr;
+  Address area_end = nullptr;
 
   //
   // MemoryChunk layout:
@@ -677,7 +678,7 @@
 
   if (executable == EXECUTABLE) {
     chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
-                         base::OS::CommitPageSize()) +
+                         GetCommitPageSize()) +
                  CodePageGuardSize();
 
     // Check executable memory limit.
@@ -689,7 +690,7 @@
 
     // Size of header (not executable) plus area (executable).
     size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
-                                 base::OS::CommitPageSize());
+                                 GetCommitPageSize());
     // Allocate executable memory either from code range or from the
     // OS.
 #ifdef V8_TARGET_ARCH_MIPS64
@@ -725,10 +726,10 @@
     area_end = area_start + commit_area_size;
   } else {
     chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
-                         base::OS::CommitPageSize());
+                         GetCommitPageSize());
     size_t commit_size =
         RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
-                base::OS::CommitPageSize());
+                GetCommitPageSize());
     base =
         AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
                               executable, &reservation);
@@ -777,6 +778,14 @@
   available_in_free_list_ = 0;
 }
 
+size_t Page::AvailableInFreeList() {
+  size_t sum = 0;
+  ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
+    sum += category->available();
+  });
+  return sum;
+}
+
 size_t Page::ShrinkToHighWaterMark() {
   // Shrink pages to high water mark. The water mark points either to a filler
   // or the area_end.
@@ -805,7 +814,7 @@
 
   size_t unused = RoundDown(
       static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize),
-      base::OS::CommitPageSize());
+      MemoryAllocator::GetCommitPageSize());
   if (unused > 0) {
     if (FLAG_trace_gc_verbose) {
       PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
@@ -914,11 +923,11 @@
     MemoryChunk* chunk);
 
 template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
-Page* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
+Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
                                     Executability executable) {
   MemoryChunk* chunk = nullptr;
   if (alloc_mode == kPooled) {
-    DCHECK_EQ(size, static_cast<intptr_t>(MemoryChunk::kAllocatableMemory));
+    DCHECK_EQ(size, static_cast<size_t>(MemoryChunk::kAllocatableMemory));
     DCHECK_EQ(executable, NOT_EXECUTABLE);
     chunk = AllocatePagePooled(owner);
   }
@@ -931,15 +940,15 @@
 
 template Page*
 MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
-    intptr_t size, PagedSpace* owner, Executability executable);
+    size_t size, PagedSpace* owner, Executability executable);
 template Page*
 MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
-    intptr_t size, SemiSpace* owner, Executability executable);
+    size_t size, SemiSpace* owner, Executability executable);
 template Page*
 MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
-    intptr_t size, SemiSpace* owner, Executability executable);
+    size_t size, SemiSpace* owner, Executability executable);
 
-LargePage* MemoryAllocator::AllocateLargePage(intptr_t size,
+LargePage* MemoryAllocator::AllocateLargePage(size_t size,
                                               LargeObjectSpace* owner,
                                               Executability executable) {
   MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
@@ -1000,30 +1009,35 @@
 }
 #endif
 
-
-int MemoryAllocator::CodePageGuardStartOffset() {
+size_t MemoryAllocator::CodePageGuardStartOffset() {
   // We are guarding code pages: the first OS page after the header
   // will be protected as non-writable.
-  return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
+  return RoundUp(Page::kObjectStartOffset, GetCommitPageSize());
 }
 
-
-int MemoryAllocator::CodePageGuardSize() {
-  return static_cast<int>(base::OS::CommitPageSize());
+size_t MemoryAllocator::CodePageGuardSize() {
+  return static_cast<size_t>(GetCommitPageSize());
 }
 
-
-int MemoryAllocator::CodePageAreaStartOffset() {
+size_t MemoryAllocator::CodePageAreaStartOffset() {
   // We are guarding code pages: the first OS page after the header
   // will be protected as non-writable.
   return CodePageGuardStartOffset() + CodePageGuardSize();
 }
 
-
-int MemoryAllocator::CodePageAreaEndOffset() {
+size_t MemoryAllocator::CodePageAreaEndOffset() {
   // We are guarding code pages: the last OS page will be protected as
   // non-writable.
-  return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
+  return Page::kPageSize - static_cast<int>(GetCommitPageSize());
+}
+
+intptr_t MemoryAllocator::GetCommitPageSize() {
+  if (FLAG_v8_os_page_size != 0) {
+    DCHECK(base::bits::IsPowerOfTwo32(FLAG_v8_os_page_size));
+    return FLAG_v8_os_page_size * KB;
+  } else {
+    return base::OS::CommitPageSize();
+  }
 }
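
GetCommitPageSize makes the commit granularity overridable: per the code above, the flag value is interpreted in KB, must be a power of two, and falls back to the OS value when left at its default of 0. An illustration (flag spelling hedged; V8 accepts dashes or underscores on the command line):

```cpp
// Illustrative behaviour of the hook above:
//   --v8-os-page-size=64   =>  GetCommitPageSize() == 64 * KB == 65536
//   flag left at default 0 =>  GetCommitPageSize() ==
//                              base::OS::CommitPageSize()
// A non-power-of-two value such as 48 trips the DCHECK in debug builds.
```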
 
 
@@ -1250,6 +1264,7 @@
     p->set_owner(this);
     p->InsertAfter(anchor_.prev_page());
     RelinkFreeListCategories(p);
+    DCHECK_EQ(p->AvailableInFreeList(), p->available_in_free_list());
   }
 }
 
@@ -1277,7 +1292,7 @@
   // Note: this function can only be called on iterable spaces.
   DCHECK(!heap()->mark_compact_collector()->in_use());
 
-  if (!Contains(addr)) return Smi::FromInt(0);  // Signaling not found.
+  if (!Contains(addr)) return Smi::kZero;  // Signaling not found.
 
   Page* p = Page::FromAddress(addr);
   HeapObjectIterator it(p);
@@ -1288,7 +1303,7 @@
   }
 
   UNREACHABLE();
-  return Smi::FromInt(0);
+  return Smi::kZero;
 }
 
 void PagedSpace::ShrinkImmortalImmovablePages() {
@@ -1378,12 +1393,6 @@
 
   if (heap()->incremental_marking()->black_allocation()) {
     Page* page = Page::FromAllocationAreaAddress(current_top);
-    // We have to remember the end of the current black allocation area if
-    // something was allocated in the current bump pointer range.
-    if (allocation_info_.original_top() != current_top) {
-      Address end_black_area = current_top - kPointerSize;
-      page->AddBlackAreaEndMarker(end_black_area);
-    }
 
     // Clear the bits in the unused black area.
     if (current_top != current_limit) {
@@ -1394,7 +1403,8 @@
   }
 
   SetTopAndLimit(NULL, NULL);
-  Free(current_top, static_cast<int>(current_limit - current_top));
+  DCHECK_GE(current_limit, current_top);
+  Free(current_top, current_limit - current_top);
 }
 
 void PagedSpace::IncreaseCapacity(size_t bytes) {
@@ -1408,8 +1418,6 @@
   free_list_.EvictFreeListItems(page);
   DCHECK(!free_list_.ContainsPageFreeListItems(page));
 
-  page->ReleaseBlackAreaEndMarkerMap();
-
   if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
     allocation_info_.Reset(nullptr, nullptr);
   }
@@ -1481,10 +1489,11 @@
 // -----------------------------------------------------------------------------
 // NewSpace implementation
 
-bool NewSpace::SetUp(int initial_semispace_capacity,
-                     int maximum_semispace_capacity) {
+bool NewSpace::SetUp(size_t initial_semispace_capacity,
+                     size_t maximum_semispace_capacity) {
   DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
-  DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));
+  DCHECK(base::bits::IsPowerOfTwo32(
+      static_cast<uint32_t>(maximum_semispace_capacity)));
 
   to_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
   from_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
@@ -1529,9 +1538,9 @@
 void NewSpace::Grow() {
   // Double the semispace size but only up to maximum capacity.
   DCHECK(TotalCapacity() < MaximumCapacity());
-  int new_capacity =
+  size_t new_capacity =
       Min(MaximumCapacity(),
-          FLAG_semi_space_growth_factor * static_cast<int>(TotalCapacity()));
+          static_cast<size_t>(FLAG_semi_space_growth_factor) * TotalCapacity());
   if (to_space_.GrowTo(new_capacity)) {
     // Only grow from space if we managed to grow to-space.
     if (!from_space_.GrowTo(new_capacity)) {
@@ -1549,8 +1558,8 @@
 
 
 void NewSpace::Shrink() {
-  int new_capacity = Max(InitialTotalCapacity(), 2 * static_cast<int>(Size()));
-  int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
+  size_t new_capacity = Max(InitialTotalCapacity(), 2 * Size());
+  size_t rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
   if (rounded_new_capacity < TotalCapacity() &&
       to_space_.ShrinkTo(rounded_new_capacity)) {
     // Only shrink from-space if we managed to shrink to-space.
@@ -1577,7 +1586,8 @@
 
 bool SemiSpace::EnsureCurrentCapacity() {
   if (is_committed()) {
-    const int expected_pages = current_capacity_ / Page::kPageSize;
+    const int expected_pages =
+        static_cast<int>(current_capacity_ / Page::kPageSize);
     int actual_pages = 0;
     Page* current_page = anchor()->next_page();
     while (current_page != anchor()) {
@@ -1604,7 +1614,7 @@
       current_page->SetFlags(anchor()->prev_page()->GetFlags(),
                              Page::kCopyAllFlags);
       heap()->CreateFillerObjectAt(current_page->area_start(),
-                                   current_page->area_size(),
+                                   static_cast<int>(current_page->area_size()),
                                    ClearRecordedSlots::kNo);
     }
   }
@@ -1878,8 +1888,8 @@
 // -----------------------------------------------------------------------------
 // SemiSpace implementation
 
-void SemiSpace::SetUp(int initial_capacity, int maximum_capacity) {
-  DCHECK_GE(maximum_capacity, Page::kPageSize);
+void SemiSpace::SetUp(size_t initial_capacity, size_t maximum_capacity) {
+  DCHECK_GE(maximum_capacity, static_cast<size_t>(Page::kPageSize));
   minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
   current_capacity_ = minimum_capacity_;
   maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
@@ -1902,7 +1912,7 @@
 bool SemiSpace::Commit() {
   DCHECK(!is_committed());
   Page* current = anchor();
-  const int num_pages = current_capacity_ / Page::kPageSize;
+  const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
   for (int pages_added = 0; pages_added < num_pages; pages_added++) {
     Page* new_page =
         heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
@@ -1948,17 +1958,16 @@
   return size;
 }
 
-
-bool SemiSpace::GrowTo(int new_capacity) {
+bool SemiSpace::GrowTo(size_t new_capacity) {
   if (!is_committed()) {
     if (!Commit()) return false;
   }
-  DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
+  DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0u);
   DCHECK_LE(new_capacity, maximum_capacity_);
   DCHECK_GT(new_capacity, current_capacity_);
-  const int delta = new_capacity - current_capacity_;
+  const size_t delta = new_capacity - current_capacity_;
   DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
-  const int delta_pages = delta / Page::kPageSize;
+  const int delta_pages = static_cast<int>(delta / Page::kPageSize);
   Page* last_page = anchor()->prev_page();
   DCHECK_NE(last_page, anchor());
   for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
@@ -1993,14 +2002,14 @@
   }
 }
 
-bool SemiSpace::ShrinkTo(int new_capacity) {
-  DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
+bool SemiSpace::ShrinkTo(size_t new_capacity) {
+  DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0u);
   DCHECK_GE(new_capacity, minimum_capacity_);
   DCHECK_LT(new_capacity, current_capacity_);
   if (is_committed()) {
-    const int delta = current_capacity_ - new_capacity;
+    const size_t delta = current_capacity_ - new_capacity;
     DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
-    int delta_pages = delta / Page::kPageSize;
+    int delta_pages = static_cast<int>(delta / Page::kPageSize);
     Page* new_last_page;
     Page* last_page;
     while (delta_pages > 0) {
@@ -2343,7 +2352,7 @@
   available_ = 0;
 }
 
-FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
+FreeSpace* FreeListCategory::PickNodeFromList(size_t* node_size) {
   DCHECK(page()->CanAllocate());
 
   FreeSpace* node = top();
@@ -2354,8 +2363,8 @@
   return node;
 }
 
-FreeSpace* FreeListCategory::TryPickNodeFromList(int minimum_size,
-                                                 int* node_size) {
+FreeSpace* FreeListCategory::TryPickNodeFromList(size_t minimum_size,
+                                                 size_t* node_size) {
   DCHECK(page()->CanAllocate());
 
   FreeSpace* node = PickNodeFromList(node_size);
@@ -2367,15 +2376,16 @@
   return node;
 }
 
-FreeSpace* FreeListCategory::SearchForNodeInList(int minimum_size,
-                                                 int* node_size) {
+FreeSpace* FreeListCategory::SearchForNodeInList(size_t minimum_size,
+                                                 size_t* node_size) {
   DCHECK(page()->CanAllocate());
 
   FreeSpace* prev_non_evac_node = nullptr;
   for (FreeSpace* cur_node = top(); cur_node != nullptr;
        cur_node = cur_node->next()) {
-    int size = cur_node->size();
+    size_t size = cur_node->size();
     if (size >= minimum_size) {
+      DCHECK_GE(available_, size);
       available_ -= size;
       if (cur_node == top()) {
         set_top(cur_node->next());
@@ -2392,7 +2402,7 @@
   return nullptr;
 }
 
-bool FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes,
+bool FreeListCategory::Free(FreeSpace* free_space, size_t size_in_bytes,
                             FreeMode mode) {
   if (!page()->CanAllocate()) return false;
 
@@ -2425,7 +2435,7 @@
 }
 
 void FreeListCategory::Invalidate() {
-  page()->add_available_in_free_list(-available());
+  page()->remove_available_in_free_list(available());
   Reset();
   type_ = kInvalidCategory;
 }
@@ -2447,10 +2457,10 @@
   ResetStats();
 }
 
-int FreeList::Free(Address start, int size_in_bytes, FreeMode mode) {
+size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
   if (size_in_bytes == 0) return 0;
 
-  owner()->heap()->CreateFillerObjectAt(start, size_in_bytes,
+  owner()->heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
                                         ClearRecordedSlots::kNo);
 
   Page* page = Page::FromAddress(start);
@@ -2469,10 +2479,11 @@
   if (page->free_list_category(type)->Free(free_space, size_in_bytes, mode)) {
     page->add_available_in_free_list(size_in_bytes);
   }
+  DCHECK_EQ(page->AvailableInFreeList(), page->available_in_free_list());
   return 0;
 }
 
-FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, int* node_size) {
+FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, size_t* node_size) {
   FreeListCategoryIterator it(this, type);
   FreeSpace* node = nullptr;
   while (it.HasNext()) {
@@ -2480,7 +2491,7 @@
     node = current->PickNodeFromList(node_size);
     if (node != nullptr) {
       Page::FromAddress(node->address())
-          ->add_available_in_free_list(-(*node_size));
+          ->remove_available_in_free_list(*node_size);
       DCHECK(IsVeryLong() || Available() == SumFreeLists());
       return node;
     }
@@ -2489,21 +2500,22 @@
   return node;
 }
 
-FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type, int* node_size,
-                                   int minimum_size) {
+FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type, size_t* node_size,
+                                   size_t minimum_size) {
   if (categories_[type] == nullptr) return nullptr;
   FreeSpace* node =
       categories_[type]->TryPickNodeFromList(minimum_size, node_size);
   if (node != nullptr) {
     Page::FromAddress(node->address())
-        ->add_available_in_free_list(-(*node_size));
+        ->remove_available_in_free_list(*node_size);
     DCHECK(IsVeryLong() || Available() == SumFreeLists());
   }
   return node;
 }
 
 FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
-                                         int* node_size, int minimum_size) {
+                                         size_t* node_size,
+                                         size_t minimum_size) {
   FreeListCategoryIterator it(this, type);
   FreeSpace* node = nullptr;
   while (it.HasNext()) {
@@ -2511,7 +2523,7 @@
     node = current->SearchForNodeInList(minimum_size, node_size);
     if (node != nullptr) {
       Page::FromAddress(node->address())
-          ->add_available_in_free_list(-(*node_size));
+          ->remove_available_in_free_list(*node_size);
       DCHECK(IsVeryLong() || Available() == SumFreeLists());
       return node;
     }
@@ -2522,7 +2534,7 @@
   return node;
 }
 
-FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
+FreeSpace* FreeList::FindNodeFor(size_t size_in_bytes, size_t* node_size) {
   FreeSpace* node = nullptr;
 
   // First try the allocation fast path: try to allocate the minimum element
@@ -2559,12 +2571,19 @@
 // allocation space has been set up with the top and limit of the space.  If
 // the allocation fails then NULL is returned, and the caller can perform a GC
 // or allocate a new page before retrying.
-HeapObject* FreeList::Allocate(int size_in_bytes) {
-  DCHECK(0 < size_in_bytes);
+HeapObject* FreeList::Allocate(size_t size_in_bytes) {
   DCHECK(size_in_bytes <= kMaxBlockSize);
   DCHECK(IsAligned(size_in_bytes, kPointerSize));
+  DCHECK_LE(owner_->top(), owner_->limit());
+#ifdef DEBUG
+  if (owner_->top() != owner_->limit()) {
+    DCHECK_EQ(Page::FromAddress(owner_->top()),
+              Page::FromAddress(owner_->limit() - 1));
+  }
+#endif
   // Don't free list allocate if there is linear space available.
-  DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
+  DCHECK_LT(static_cast<size_t>(owner_->limit() - owner_->top()),
+            size_in_bytes);
 
   // Mark the old linear allocation area with a free space map so it can be
   // skipped when scanning the heap.  This also puts it back in the free list
@@ -2574,15 +2593,15 @@
   owner_->heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
       Heap::kNoGCFlags, kNoGCCallbackFlags);
 
-  int new_node_size = 0;
+  size_t new_node_size = 0;
   FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
   if (new_node == nullptr) return nullptr;
 
-  int bytes_left = new_node_size - size_in_bytes;
-  DCHECK(bytes_left >= 0);
+  DCHECK_GE(new_node_size, size_in_bytes);
+  size_t bytes_left = new_node_size - size_in_bytes;
 
 #ifdef DEBUG
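+  // Zap the handed-out block in debug builds so that stale reads are easy to
+  // spot.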
-  for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
+  for (size_t i = 0; i < size_in_bytes / kPointerSize; i++) {
     reinterpret_cast<Object**>(new_node->address())[i] =
         Smi::FromInt(kCodeZapValue);
   }
@@ -2593,11 +2612,11 @@
   // candidate.
   DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
 
-  const int kThreshold = IncrementalMarking::kAllocatedThreshold;
+  const size_t kThreshold = IncrementalMarking::kAllocatedThreshold;
 
   // Memory in the linear allocation area is counted as allocated.  We may free
   // a little of this again immediately - see below.
-  owner_->Allocate(new_node_size);
+  owner_->Allocate(static_cast<int>(new_node_size));
 
   if (owner_->heap()->inline_allocation_disabled()) {
     // Keep the linear allocation area empty if requested to do so, just
@@ -2608,17 +2627,17 @@
   } else if (bytes_left > kThreshold &&
              owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
              FLAG_incremental_marking) {
-    int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
+    size_t linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
     // We don't want to give too large linear areas to the allocator while
     // incremental marking is going on, because we won't check again whether
     // we want to do another increment until the linear area is used up.
+    DCHECK_GE(new_node_size, size_in_bytes + linear_size);
     owner_->Free(new_node->address() + size_in_bytes + linear_size,
                  new_node_size - size_in_bytes - linear_size);
     owner_->SetAllocationInfo(
         new_node->address() + size_in_bytes,
         new_node->address() + size_in_bytes + linear_size);
   } else {
-    DCHECK(bytes_left >= 0);
     // Normally we give the rest of the node to the allocator as its new
     // linear allocation area.
     owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
@@ -2628,8 +2647,8 @@
   return new_node;
 }
 
-intptr_t FreeList::EvictFreeListItems(Page* page) {
-  intptr_t sum = 0;
+size_t FreeList::EvictFreeListItems(Page* page) {
+  size_t sum = 0;
   page->ForAllFreeListCategories(
       [this, &sum, page](FreeListCategory* category) {
         DCHECK_EQ(this, category->owner());
@@ -2703,8 +2722,8 @@
 
 
 #ifdef DEBUG
-intptr_t FreeListCategory::SumFreeList() {
-  intptr_t sum = 0;
+size_t FreeListCategory::SumFreeList() {
+  size_t sum = 0;
   FreeSpace* cur = top();
   while (cur != NULL) {
     DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
@@ -2741,8 +2760,8 @@
 // This can take a very long time because it is linear in the number of entries
 // on the free list, so it should not be called if FreeListLength returns
 // kVeryLongFreeList.
-intptr_t FreeList::SumFreeLists() {
-  intptr_t sum = 0;
+size_t FreeList::SumFreeLists() {
+  size_t sum = 0;
   ForAllFreeListCategories(
       [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
   return sum;
@@ -2762,13 +2781,10 @@
   free_list_.Reset();
 }
 
-
-intptr_t PagedSpace::SizeOfObjects() {
-  const intptr_t size = Size() - (limit() - top());
+size_t PagedSpace::SizeOfObjects() {
   CHECK_GE(limit(), top());
-  CHECK_GE(size, 0);
-  USE(size);
-  return size;
+  DCHECK_GE(Size(), static_cast<size_t>(limit() - top()));
+  return Size() - (limit() - top());
 }
 
 
@@ -2781,24 +2797,12 @@
   // Each page may have a small free space that is not tracked by a free list.
   // Update the maps for those free space objects.
   for (Page* page : *this) {
-    int size = static_cast<int>(page->wasted_memory());
+    size_t size = page->wasted_memory();
     if (size == 0) continue;
+    DCHECK_GE(static_cast<size_t>(Page::kPageSize), size);
     Address address = page->OffsetToAddress(Page::kPageSize - size);
-    heap()->CreateFillerObjectAt(address, size, ClearRecordedSlots::kNo);
-  }
-}
-
-
-void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
-  if (allocation_info_.top() >= allocation_info_.limit()) return;
-
-  if (!Page::FromAllocationAreaAddress(allocation_info_.top())->CanAllocate()) {
-    // Create filler object to keep page iterable if it was iterable.
-    int remaining =
-        static_cast<int>(allocation_info_.limit() - allocation_info_.top());
-    heap()->CreateFillerObjectAt(allocation_info_.top(), remaining,
+    heap()->CreateFillerObjectAt(address, static_cast<int>(size),
                                  ClearRecordedSlots::kNo);
-    allocation_info_.Reset(nullptr, nullptr);
   }
 }
 
@@ -2826,8 +2830,8 @@
   return nullptr;
 }
 
-
 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
+  DCHECK_GE(size_in_bytes, 0);
   const int kMaxPagesToSweep = 1;
 
   // Allocation in this space has failed.
@@ -2840,7 +2844,8 @@
     RefillFreeList();
 
     // Retry the free list allocation.
-    HeapObject* object = free_list_.Allocate(size_in_bytes);
+    HeapObject* object =
+        free_list_.Allocate(static_cast<size_t>(size_in_bytes));
     if (object != NULL) return object;
 
     // If sweeping is still in progress try to sweep pages on the main thread.
@@ -2848,15 +2853,15 @@
         identity(), size_in_bytes, kMaxPagesToSweep);
     RefillFreeList();
     if (max_freed >= size_in_bytes) {
-      object = free_list_.Allocate(size_in_bytes);
+      object = free_list_.Allocate(static_cast<size_t>(size_in_bytes));
       if (object != nullptr) return object;
     }
   }
 
-  if (heap()->ShouldExpandOldGenerationOnAllocationFailure() && Expand()) {
+  if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
     DCHECK((CountTotalPages() > 1) ||
-           (size_in_bytes <= free_list_.Available()));
-    return free_list_.Allocate(size_in_bytes);
+           (static_cast<size_t>(size_in_bytes) <= free_list_.Available()));
+    return free_list_.Allocate(static_cast<size_t>(size_in_bytes));
   }
 
   // If sweeper threads are active, wait for them at that point and steal
@@ -2897,7 +2902,7 @@
     return 0;
   }
   size_t used_size = RoundUp((object->address() - address()) + object->Size(),
-                             base::OS::CommitPageSize());
+                             MemoryAllocator::GetCommitPageSize());
   if (used_size < CommittedPhysicalMemory()) {
     return address() + used_size;
   }
@@ -2905,8 +2910,10 @@
 }
 
 void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
-  RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end());
-  RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end());
+  RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
+                                         SlotSet::FREE_EMPTY_BUCKETS);
+  RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
+                                         SlotSet::FREE_EMPTY_BUCKETS);
   RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(this, free_start, area_end());
   RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(this, free_start, area_end());
 }
@@ -2967,14 +2974,15 @@
                                                Executability executable) {
   // Check if we want to force a GC before growing the old space further.
   // If so, fail the allocation.
-  if (!heap()->CanExpandOldGeneration(object_size)) {
+  if (!heap()->CanExpandOldGeneration(object_size) ||
+      !heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
     return AllocationResult::Retry(identity());
   }
 
   LargePage* page = heap()->memory_allocator()->AllocateLargePage(
       object_size, this, executable);
   if (page == NULL) return AllocationResult::Retry(identity());
-  DCHECK(page->area_size() >= object_size);
+  DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
 
   size_ += static_cast<int>(page->size());
   AccountCommitted(page->size());
@@ -2993,7 +3001,7 @@
     // We only need to do this in debug builds or if verify_heap is on.
     reinterpret_cast<Object**>(object->address())[0] =
         heap()->fixed_array_map();
-    reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
+    reinterpret_cast<Object**>(object->address())[1] = Smi::kZero;
   }
 
   heap()->StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
@@ -3022,7 +3030,7 @@
   if (page != NULL) {
     return page->GetObject();
   }
-  return Smi::FromInt(0);  // Signaling not found.
+  return Smi::kZero;  // Signaling not found.
 }
 
 
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index 732ba7e..f5701ad 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -135,7 +135,8 @@
 class FreeListCategory {
  public:
   static const int kSize = kIntSize +      // FreeListCategoryType type_
-                           kIntSize +      // int available_
+                           kIntSize +      // padding for type_
+                           kSizetSize +    // size_t available_
                            kPointerSize +  // FreeSpace* top_
                            kPointerSize +  // FreeListCategory* prev_
                            kPointerSize;   // FreeListCategory* next_
@@ -167,28 +168,28 @@
   // category is currently unlinked.
   void Relink();
 
-  bool Free(FreeSpace* node, int size_in_bytes, FreeMode mode);
+  bool Free(FreeSpace* node, size_t size_in_bytes, FreeMode mode);
 
   // Picks a node from the list and stores its size in |node_size|. Returns
   // nullptr if the category is empty.
-  FreeSpace* PickNodeFromList(int* node_size);
+  FreeSpace* PickNodeFromList(size_t* node_size);
 
   // Performs a single try to pick a node of at least |minimum_size| from the
   // category. Stores the actual size in |node_size|. Returns nullptr if no
   // node is found.
-  FreeSpace* TryPickNodeFromList(int minimum_size, int* node_size);
+  FreeSpace* TryPickNodeFromList(size_t minimum_size, size_t* node_size);
 
   // Picks a node of at least |minimum_size| from the category. Stores the
   // actual size in |node_size|. Returns nullptr if no node is found.
-  FreeSpace* SearchForNodeInList(int minimum_size, int* node_size);
+  FreeSpace* SearchForNodeInList(size_t minimum_size, size_t* node_size);
 
   inline FreeList* owner();
   inline bool is_linked();
   bool is_empty() { return top() == nullptr; }
-  int available() const { return available_; }
+  size_t available() const { return available_; }
 
 #ifdef DEBUG
-  intptr_t SumFreeList();
+  size_t SumFreeList();
   int FreeListLength();
 #endif
 
@@ -211,7 +212,7 @@
 
   // |available_|: Total available bytes in all blocks of this free list
   // category.
-  int available_;
+  size_t available_;
 
   // |top_|: Points to the top FreeSpace* in the free list category.
   FreeSpace* top_;
@@ -310,11 +311,6 @@
     kSweepingInProgress,
   };
 
-  // Every n write barrier invocations we go to runtime even though
-  // we could have handled it in generated code.  This lets us check
-  // whether we have hit the limit and should do some more marking.
-  static const int kWriteBarrierCounterGranularity = 500;
-
   static const intptr_t kAlignment =
       (static_cast<uintptr_t>(1) << kPageSizeBits);
 
@@ -324,36 +320,30 @@
 
   static const intptr_t kFlagsOffset = kSizeOffset + kPointerSize;
 
-  static const size_t kWriteBarrierCounterOffset =
-      kSizeOffset + kPointerSize  // size_t size
-      + kIntptrSize               // Flags flags_
-      + kPointerSize              // Address area_start_
-      + kPointerSize              // Address area_end_
-      + 2 * kPointerSize          // base::VirtualMemory reservation_
-      + kPointerSize              // Address owner_
-      + kPointerSize              // Heap* heap_
-      + kIntSize                  // int progress_bar_
-      + kIntSize                  // int live_bytes_count_
-      + kPointerSize              // SlotSet* old_to_new_slots_;
-      + kPointerSize              // SlotSet* old_to_old_slots_;
-      + kPointerSize              // TypedSlotSet* typed_old_to_new_slots_;
-      + kPointerSize              // TypedSlotSet* typed_old_to_old_slots_;
-      + kPointerSize;             // SkipList* skip_list_;
-
   static const size_t kMinHeaderSize =
-      kWriteBarrierCounterOffset +
-      kIntptrSize         // intptr_t write_barrier_counter_
-      + kPointerSize      // AtomicValue high_water_mark_
-      + kPointerSize      // base::Mutex* mutex_
-      + kPointerSize      // base::AtomicWord concurrent_sweeping_
-      + 2 * kPointerSize  // AtomicNumber free-list statistics
-      + kPointerSize      // AtomicValue next_chunk_
-      + kPointerSize      // AtomicValue prev_chunk_
+      kSizeOffset + kSizetSize  // size_t size
+      + kIntptrSize             // Flags flags_
+      + kPointerSize            // Address area_start_
+      + kPointerSize            // Address area_end_
+      + 2 * kPointerSize        // base::VirtualMemory reservation_
+      + kPointerSize            // Address owner_
+      + kPointerSize            // Heap* heap_
+      + kIntSize                // int progress_bar_
+      + kIntSize                // int live_bytes_count_
+      + kPointerSize            // SlotSet* old_to_new_slots_
+      + kPointerSize            // SlotSet* old_to_old_slots_
+      + kPointerSize            // TypedSlotSet* typed_old_to_new_slots_
+      + kPointerSize            // TypedSlotSet* typed_old_to_old_slots_
+      + kPointerSize            // SkipList* skip_list_
+      + kPointerSize            // AtomicValue high_water_mark_
+      + kPointerSize            // base::Mutex* mutex_
+      + kPointerSize            // base::AtomicWord concurrent_sweeping_
+      + 2 * kSizetSize          // AtomicNumber free-list statistics
+      + kPointerSize            // AtomicValue next_chunk_
+      + kPointerSize            // AtomicValue prev_chunk_
       // FreeListCategory categories_[kNumberOfCategories]
       + FreeListCategory::kSize * kNumberOfCategories +
-      kPointerSize  // LocalArrayBufferTracker* local_tracker_
-      // std::unordered_set<Address>* black_area_end_marker_map_
-      + kPointerSize;
+      kPointerSize;  // LocalArrayBufferTracker* local_tracker_
 
   // We add some more space to the computed header size to account for missing
   // alignment requirements in our computation.
@@ -421,6 +411,10 @@
     return concurrent_sweeping_;
   }
 
+  bool SweepingDone() {
+    return concurrent_sweeping_state().Value() == kSweepingDone;
+  }
+
   // Manage live byte count, i.e., count of bytes in black objects.
   inline void ResetLiveBytes();
   inline void IncrementLiveBytes(int by);
@@ -436,14 +430,6 @@
     live_byte_count_ = live_bytes;
   }
 
-  int write_barrier_counter() {
-    return static_cast<int>(write_barrier_counter_);
-  }
-
-  void set_write_barrier_counter(int counter) {
-    write_barrier_counter_ = counter;
-  }
-
   size_t size() const { return size_; }
   void set_size(size_t size) { size_ = size; }
 
@@ -465,7 +451,7 @@
 
   V8_EXPORT_PRIVATE void AllocateOldToNewSlots();
   void ReleaseOldToNewSlots();
-  void AllocateOldToOldSlots();
+  V8_EXPORT_PRIVATE void AllocateOldToOldSlots();
   void ReleaseOldToOldSlots();
   void AllocateTypedOldToNewSlots();
   void ReleaseTypedOldToNewSlots();
@@ -476,7 +462,7 @@
 
   Address area_start() { return area_start_; }
   Address area_end() { return area_end_; }
-  int area_size() { return static_cast<int>(area_end() - area_start()); }
+  size_t area_size() { return static_cast<size_t>(area_end() - area_start()); }
 
   bool CommitArea(size_t requested);
 
@@ -588,33 +574,6 @@
   void InsertAfter(MemoryChunk* other);
   void Unlink();
 
-  void ReleaseBlackAreaEndMarkerMap() {
-    if (black_area_end_marker_map_) {
-      delete black_area_end_marker_map_;
-      black_area_end_marker_map_ = nullptr;
-    }
-  }
-
-  bool IsBlackAreaEndMarker(Address address) {
-    if (black_area_end_marker_map_) {
-      return black_area_end_marker_map_->find(address) !=
-             black_area_end_marker_map_->end();
-    }
-    return false;
-  }
-
-  void AddBlackAreaEndMarker(Address address) {
-    if (!black_area_end_marker_map_) {
-      black_area_end_marker_map_ = new std::unordered_set<Address>();
-    }
-    auto ret = black_area_end_marker_map_->insert(address);
-    USE(ret);
-    // Check that we inserted a new black area end marker.
-    DCHECK(ret.second);
-  }
-
-  bool HasBlackAreas() { return black_area_end_marker_map_ != nullptr; }
-
  protected:
   static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
                                  Address area_start, Address area_end,
@@ -660,8 +619,6 @@
 
   SkipList* skip_list_;
 
-  intptr_t write_barrier_counter_;
-
   // Assuming the initial allocation on a page is sequential,
   // count highest number of bytes ever allocated on the page.
   base::AtomicValue<intptr_t> high_water_mark_;
@@ -683,9 +640,6 @@
 
   LocalArrayBufferTracker* local_tracker_;
 
-  // Stores the end addresses of black areas.
-  std::unordered_set<Address>* black_area_end_marker_map_;
-
  private:
   void InitializeReservedMemory() { reservation_.Reset(); }
 
@@ -713,7 +667,7 @@
       static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
       static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
 
-  static inline Page* ConvertNewToOld(Page* old_page, PagedSpace* new_owner);
+  static inline Page* ConvertNewToOld(Page* old_page);
 
   // Returns the page containing a given address. The address ranges
   // from [page_addr .. page_addr + kPageSize[. This only works if the object
@@ -768,13 +722,10 @@
   }
 
   // Returns the offset of a given address to this page.
-  inline int Offset(Address a) {
-    int offset = static_cast<int>(a - address());
-    return offset;
-  }
+  inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }
 
   // Returns the address for a given offset into this page.
-  Address OffsetToAddress(int offset) {
+  Address OffsetToAddress(size_t offset) {
     DCHECK_PAGE_OFFSET(offset);
     return address() + offset;
   }
@@ -788,15 +739,13 @@
     DCHECK(SweepingDone());
   }
 
-  bool SweepingDone() {
-    return concurrent_sweeping_state().Value() == kSweepingDone;
-  }
-
   void ResetFreeListStatistics();
 
-  int LiveBytesFromFreeList() {
-    return static_cast<int>(area_size() - wasted_memory() -
-                            available_in_free_list());
+  size_t AvailableInFreeList();
+
+  size_t LiveBytesFromFreeList() {
+    DCHECK_GE(area_size(), wasted_memory() + available_in_free_list());
+    return area_size() - wasted_memory() - available_in_free_list();
   }
 
   FreeListCategory* free_list_category(FreeListCategoryType type) {
@@ -805,12 +754,18 @@
 
   bool is_anchor() { return IsFlagSet(Page::ANCHOR); }
 
-  intptr_t wasted_memory() { return wasted_memory_.Value(); }
-  void add_wasted_memory(intptr_t waste) { wasted_memory_.Increment(waste); }
-  intptr_t available_in_free_list() { return available_in_free_list_.Value(); }
-  void add_available_in_free_list(intptr_t available) {
+  size_t wasted_memory() { return wasted_memory_.Value(); }
+  void add_wasted_memory(size_t waste) { wasted_memory_.Increment(waste); }
+  size_t available_in_free_list() { return available_in_free_list_.Value(); }
+  void add_available_in_free_list(size_t available) {
+    DCHECK_LE(available, area_size());
     available_in_free_list_.Increment(available);
   }
+  void remove_available_in_free_list(size_t available) {
+    DCHECK_LE(available, area_size());
+    DCHECK_GE(available_in_free_list(), available);
+    available_in_free_list_.Decrement(available);
+  }
 
   size_t ShrinkToHighWaterMark();
 
@@ -914,17 +869,17 @@
   virtual size_t MaximumCommittedMemory() { return max_committed_; }
 
   // Returns allocated size.
-  virtual intptr_t Size() = 0;
+  virtual size_t Size() = 0;
 
   // Returns size of objects. Can differ from the allocated size
   // (e.g. see LargeObjectSpace).
-  virtual intptr_t SizeOfObjects() { return Size(); }
+  virtual size_t SizeOfObjects() { return Size(); }
 
   // Approximate amount of physical memory committed for this space.
   virtual size_t CommittedPhysicalMemory() = 0;
 
   // Return the available bytes without growing.
-  virtual intptr_t Available() = 0;
+  virtual size_t Available() = 0;
 
   virtual int RoundSizeDownToObjectAlignment(int size) {
     if (id_ == CODE_SPACE) {
@@ -973,8 +928,6 @@
 class MemoryChunkValidator {
   // Computed offsets should match the compiler generated ones.
   STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
-  STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset ==
-                offsetof(MemoryChunk, write_barrier_counter_));
 
   // Validate our estimates on the header size.
   STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
@@ -1137,7 +1090,7 @@
 
 // ----------------------------------------------------------------------------
 // A space acquires chunks of memory from the operating system. The memory
-// allocator allocated and deallocates pages for the paged heap spaces and large
+// allocator allocates and deallocates pages for the paged heap spaces and large
 // pages for large object space.
 class MemoryAllocator {
  public:
@@ -1180,6 +1133,7 @@
 
     void FreeQueuedChunks();
     bool WaitUntilCompleted();
+    void TearDown();
 
    private:
     enum ChunkQueueType {
@@ -1237,30 +1191,32 @@
     kPooledAndQueue,
   };
 
-  static int CodePageGuardStartOffset();
+  static size_t CodePageGuardStartOffset();
 
-  static int CodePageGuardSize();
+  static size_t CodePageGuardSize();
 
-  static int CodePageAreaStartOffset();
+  static size_t CodePageAreaStartOffset();
 
-  static int CodePageAreaEndOffset();
+  static size_t CodePageAreaEndOffset();
 
-  static int CodePageAreaSize() {
+  static size_t CodePageAreaSize() {
     return CodePageAreaEndOffset() - CodePageAreaStartOffset();
   }
 
-  static int PageAreaSize(AllocationSpace space) {
+  static size_t PageAreaSize(AllocationSpace space) {
     DCHECK_NE(LO_SPACE, space);
     return (space == CODE_SPACE) ? CodePageAreaSize()
                                  : Page::kAllocatableMemory;
   }
 
+  static intptr_t GetCommitPageSize();
+
   explicit MemoryAllocator(Isolate* isolate);
 
   // Initializes its internal bookkeeping structures.
   // Max capacity of the total space and executable memory limit.
-  bool SetUp(intptr_t max_capacity, intptr_t capacity_executable,
-             intptr_t code_range_size);
+  bool SetUp(size_t max_capacity, size_t capacity_executable,
+             size_t code_range_size);
 
   void TearDown();
 
@@ -1269,9 +1225,9 @@
   // should be tried first.
   template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
             typename SpaceType>
-  Page* AllocatePage(intptr_t size, SpaceType* owner, Executability executable);
+  Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);
 
-  LargePage* AllocateLargePage(intptr_t size, LargeObjectSpace* owner,
+  LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
                                Executability executable);
 
   template <MemoryAllocator::FreeMode mode = kFull>
@@ -1313,8 +1269,7 @@
   // Returns a MemoryChunk in which the memory region from commit_area_size to
   // reserve_area_size of the chunk area is reserved but not committed; it
   // can be committed later by calling MemoryChunk::CommitArea.
-  MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
-                             intptr_t commit_area_size,
+  MemoryChunk* AllocateChunk(size_t reserve_area_size, size_t commit_area_size,
                              Executability executable, Space* space);
 
   void ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink);
@@ -1690,7 +1645,7 @@
  public:
   // This method returns how much memory can be allocated after freeing
   // maximum_freed memory.
-  static inline int GuaranteedAllocatable(int maximum_freed) {
+  static inline size_t GuaranteedAllocatable(size_t maximum_freed) {
     if (maximum_freed <= kTiniestListMax) {
       // Since we are not iterating over all list entries, we cannot guarantee
       // that we can find the maximum freed block in that free list.
@@ -1715,12 +1670,12 @@
   // was too small. Bookkeeping information will be written to the block, i.e.,
   // its contents will be destroyed. The start address should be word aligned,
   // and the size should be a non-zero multiple of the word size.
-  int Free(Address start, int size_in_bytes, FreeMode mode);
+  size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
 
   // Allocate a block of size {size_in_bytes} from the free list. The block is
   // uninitialized. A failure is returned if no block is available. The size
   // should be a non-zero multiple of the word size.
-  MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
+  MUST_USE_RESULT HeapObject* Allocate(size_t size_in_bytes);
 
   // Clear the free list.
   void Reset();
@@ -1732,8 +1687,8 @@
   }
 
   // Return the number of bytes available on the free list.
-  intptr_t Available() {
-    intptr_t available = 0;
+  size_t Available() {
+    size_t available = 0;
     ForAllFreeListCategories([&available](FreeListCategory* category) {
       available += category->available();
     });
@@ -1751,11 +1706,11 @@
   // Used after booting the VM.
   void RepairLists(Heap* heap);
 
-  intptr_t EvictFreeListItems(Page* page);
+  size_t EvictFreeListItems(Page* page);
   bool ContainsPageFreeListItems(Page* page);
 
   PagedSpace* owner() { return owner_; }
-  intptr_t wasted_bytes() { return wasted_bytes_.Value(); }
+  size_t wasted_bytes() { return wasted_bytes_.Value(); }
 
   template <typename Callback>
   void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
@@ -1779,7 +1734,7 @@
   void PrintCategories(FreeListCategoryType type);
 
 #ifdef DEBUG
-  intptr_t SumFreeLists();
+  size_t SumFreeLists();
   bool IsVeryLong();
 #endif
 
@@ -1803,33 +1758,33 @@
   };
 
   // The size range of blocks, in bytes.
-  static const int kMinBlockSize = 3 * kPointerSize;
-  static const int kMaxBlockSize = Page::kAllocatableMemory;
+  static const size_t kMinBlockSize = 3 * kPointerSize;
+  static const size_t kMaxBlockSize = Page::kAllocatableMemory;
 
-  static const int kTiniestListMax = 0xa * kPointerSize;
-  static const int kTinyListMax = 0x1f * kPointerSize;
-  static const int kSmallListMax = 0xff * kPointerSize;
-  static const int kMediumListMax = 0x7ff * kPointerSize;
-  static const int kLargeListMax = 0x3fff * kPointerSize;
-  static const int kTinyAllocationMax = kTiniestListMax;
-  static const int kSmallAllocationMax = kTinyListMax;
-  static const int kMediumAllocationMax = kSmallListMax;
-  static const int kLargeAllocationMax = kMediumListMax;
+  static const size_t kTiniestListMax = 0xa * kPointerSize;
+  static const size_t kTinyListMax = 0x1f * kPointerSize;
+  static const size_t kSmallListMax = 0xff * kPointerSize;
+  static const size_t kMediumListMax = 0x7ff * kPointerSize;
+  static const size_t kLargeListMax = 0x3fff * kPointerSize;
+  static const size_t kTinyAllocationMax = kTiniestListMax;
+  static const size_t kSmallAllocationMax = kTinyListMax;
+  static const size_t kMediumAllocationMax = kSmallListMax;
+  static const size_t kLargeAllocationMax = kMediumListMax;
 
-  FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
+  FreeSpace* FindNodeFor(size_t size_in_bytes, size_t* node_size);
 
   // Walks all available categories for a given |type| and tries to retrieve
   // a node. Returns nullptr if the category is empty.
-  FreeSpace* FindNodeIn(FreeListCategoryType type, int* node_size);
+  FreeSpace* FindNodeIn(FreeListCategoryType type, size_t* node_size);
 
   // Tries to retrieve a node from the first category in a given |type|.
   // Returns nullptr if the category is empty.
-  FreeSpace* TryFindNodeIn(FreeListCategoryType type, int* node_size,
-                           int minimum_size);
+  FreeSpace* TryFindNodeIn(FreeListCategoryType type, size_t* node_size,
+                           size_t minimum_size);
 
   // Searches a given |type| for a node of at least |minimum_size|.
-  FreeSpace* SearchForNodeInList(FreeListCategoryType type, int* node_size,
-                                 int minimum_size);
+  FreeSpace* SearchForNodeInList(FreeListCategoryType type, size_t* node_size,
+                                 size_t minimum_size);
 
   FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
     if (size_in_bytes <= kTiniestListMax) {
@@ -1862,7 +1817,7 @@
   FreeListCategory* top(FreeListCategoryType type) { return categories_[type]; }
 
   PagedSpace* owner_;
-  base::AtomicNumber<intptr_t> wasted_bytes_;
+  base::AtomicNumber<size_t> wasted_bytes_;
   FreeListCategory* categories_[kNumberOfCategories];
 
   friend class FreeListCategory;
@@ -1974,7 +1929,7 @@
   void PrepareForMarkCompact();
 
   // Current capacity without growing (Size() + Available()).
-  intptr_t Capacity() { return accounting_stats_.Capacity(); }
+  size_t Capacity() { return accounting_stats_.Capacity(); }
 
   // Approximate amount of physical memory committed for this space.
   size_t CommittedPhysicalMemory() override;
@@ -1996,21 +1951,21 @@
   // The bytes in the linear allocation area are not included in this total
   // because updating the stats would slow down allocation.  New pages are
   // immediately added to the free list so they show up here.
-  intptr_t Available() override { return free_list_.Available(); }
+  size_t Available() override { return free_list_.Available(); }
 
   // Allocated bytes in this space.  Garbage bytes that were not found due to
   // concurrent sweeping are counted as being allocated!  The bytes in the
   // current linear allocation area (between top and limit) are also counted
   // here.
-  intptr_t Size() override { return accounting_stats_.Size(); }
+  size_t Size() override { return accounting_stats_.Size(); }
 
   // As size, but the bytes in lazily swept pages are estimated and the bytes
   // in the current linear allocation area are not included.
-  intptr_t SizeOfObjects() override;
+  size_t SizeOfObjects() override;
 
   // Wasted bytes in this space.  These are just the bytes that were thrown away
   // due to being too small to use for allocation.
-  virtual intptr_t Waste() { return free_list_.wasted_bytes(); }
+  virtual size_t Waste() { return free_list_.wasted_bytes(); }
 
   // Returns the allocation pointer in this space.
   Address top() { return allocation_info_.top(); }
@@ -2049,14 +2004,16 @@
   // the free list or accounted as waste.
   // UnaccountedFree() below frees the memory without updating the accounting
   // stats and without linking the free-list category back into the space.
-  int Free(Address start, int size_in_bytes) {
-    int wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
+  size_t Free(Address start, size_t size_in_bytes) {
+    size_t wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
     accounting_stats_.DeallocateBytes(size_in_bytes);
+    DCHECK_GE(size_in_bytes, wasted);
     return size_in_bytes - wasted;
   }
 
-  int UnaccountedFree(Address start, int size_in_bytes) {
-    int wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
+  size_t UnaccountedFree(Address start, size_t size_in_bytes) {
+    size_t wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
+    DCHECK_GE(size_in_bytes, wasted);
     return size_in_bytes - wasted;
   }
 
@@ -2112,15 +2069,13 @@
   Page* FirstPage() { return anchor_.next_page(); }
   Page* LastPage() { return anchor_.prev_page(); }
 
-  void EvictEvacuationCandidatesFromLinearAllocationArea();
-
   bool CanExpand(size_t size);
 
   // Returns the number of total pages in this space.
   int CountTotalPages();
 
   // Return size of allocatable area on a page in this space.
-  inline int AreaSize() { return area_size_; }
+  inline int AreaSize() { return static_cast<int>(area_size_); }
 
   virtual bool is_local() { return false; }
 
@@ -2183,7 +2138,7 @@
   // Slow path of AllocateRaw.  This function is space-dependent.
   MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
 
-  int area_size_;
+  size_t area_size_;
 
   // Accounting information for this space.
   AllocationStats accounting_stats_;
@@ -2237,7 +2192,7 @@
   inline bool Contains(Object* o);
   inline bool ContainsSlow(Address a);
 
-  void SetUp(int initial_capacity, int maximum_capacity);
+  void SetUp(size_t initial_capacity, size_t maximum_capacity);
   void TearDown();
   bool HasBeenSetUp() { return maximum_capacity_ != 0; }
 
@@ -2247,12 +2202,12 @@
 
   // Grow the semispace to the new capacity.  The new capacity requested must
   // be larger than the current capacity and less than the maximum capacity.
-  bool GrowTo(int new_capacity);
+  bool GrowTo(size_t new_capacity);
 
   // Shrinks the semispace to the new capacity.  The new capacity requested
   // must be more than the amount of used memory in the semispace and less
   // than the current capacity.
-  bool ShrinkTo(int new_capacity);
+  bool ShrinkTo(size_t new_capacity);
 
   bool EnsureCurrentCapacity();
 
@@ -2300,13 +2255,13 @@
   void set_age_mark(Address mark);
 
   // Returns the current capacity of the semispace.
-  int current_capacity() { return current_capacity_; }
+  size_t current_capacity() { return current_capacity_; }
 
   // Returns the maximum capacity of the semispace.
-  int maximum_capacity() { return maximum_capacity_; }
+  size_t maximum_capacity() { return maximum_capacity_; }
 
   // Returns the initial capacity of the semispace.
-  int minimum_capacity() { return minimum_capacity_; }
+  size_t minimum_capacity() { return minimum_capacity_; }
 
   SemiSpaceId id() { return id_; }
 
@@ -2316,14 +2271,14 @@
   // If we don't have these here then SemiSpace will be abstract.  However
   // they should never be called:
 
-  intptr_t Size() override {
+  size_t Size() override {
     UNREACHABLE();
     return 0;
   }
 
-  intptr_t SizeOfObjects() override { return Size(); }
+  size_t SizeOfObjects() override { return Size(); }
 
-  intptr_t Available() override {
+  size_t Available() override {
     UNREACHABLE();
     return 0;
   }
@@ -2352,20 +2307,22 @@
   void RewindPages(Page* start, int num_pages);
 
   inline Page* anchor() { return &anchor_; }
-  inline int max_pages() { return current_capacity_ / Page::kPageSize; }
+  inline int max_pages() {
+    return static_cast<int>(current_capacity_ / Page::kPageSize);
+  }
 
   // Copies the flags into the masked positions on all pages in the space.
   void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
 
   // The currently committed space capacity.
-  int current_capacity_;
+  size_t current_capacity_;
 
   // The maximum capacity that can be used by this space. A space cannot grow
   // beyond that size.
-  int maximum_capacity_;
+  size_t maximum_capacity_;
 
   // The minimum capacity for the space. A space cannot shrink below this size.
-  int minimum_capacity_;
+  size_t minimum_capacity_;
 
   // Used to govern object promotion during mark-compact collection.
   Address age_mark_;
@@ -2426,7 +2383,7 @@
   inline bool ContainsSlow(Address a);
   inline bool Contains(Object* o);
 
-  bool SetUp(int initial_semispace_capacity, int max_semispace_capacity);
+  bool SetUp(size_t initial_semispace_capacity, size_t max_semispace_capacity);
 
   // Tears down the space.  Heap memory was not allocated by the space, so it
   // is not deallocated here.
@@ -2448,15 +2405,16 @@
   void Shrink();
 
   // Return the allocated bytes in the active semispace.
-  intptr_t Size() override {
+  size_t Size() override {
+    DCHECK_GE(top(), to_space_.page_low());
     return to_space_.pages_used() * Page::kAllocatableMemory +
-           static_cast<int>(top() - to_space_.page_low());
+           static_cast<size_t>(top() - to_space_.page_low());
   }
 
-  intptr_t SizeOfObjects() override { return Size(); }
+  size_t SizeOfObjects() override { return Size(); }
 
   // Return the allocatable capacity of a semispace.
-  intptr_t Capacity() {
+  size_t Capacity() {
     SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
     return (to_space_.current_capacity() / Page::kPageSize) *
            Page::kAllocatableMemory;
@@ -2464,7 +2422,7 @@
 
   // Return the current size of a semispace, allocatable and non-allocatable
   // memory.
-  intptr_t TotalCapacity() {
+  size_t TotalCapacity() {
     DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
     return to_space_.current_capacity();
   }
@@ -2484,7 +2442,10 @@
   size_t CommittedPhysicalMemory() override;
 
   // Return the available bytes without growing.
-  intptr_t Available() override { return Capacity() - Size(); }
+  size_t Available() override {
+    DCHECK_GE(Capacity(), Size());
+    return Capacity() - Size();
+  }
 
   size_t AllocatedSinceLastGC() {
     bool seen_age_mark = false;
@@ -2510,17 +2471,18 @@
       // Top was reset at some point, invalidating this metric.
       return 0;
     }
-    intptr_t allocated = age_mark_page->area_end() - age_mark;
+    DCHECK_GE(age_mark_page->area_end(), age_mark);
+    size_t allocated = age_mark_page->area_end() - age_mark;
     DCHECK_EQ(current_page, age_mark_page);
     current_page = age_mark_page->next_page();
     while (current_page != last_page) {
       allocated += Page::kAllocatableMemory;
       current_page = current_page->next_page();
     }
+    DCHECK_GE(top(), current_page->area_start());
     allocated += top() - current_page->area_start();
-    DCHECK_LE(0, allocated);
     DCHECK_LE(allocated, Size());
-    return static_cast<size_t>(allocated);
+    return allocated;
   }
 
   void MovePageFromSpaceToSpace(Page* page) {
@@ -2532,7 +2494,7 @@
   bool Rebalance();
 
   // Return the maximum capacity of a semispace.
-  int MaximumCapacity() {
+  size_t MaximumCapacity() {
     DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
     return to_space_.maximum_capacity();
   }
@@ -2540,7 +2502,7 @@
   bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
 
   // Returns the initial capacity of a semispace.
-  int InitialTotalCapacity() {
+  size_t InitialTotalCapacity() {
     DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
     return to_space_.minimum_capacity();
   }
@@ -2832,7 +2794,7 @@
   // Releases internal resources, frees objects in this space.
   void TearDown();
 
-  static intptr_t ObjectSizeFor(intptr_t chunk_size) {
+  static size_t ObjectSizeFor(size_t chunk_size) {
     if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
     return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
   }
@@ -2843,11 +2805,11 @@
       AllocateRaw(int object_size, Executability executable);
 
   // Available bytes for objects in this space.
-  inline intptr_t Available() override;
+  inline size_t Available() override;
 
-  intptr_t Size() override { return size_; }
+  size_t Size() override { return size_; }
 
-  intptr_t SizeOfObjects() override { return objects_size_; }
+  size_t SizeOfObjects() override { return objects_size_; }
 
   // Approximate amount of physical memory committed for this space.
   size_t CommittedPhysicalMemory() override;
@@ -2905,9 +2867,9 @@
  private:
   // The head of the linked list of large object chunks.
   LargePage* first_page_;
-  intptr_t size_;          // allocated bytes
+  size_t size_;            // allocated bytes
   int page_count_;         // number of chunks
-  intptr_t objects_size_;  // size of objects
+  size_t objects_size_;    // size of objects
   // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
   base::HashMap chunk_map_;
 
diff --git a/src/heap/store-buffer.cc b/src/heap/store-buffer.cc
index a982eb3..974b85e 100644
--- a/src/heap/store-buffer.cc
+++ b/src/heap/store-buffer.cc
@@ -16,63 +16,150 @@
 namespace internal {
 
 StoreBuffer::StoreBuffer(Heap* heap)
-    : heap_(heap),
-      top_(nullptr),
-      start_(nullptr),
-      limit_(nullptr),
-      virtual_memory_(nullptr) {}
+    : heap_(heap), top_(nullptr), current_(0), virtual_memory_(nullptr) {
+  for (int i = 0; i < kStoreBuffers; i++) {
+    start_[i] = nullptr;
+    limit_[i] = nullptr;
+    lazy_top_[i] = nullptr;
+  }
+  task_running_ = false;
+}
 
 void StoreBuffer::SetUp() {
   // Allocate 3x the buffer size so that the two store buffers, laid out
   // contiguously, can start at an address aligned to kStoreBufferSize.  This
   // alignment lets us use a bit test against kStoreBufferMask to detect a
   // full buffer.
-  virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 2);
+  virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3);
   uintptr_t start_as_int =
       reinterpret_cast<uintptr_t>(virtual_memory_->address());
-  start_ = reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize));
-  limit_ = start_ + (kStoreBufferSize / kPointerSize);
+  start_[0] =
+      reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize));
+  limit_[0] = start_[0] + (kStoreBufferSize / kPointerSize);
+  start_[1] = limit_[0];
+  limit_[1] = start_[1] + (kStoreBufferSize / kPointerSize);
 
-  DCHECK(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
-  DCHECK(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
   Address* vm_limit = reinterpret_cast<Address*>(
       reinterpret_cast<char*>(virtual_memory_->address()) +
       virtual_memory_->size());
-  DCHECK(start_ <= vm_limit);
-  DCHECK(limit_ <= vm_limit);
-  USE(vm_limit);
-  DCHECK((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferMask) == 0);
 
-  if (!virtual_memory_->Commit(reinterpret_cast<Address>(start_),
-                               kStoreBufferSize,
+  USE(vm_limit);
+  for (int i = 0; i < kStoreBuffers; i++) {
+    DCHECK(reinterpret_cast<Address>(start_[i]) >= virtual_memory_->address());
+    DCHECK(reinterpret_cast<Address>(limit_[i]) >= virtual_memory_->address());
+    DCHECK(start_[i] <= vm_limit);
+    DCHECK(limit_[i] <= vm_limit);
+    DCHECK((reinterpret_cast<uintptr_t>(limit_[i]) & kStoreBufferMask) == 0);
+  }
+
+  if (!virtual_memory_->Commit(reinterpret_cast<Address>(start_[0]),
+                               kStoreBufferSize * kStoreBuffers,
                                false)) {  // Not executable.
     V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
   }
-  top_ = start_;
+  current_ = 0;
+  top_ = start_[current_];
 }
 
 
 void StoreBuffer::TearDown() {
   delete virtual_memory_;
-  top_ = start_ = limit_ = nullptr;
+  top_ = nullptr;
+  for (int i = 0; i < kStoreBuffers; i++) {
+    start_[i] = nullptr;
+    limit_[i] = nullptr;
+    lazy_top_[i] = nullptr;
+  }
 }
 
 
 void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
-  isolate->heap()->store_buffer()->MoveEntriesToRememberedSet();
+  isolate->heap()->store_buffer()->FlipStoreBuffers();
   isolate->counters()->store_buffer_overflows()->Increment();
 }
 
-void StoreBuffer::MoveEntriesToRememberedSet() {
-  if (top_ == start_) return;
-  DCHECK(top_ <= limit_);
-  for (Address* current = start_; current < top_; current++) {
+void StoreBuffer::FlipStoreBuffers() {
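+  // Hand the filled buffer off for background processing (its end is saved in
+  // lazy_top_) and redirect new entries to the other buffer.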
+  base::LockGuard<base::Mutex> guard(&mutex_);
+  int other = (current_ + 1) % kStoreBuffers;
+  MoveEntriesToRememberedSet(other);
+  lazy_top_[current_] = top_;
+  current_ = other;
+  top_ = start_[current_];
+
+  if (!task_running_) {
+    task_running_ = true;
+    Task* task = new Task(heap_->isolate(), this);
+    V8::GetCurrentPlatform()->CallOnBackgroundThread(
+        task, v8::Platform::kShortRunningTask);
+  }
+}
+
+void StoreBuffer::MoveEntriesToRememberedSet(int index) {
+  DCHECK_GE(index, 0);
+  DCHECK_LT(index, kStoreBuffers);
+  if (!lazy_top_[index]) return;
+  for (Address* current = start_[index]; current < lazy_top_[index];
+       current++) {
     DCHECK(!heap_->code_space()->Contains(*current));
     Address addr = *current;
     Page* page = Page::FromAnyPointerAddress(heap_, addr);
-    RememberedSet<OLD_TO_NEW>::Insert(page, addr);
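+    // A deletion occupies two consecutive slots; the loop consumes both.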
+    if (IsDeletionAddress(addr)) {
+      current++;
+      Address end = *current;
+      DCHECK(!IsDeletionAddress(end));
+      addr = UnmarkDeletionAddress(addr);
+      if (end) {
+        RememberedSet<OLD_TO_NEW>::RemoveRange(page, addr, end,
+                                               SlotSet::PREFREE_EMPTY_BUCKETS);
+      } else {
+        RememberedSet<OLD_TO_NEW>::Remove(page, addr);
+      }
+    } else {
+      DCHECK(!IsDeletionAddress(addr));
+      RememberedSet<OLD_TO_NEW>::Insert(page, addr);
+    }
   }
-  top_ = start_;
+  lazy_top_[index] = nullptr;
 }
 
+void StoreBuffer::MoveAllEntriesToRememberedSet() {
+  base::LockGuard<base::Mutex> guard(&mutex_);
+  int other = (current_ + 1) % kStoreBuffers;
+  MoveEntriesToRememberedSet(other);
+  lazy_top_[current_] = top_;
+  MoveEntriesToRememberedSet(current_);
+  top_ = start_[current_];
+}
+
+void StoreBuffer::ConcurrentlyProcessStoreBuffer() {
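+  // Runs on a background thread; drains the inactive buffer under the mutex.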
+  base::LockGuard<base::Mutex> guard(&mutex_);
+  int other = (current_ + 1) % kStoreBuffers;
+  MoveEntriesToRememberedSet(other);
+  task_running_ = false;
+}
+
+void StoreBuffer::DeleteEntry(Address start, Address end) {
+  // Deletions triggered by the GC are applied to the remembered set directly.
+  // Deletions coming from the runtime are added to the store buffer to allow
+  // concurrent processing.
+  if (heap_->gc_state() == Heap::NOT_IN_GC) {
+    // top_ is an Address*, so advance in slots rather than bytes; a deletion
+    // needs two free slots.
+    if (top_ + 2 > limit_[current_]) {
+      StoreBufferOverflow(heap_->isolate());
+    }
+    *top_ = MarkDeletionAddress(start);
+    top_++;
+    *top_ = end;
+    top_++;
+  } else {
+    // During GC the store buffer has to be empty at all times.
+    DCHECK(Empty());
+    Page* page = Page::FromAddress(start);
+    if (end) {
+      RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
+                                             SlotSet::PREFREE_EMPTY_BUCKETS);
+    } else {
+      RememberedSet<OLD_TO_NEW>::Remove(page, start);
+    }
+  }
+}
 }  // namespace internal
 }  // namespace v8
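
Note on the encoding used above: deletion entries rely on slot addresses being
pointer-aligned, which leaves bit 0 free for kDeletionTag. A minimal standalone
sketch of the tagging round-trip, with plain uintptr_t standing in for V8's
Address typedef (illustrative names, not the V8 implementation):

    // Low-bit tagging: aligned addresses never have bit 0 set, so it can
    // carry a "this entry is a deletion" flag.
    #include <assert.h>
    #include <stdint.h>

    const uintptr_t kDeletionTag = 1;

    uintptr_t MarkDeletion(uintptr_t addr) { return addr | kDeletionTag; }
    bool IsDeletion(uintptr_t addr) { return (addr & kDeletionTag) != 0; }
    uintptr_t UnmarkDeletion(uintptr_t addr) { return addr & ~kDeletionTag; }

    int main() {
      uintptr_t slot = 0x1000;  // any word-aligned address
      uintptr_t tagged = MarkDeletion(slot);
      assert(IsDeletion(tagged) && !IsDeletion(slot));
      assert(UnmarkDeletion(tagged) == slot);
      return 0;
    }

This is also why DeleteEntry writes two words per deletion: when draining,
MoveEntriesToRememberedSet reads the tagged start address and then consumes
the following word as the (possibly null) end of the range.
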
diff --git a/src/heap/store-buffer.h b/src/heap/store-buffer.h
index 1b3fcb0..09faf4d 100644
--- a/src/heap/store-buffer.h
+++ b/src/heap/store-buffer.h
@@ -8,20 +8,28 @@
 #include "src/allocation.h"
 #include "src/base/logging.h"
 #include "src/base/platform/platform.h"
+#include "src/cancelable-task.h"
 #include "src/globals.h"
+#include "src/heap/remembered-set.h"
 #include "src/heap/slot-set.h"
 
 namespace v8 {
 namespace internal {
 
 // Intermediate buffer that accumulates old-to-new stores from the generated
-// code. On buffer overflow the slots are moved to the remembered set.
+// code. Moreover, it stores invalid old-to-new slots as pairs of entries: the
+// first is the tagged start address of the invalid range, and the second is
+// the end address of the invalid range, or null if just one slot needs to be
+// removed from the remembered set. On buffer overflow the slots are moved to
+// the remembered set.
 class StoreBuffer {
  public:
   static const int kStoreBufferSize = 1 << (14 + kPointerSizeLog2);
   static const int kStoreBufferMask = kStoreBufferSize - 1;
+  static const int kStoreBuffers = 2;
+  static const intptr_t kDeletionTag = 1;
 
-  static void StoreBufferOverflow(Isolate* isolate);
+  V8_EXPORT_PRIVATE static void StoreBufferOverflow(Isolate* isolate);
 
   explicit StoreBuffer(Heap* heap);
   void SetUp();
@@ -30,17 +38,109 @@
   // Used to add entries from generated code.
   inline Address* top_address() { return reinterpret_cast<Address*>(&top_); }
 
-  void MoveEntriesToRememberedSet();
+  // Moves entries from a specific store buffer to the remembered set. All
+  // callers must hold the mutex.
+  void MoveEntriesToRememberedSet(int index);
+
+  // This method ensures that all used store buffer entries are transferred to
+  // the remembered set.
+  void MoveAllEntriesToRememberedSet();
+
+  inline bool IsDeletionAddress(Address address) const {
+    return reinterpret_cast<intptr_t>(address) & kDeletionTag;
+  }
+
+  inline Address MarkDeletionAddress(Address address) {
+    return reinterpret_cast<Address>(reinterpret_cast<intptr_t>(address) |
+                                     kDeletionTag);
+  }
+
+  inline Address UnmarkDeletionAddress(Address address) {
+    return reinterpret_cast<Address>(reinterpret_cast<intptr_t>(address) &
+                                     ~kDeletionTag);
+  }
+
+  // If we only want to delete a single slot, end should be set to null which
+  // will be written into the second field. When processing the store buffer
+  // the more efficient Remove method will be called in this case.
+  void DeleteEntry(Address start, Address end = nullptr);
+
+  void InsertEntry(Address slot) {
+    // Insertions coming from the GC are directly inserted into the remembered
+    // set. Insertions coming from the runtime are added to the store buffer to
+    // allow concurrent processing.
+    if (heap_->gc_state() == Heap::NOT_IN_GC) {
+      if (top_ + sizeof(Address) > limit_[current_]) {
+        StoreBufferOverflow(heap_->isolate());
+      }
+      *top_ = slot;
+      top_++;
+    } else {
+      // During GC the store buffer has to be empty at all times.
+      DCHECK(Empty());
+      RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
+    }
+  }
+
+  // Used by the concurrent processing thread to transfer entries from the
+  // store buffer to the remembered set.
+  void ConcurrentlyProcessStoreBuffer();
+
+  bool Empty() {
+    for (int i = 0; i < kStoreBuffers; i++) {
+      if (lazy_top_[i]) {
+        return false;
+      }
+    }
+    return top_ == start_[current_];
+  }
 
  private:
+  // There are two store buffers. If one store buffer fills up, the main thread
+  // publishes the top pointer of that store buffer in the corresponding
+  // lazy_top_ field. After that it starts the concurrent processing
+  // thread. The concurrent processing thread uses the pointer in lazy_top_.
+  // It will grab the given mutex and transfer its entries to the remembered
+  // set. If the concurrent thread does not make progress, the main thread will
+  // perform the work.
+  // Important: there is an ordering constraint. The store buffer with the
+  // older entries has to be processed first.
+  class Task : public CancelableTask {
+   public:
+    Task(Isolate* isolate, StoreBuffer* store_buffer)
+        : CancelableTask(isolate), store_buffer_(store_buffer) {}
+    virtual ~Task() {}
+
+   private:
+    void RunInternal() override {
+      store_buffer_->ConcurrentlyProcessStoreBuffer();
+    }
+    StoreBuffer* store_buffer_;
+    DISALLOW_COPY_AND_ASSIGN(Task);
+  };
+
+  void FlipStoreBuffers();
+
   Heap* heap_;
 
   Address* top_;
 
   // The start and the limit of the buffer that contains store slots
-  // added from the generated code.
-  Address* start_;
-  Address* limit_;
+  // added from the generated code. There are two store buffers. Whenever
+  // one fills up, we notify a concurrent processing thread and use the
+  // other, empty one in the meantime.
+  Address* start_[kStoreBuffers];
+  Address* limit_[kStoreBuffers];
+
+  // At most one lazy_top_ pointer is set at any time.
+  Address* lazy_top_[kStoreBuffers];
+  base::Mutex mutex_;
+
+  // We want at most one concurrent processing task running at any time.
+  bool task_running_;
+
+  // Points to the current buffer in use.
+  int current_;
 
   base::VirtualMemory* virtual_memory_;
 };
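
The comment block above describes the double-buffering protocol in prose; a
simplified model of the flip, with the mutex and the background task elided (a
sketch under those simplifications, not the V8 code):

    // Two-buffer flip: publish the full buffer via lazy_top_, then keep
    // appending into the other buffer while it is drained concurrently.
    struct DoubleBuffer {
      static const int kBuffers = 2;
      void** start_[kBuffers];
      void** lazy_top_[kBuffers];  // non-null => that buffer awaits draining
      void** top_;
      int current_;

      void Flip() {
        int other = (current_ + 1) % kBuffers;
        Drain(other);                // ordering: the older buffer drains first
        lazy_top_[current_] = top_;  // publish the buffer that just filled up
        current_ = other;
        top_ = start_[current_];     // keep appending into the empty buffer
      }

      void Drain(int index) {
        if (!lazy_top_[index]) return;
        for (void** p = start_[index]; p < lazy_top_[index]; p++) {
          // ...transfer *p to the remembered set here...
        }
        lazy_top_[index] = nullptr;
      }
    };

    int main() {
      void* slots[2][4] = {{nullptr}};
      DoubleBuffer b = {};
      for (int i = 0; i < DoubleBuffer::kBuffers; i++) b.start_[i] = slots[i];
      b.top_ = b.start_[0];
      *b.top_++ = &b;  // record one entry, then flip to the other buffer
      b.Flip();
      return b.current_;  // 1: subsequent stores go into the second buffer
    }
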
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 4d31959..d4de79e 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -2912,8 +2912,9 @@
 
   // Some internal data structures overflow for very large buffers,
   // they must ensure that kMaximalBufferSize is not too large.
-  if ((desc.buffer_size > kMaximalBufferSize) ||
-      (desc.buffer_size > isolate()->heap()->MaxOldGenerationSize())) {
+  if (desc.buffer_size > kMaximalBufferSize ||
+      static_cast<size_t>(desc.buffer_size) >
+          isolate()->heap()->MaxOldGenerationSize()) {
     V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
   }
 
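
The new static_cast makes the signed/unsigned comparison explicit: buffer_size
is a signed int while MaxOldGenerationSize() evidently returns a size_t. For
the non-negative sizes that reach this check the cast does not change the
result; it documents the conversion that would otherwise happen implicitly. A
standalone demo of the pitfall such implicit conversions carry:

    // Mixed signed/unsigned comparison: the signed operand is converted to
    // unsigned, so a negative value silently compares as a huge one.
    #include <stddef.h>
    #include <stdio.h>

    int main() {
      int buffer_size = -1;            // pathological value, for illustration
      size_t max_size = (size_t)1 << 20;
      // -1 converts to SIZE_MAX, so this prints 1 (true), not 0.
      printf("%d\n", buffer_size > max_size);
      return 0;
    }
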
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index a1dc4b6..79f4125 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -1446,7 +1446,8 @@
 
   // Record a deoptimization reason that can be used by a log or cpu profiler.
   // Use --trace-deopt to enable.
-  void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
+  void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
+                         int id);
 
   // Writes a single byte or word of data in the code stream.  Used for
   // inline tables, e.g., jump-tables.
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index edab277..9b2c51e 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -810,14 +810,10 @@
   __ add(edx, Immediate(2));  // edx was a smi.
 
   // edx: Number of capture registers
-  // Load last_match_info which is still known to be a fast-elements JSObject.
-  // Check that the fourth object is a JSObject.
-  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
-  __ JumpIfSmi(eax, &runtime);
-  __ CmpObjectType(eax, JS_OBJECT_TYPE, ebx);
-  __ j(not_equal, &runtime);
+  // Check that the last match info is a FixedArray.
+  __ mov(ebx, Operand(esp, kLastMatchInfoOffset));
+  __ JumpIfSmi(ebx, &runtime);
   // Check that the object has fast elements.
-  __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
   __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
   __ cmp(eax, factory->fixed_array_map());
   __ j(not_equal, &runtime);
@@ -825,31 +821,25 @@
   // additional information.
   __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
   __ SmiUntag(eax);
-  __ sub(eax, Immediate(RegExpImpl::kLastMatchOverhead));
+  __ sub(eax, Immediate(RegExpMatchInfo::kLastMatchOverhead));
   __ cmp(edx, eax);
   __ j(greater, &runtime);
 
-  // ebx: last_match_info backing store (FixedArray)
+  // ebx: last_match_info (FixedArray)
   // edx: number of capture registers
   // Store the capture count.
   __ SmiTag(edx);  // Number of capture registers to smi.
-  __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
+  __ mov(FieldOperand(ebx, RegExpMatchInfo::kNumberOfCapturesOffset), edx);
   __ SmiUntag(edx);  // Number of capture registers back from smi.
   // Store last subject and last input.
   __ mov(eax, Operand(esp, kSubjectOffset));
   __ mov(ecx, eax);
-  __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
-  __ RecordWriteField(ebx,
-                      RegExpImpl::kLastSubjectOffset,
-                      eax,
-                      edi,
+  __ mov(FieldOperand(ebx, RegExpMatchInfo::kLastSubjectOffset), eax);
+  __ RecordWriteField(ebx, RegExpMatchInfo::kLastSubjectOffset, eax, edi,
                       kDontSaveFPRegs);
   __ mov(eax, ecx);
-  __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
-  __ RecordWriteField(ebx,
-                      RegExpImpl::kLastInputOffset,
-                      eax,
-                      edi,
+  __ mov(FieldOperand(ebx, RegExpMatchInfo::kLastInputOffset), eax);
+  __ RecordWriteField(ebx, RegExpMatchInfo::kLastInputOffset, eax, edi,
                       kDontSaveFPRegs);
 
   // Get the static offsets vector filled by the native regexp code.
@@ -857,12 +847,12 @@
       ExternalReference::address_of_static_offsets_vector(isolate());
   __ mov(ecx, Immediate(address_of_static_offsets_vector));
 
-  // ebx: last_match_info backing store (FixedArray)
+  // ebx: last_match_info (FixedArray)
   // ecx: offsets vector
   // edx: number of capture registers
   Label next_capture, done;
   // Capture register counter starts from number of capture registers and
-  // counts down until wraping after zero.
+  // counts down until wrapping after zero.
   __ bind(&next_capture);
   __ sub(edx, Immediate(1));
   __ j(negative, &done, Label::kNear);
@@ -870,16 +860,14 @@
   __ mov(edi, Operand(ecx, edx, times_int_size, 0));
   __ SmiTag(edi);
   // Store the smi value in the last match info.
-  __ mov(FieldOperand(ebx,
-                      edx,
-                      times_pointer_size,
-                      RegExpImpl::kFirstCaptureOffset),
-                      edi);
+  __ mov(FieldOperand(ebx, edx, times_pointer_size,
+                      RegExpMatchInfo::kFirstCaptureOffset),
+         edi);
   __ jmp(&next_capture);
   __ bind(&done);
 
   // Return last match info.
-  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
+  __ mov(eax, ebx);
   __ ret(4 * kPointerSize);
 
   // Do the runtime call to execute the regexp.
@@ -1062,7 +1050,7 @@
     // If either is a Smi (we know that not both are), then they can only
     // be equal if the other is a HeapNumber. If so, use the slow case.
     STATIC_ASSERT(kSmiTag == 0);
-    DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+    DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
     __ mov(ecx, Immediate(kSmiTagMask));
     __ and_(ecx, eax);
     __ test(ecx, edx);
@@ -1429,6 +1417,7 @@
 }
 
 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
+  // eax - number of arguments
   // edi - function
   // edx - slot id
   // ebx - vector
@@ -1436,7 +1425,6 @@
   __ cmp(edi, ecx);
   __ j(not_equal, miss);
 
-  __ mov(eax, arg_count());
   // Reload ecx.
   __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
                            FixedArray::kHeaderSize));
@@ -1446,7 +1434,7 @@
 
   __ mov(ebx, ecx);
   __ mov(edx, edi);
-  ArrayConstructorStub stub(masm->isolate(), arg_count());
+  ArrayConstructorStub stub(masm->isolate());
   __ TailCallStub(&stub);
 
   // Unreachable.
@@ -1454,13 +1442,12 @@
 
 
 void CallICStub::Generate(MacroAssembler* masm) {
+  // eax - number of arguments
   // edi - function
   // edx - slot id
   // ebx - vector
   Isolate* isolate = masm->isolate();
   Label extra_checks_or_miss, call, call_function, call_count_incremented;
-  int argc = arg_count();
-  ParameterCount actual(argc);
 
   // The checks. First, does edi match the recorded monomorphic target?
   __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
@@ -1492,7 +1479,6 @@
   // Increment the call count for monomorphic function calls.
   IncrementCallCount(masm, ebx, edx);
 
-  __ Set(eax, argc);
   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
                                                     tail_call_mode()),
           RelocInfo::CODE_TARGET);
@@ -1538,7 +1524,6 @@
 
   __ bind(&call_count_incremented);
 
-  __ Set(eax, argc);
   __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
           RelocInfo::CODE_TARGET);
 
@@ -1564,12 +1549,15 @@
   __ j(not_equal, &miss);
 
   // Store the function. Use a stub since we need a frame for allocation.
+  // eax - number of arguments
   // ebx - vector
   // edx - slot
   // edi - function
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     CreateWeakCellStub create_stub(isolate);
+    __ SmiTag(eax);
+    __ push(eax);
     __ push(ebx);
     __ push(edx);
     __ push(edi);
@@ -1579,6 +1567,8 @@
     __ pop(edi);
     __ pop(edx);
     __ pop(ebx);
+    __ pop(eax);
+    __ SmiUntag(eax);
   }
 
   __ jmp(&call_function);
@@ -1598,6 +1588,10 @@
 void CallICStub::GenerateMiss(MacroAssembler* masm) {
   FrameScope scope(masm, StackFrame::INTERNAL);
 
+  // Preserve the number of arguments.
+  __ SmiTag(eax);
+  __ push(eax);
+
   // Push the function and feedback info.
   __ push(edi);
   __ push(ebx);
@@ -1608,6 +1602,10 @@
 
   // Move result to edi and exit the internal frame.
   __ mov(edi, eax);
+
+  // Restore number of arguments.
+  __ pop(eax);
+  __ SmiUntag(eax);
 }
 
 
@@ -3018,254 +3016,12 @@
   __ jmp(ecx);  // Return to IC Miss stub, continuation still on stack.
 }
 
-
-void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  LoadICStub stub(isolate());
-  stub.GenerateForTrampoline(masm);
-}
-
-
-void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  KeyedLoadICStub stub(isolate());
-  stub.GenerateForTrampoline(masm);
-}
-
-
-static void HandleArrayCases(MacroAssembler* masm, Register receiver,
-                             Register key, Register vector, Register slot,
-                             Register feedback, bool is_polymorphic,
-                             Label* miss) {
-  // feedback initially contains the feedback array
-  Label next, next_loop, prepare_next;
-  Label load_smi_map, compare_map;
-  Label start_polymorphic;
-
-  __ push(receiver);
-  __ push(vector);
-
-  Register receiver_map = receiver;
-  Register cached_map = vector;
-
-  // Receiver might not be a heap object.
-  __ JumpIfSmi(receiver, &load_smi_map);
-  __ mov(receiver_map, FieldOperand(receiver, 0));
-  __ bind(&compare_map);
-  __ mov(cached_map, FieldOperand(feedback, FixedArray::OffsetOfElementAt(0)));
-
-  // A named keyed load might have a 2 element array, all other cases can count
-  // on an array with at least 2 {map, handler} pairs, so they can go right
-  // into polymorphic array handling.
-  __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
-  __ j(not_equal, is_polymorphic ? &start_polymorphic : &next);
-
-  // found, now call handler.
-  Register handler = feedback;
-  __ mov(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
-  __ pop(vector);
-  __ pop(receiver);
-  __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
-  __ jmp(handler);
-
-  if (!is_polymorphic) {
-    __ bind(&next);
-    __ cmp(FieldOperand(feedback, FixedArray::kLengthOffset),
-           Immediate(Smi::FromInt(2)));
-    __ j(not_equal, &start_polymorphic);
-    __ pop(vector);
-    __ pop(receiver);
-    __ jmp(miss);
-  }
-
-  // Polymorphic, we have to loop from 2 to N
-  __ bind(&start_polymorphic);
-  __ push(key);
-  Register counter = key;
-  __ mov(counter, Immediate(Smi::FromInt(2)));
-  __ bind(&next_loop);
-  __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
-                                  FixedArray::kHeaderSize));
-  __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
-  __ j(not_equal, &prepare_next);
-  __ mov(handler, FieldOperand(feedback, counter, times_half_pointer_size,
-                               FixedArray::kHeaderSize + kPointerSize));
-  __ pop(key);
-  __ pop(vector);
-  __ pop(receiver);
-  __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
-  __ jmp(handler);
-
-  __ bind(&prepare_next);
-  __ add(counter, Immediate(Smi::FromInt(2)));
-  __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
-  __ j(less, &next_loop);
-
-  // We exhausted our array of map handler pairs.
-  __ pop(key);
-  __ pop(vector);
-  __ pop(receiver);
-  __ jmp(miss);
-
-  __ bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ jmp(&compare_map);
-}
-
-
-static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
-                                  Register key, Register vector, Register slot,
-                                  Register weak_cell, Label* miss) {
-  // feedback initially contains the feedback array
-  Label compare_smi_map;
-
-  // Move the weak map into the weak_cell register.
-  Register ic_map = weak_cell;
-  __ mov(ic_map, FieldOperand(weak_cell, WeakCell::kValueOffset));
-
-  // Receiver might not be a heap object.
-  __ JumpIfSmi(receiver, &compare_smi_map);
-  __ cmp(ic_map, FieldOperand(receiver, 0));
-  __ j(not_equal, miss);
-  Register handler = weak_cell;
-  __ mov(handler, FieldOperand(vector, slot, times_half_pointer_size,
-                               FixedArray::kHeaderSize + kPointerSize));
-  __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
-  __ jmp(handler);
-
-  // In microbenchmarks, it made sense to unroll this code so that the call to
-  // the handler is duplicated for a HeapObject receiver and a Smi receiver.
-  __ bind(&compare_smi_map);
-  __ CompareRoot(ic_map, Heap::kHeapNumberMapRootIndex);
-  __ j(not_equal, miss);
-  __ mov(handler, FieldOperand(vector, slot, times_half_pointer_size,
-                               FixedArray::kHeaderSize + kPointerSize));
-  __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
-  __ jmp(handler);
-}
-
-
-void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-
-void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-
-void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // edx
-  Register name = LoadWithVectorDescriptor::NameRegister();          // ecx
-  Register vector = LoadWithVectorDescriptor::VectorRegister();      // ebx
-  Register slot = LoadWithVectorDescriptor::SlotRegister();          // eax
-  Register scratch = edi;
-  __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
-                               FixedArray::kHeaderSize));
-
-  // Is it a weak cell?
-  Label try_array;
-  Label not_array, smi_key, key_okay, miss;
-  __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex);
-  __ j(not_equal, &try_array);
-  HandleMonomorphicCase(masm, receiver, name, vector, slot, scratch, &miss);
-
-  // Is it a fixed array?
-  __ bind(&try_array);
-  __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &not_array);
-  HandleArrayCases(masm, receiver, name, vector, slot, scratch, true, &miss);
-
-  __ bind(&not_array);
-  __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
-  __ j(not_equal, &miss);
-  __ push(slot);
-  __ push(vector);
-  masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, name,
-                                                    vector, scratch);
-  __ pop(vector);
-  __ pop(slot);
-
-  __ bind(&miss);
-  LoadIC::GenerateMiss(masm);
-}
-
-
-void KeyedLoadICStub::Generate(MacroAssembler* masm) {
-  GenerateImpl(masm, false);
-}
-
-
-void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-
-void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // edx
-  Register key = LoadWithVectorDescriptor::NameRegister();           // ecx
-  Register vector = LoadWithVectorDescriptor::VectorRegister();      // ebx
-  Register slot = LoadWithVectorDescriptor::SlotRegister();          // eax
-  Register feedback = edi;
-  __ mov(feedback, FieldOperand(vector, slot, times_half_pointer_size,
-                                FixedArray::kHeaderSize));
-  // Is it a weak cell?
-  Label try_array;
-  Label not_array, smi_key, key_okay, miss;
-  __ CompareRoot(FieldOperand(feedback, 0), Heap::kWeakCellMapRootIndex);
-  __ j(not_equal, &try_array);
-  HandleMonomorphicCase(masm, receiver, key, vector, slot, feedback, &miss);
-
-  __ bind(&try_array);
-  // Is it a fixed array?
-  __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &not_array);
-
-  // We have a polymorphic element handler.
-  Label polymorphic, try_poly_name;
-  __ bind(&polymorphic);
-  HandleArrayCases(masm, receiver, key, vector, slot, feedback, true, &miss);
-
-  __ bind(&not_array);
-  // Is it generic?
-  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
-  __ j(not_equal, &try_poly_name);
-  Handle<Code> megamorphic_stub =
-      KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
-  __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
-
-  __ bind(&try_poly_name);
-  // We might have a name in feedback, and a fixed array in the next slot.
-  __ cmp(key, feedback);
-  __ j(not_equal, &miss);
-  // If the name comparison succeeded, we know we have a fixed array with
-  // at least one map/handler pair.
-  __ mov(feedback, FieldOperand(vector, slot, times_half_pointer_size,
-                                FixedArray::kHeaderSize + kPointerSize));
-  HandleArrayCases(masm, receiver, key, vector, slot, feedback, false, &miss);
-
-  __ bind(&miss);
-  KeyedLoadIC::GenerateMiss(masm);
-}
-
-void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
-  StoreICStub stub(isolate(), state());
-  stub.GenerateForTrampoline(masm);
-}
-
 void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
   KeyedStoreICStub stub(isolate(), state());
   stub.GenerateForTrampoline(masm);
 }
 
-void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-
 // value is on the stack already.
 static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
                                        Register key, Register vector,
@@ -3382,63 +3138,6 @@
   __ jmp(weak_cell);
 }
 
-void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // edx
-  Register key = StoreWithVectorDescriptor::NameRegister();           // ecx
-  Register value = StoreWithVectorDescriptor::ValueRegister();        // eax
-  Register vector = StoreWithVectorDescriptor::VectorRegister();      // ebx
-  Register slot = StoreWithVectorDescriptor::SlotRegister();          // edi
-  Label miss;
-
-  if (StoreWithVectorDescriptor::kPassLastArgsOnStack) {
-    // Current stack layout:
-    // - esp[8]    -- value
-    // - esp[4]    -- slot
-    // - esp[0]    -- return address
-    STATIC_ASSERT(StoreDescriptor::kStackArgumentsCount == 2);
-    STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
-    if (in_frame) {
-      __ RecordComment("[ StoreDescriptor -> StoreWithVectorDescriptor");
-      // If the vector is not on the stack, then insert the vector beneath
-      // return address in order to prepare for calling handler with
-      // StoreWithVector calling convention.
-      __ push(Operand(esp, 0));
-      __ mov(Operand(esp, 4), StoreWithVectorDescriptor::VectorRegister());
-      __ RecordComment("]");
-    } else {
-      __ mov(vector, Operand(esp, 1 * kPointerSize));
-    }
-    __ mov(slot, Operand(esp, 2 * kPointerSize));
-  }
-
-  Register scratch = value;
-  __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
-                               FixedArray::kHeaderSize));
-
-  // Is it a weak cell?
-  Label try_array;
-  Label not_array, smi_key, key_okay;
-  __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex);
-  __ j(not_equal, &try_array);
-  HandleMonomorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);
-
-  // Is it a fixed array?
-  __ bind(&try_array);
-  __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &not_array);
-  HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, true,
-                             &miss);
-
-  __ bind(&not_array);
-  __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
-  __ j(not_equal, &miss);
-
-  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, slot,
-                                                     no_reg);
-  __ bind(&miss);
-  StoreIC::GenerateMiss(masm);
-}
-
 void KeyedStoreICStub::Generate(MacroAssembler* masm) {
   GenerateImpl(masm, false);
 }
@@ -3487,7 +3186,7 @@
   // - esp[12]   -- value
   // - receiver, key, handler in registers.
   Register counter = key;
-  __ mov(counter, Immediate(Smi::FromInt(0)));
+  __ mov(counter, Immediate(Smi::kZero));
   __ bind(&next_loop);
   __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
                                   FixedArray::kHeaderSize));
@@ -3830,37 +3529,23 @@
   }
 }
 
-
 void ArrayConstructorStub::GenerateDispatchToArrayStub(
-    MacroAssembler* masm,
-    AllocationSiteOverrideMode mode) {
-  if (argument_count() == ANY) {
-    Label not_zero_case, not_one_case;
-    __ test(eax, eax);
-    __ j(not_zero, &not_zero_case);
-    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+    MacroAssembler* masm, AllocationSiteOverrideMode mode) {
+  Label not_zero_case, not_one_case;
+  __ test(eax, eax);
+  __ j(not_zero, &not_zero_case);
+  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
 
-    __ bind(&not_zero_case);
-    __ cmp(eax, 1);
-    __ j(greater, &not_one_case);
-    CreateArrayDispatchOneArgument(masm, mode);
+  __ bind(&not_zero_case);
+  __ cmp(eax, 1);
+  __ j(greater, &not_one_case);
+  CreateArrayDispatchOneArgument(masm, mode);
 
-    __ bind(&not_one_case);
-    ArrayNArgumentsConstructorStub stub(masm->isolate());
-    __ TailCallStub(&stub);
-  } else if (argument_count() == NONE) {
-    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-  } else if (argument_count() == ONE) {
-    CreateArrayDispatchOneArgument(masm, mode);
-  } else if (argument_count() == MORE_THAN_ONE) {
-    ArrayNArgumentsConstructorStub stub(masm->isolate());
-    __ TailCallStub(&stub);
-  } else {
-    UNREACHABLE();
-  }
+  __ bind(&not_one_case);
+  ArrayNArgumentsConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
 }
 
-
 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax : argc (only if argument_count() is ANY or MORE_THAN_ONE)
@@ -3912,21 +3597,8 @@
 
   // Subclassing.
   __ bind(&subclassing);
-  switch (argument_count()) {
-    case ANY:
-    case MORE_THAN_ONE:
-      __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
-      __ add(eax, Immediate(3));
-      break;
-    case NONE:
-      __ mov(Operand(esp, 1 * kPointerSize), edi);
-      __ mov(eax, Immediate(3));
-      break;
-    case ONE:
-      __ mov(Operand(esp, 2 * kPointerSize), edi);
-      __ mov(eax, Immediate(4));
-      break;
-  }
+  __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
+  __ add(eax, Immediate(3));
   __ PopReturnAddressTo(ecx);
   __ Push(edx);
   __ Push(ebx);
@@ -4210,8 +3882,7 @@
     __ mov(ecx, isolate()->factory()->empty_fixed_array());
     __ mov(FieldOperand(eax, JSArray::kPropertiesOffset), ecx);
     __ mov(FieldOperand(eax, JSArray::kElementsOffset), ecx);
-    __ mov(FieldOperand(eax, JSArray::kLengthOffset),
-           Immediate(Smi::FromInt(0)));
+    __ mov(FieldOperand(eax, JSArray::kLengthOffset), Immediate(Smi::kZero));
     STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
     __ Ret();
 
@@ -4252,7 +3923,7 @@
     __ mov(FieldOperand(edx, FixedArray::kLengthOffset), eax);
     {
       Label loop, done_loop;
-      __ Move(ecx, Smi::FromInt(0));
+      __ Move(ecx, Smi::kZero);
       __ bind(&loop);
       __ cmp(ecx, eax);
       __ j(equal, &done_loop, Label::kNear);
@@ -4641,7 +4312,7 @@
   __ mov(FieldOperand(edx, FixedArray::kLengthOffset), eax);
   {
     Label loop, done_loop;
-    __ Move(ecx, Smi::FromInt(0));
+    __ Move(ecx, Smi::kZero);
     __ bind(&loop);
     __ cmp(ecx, eax);
     __ j(equal, &done_loop, Label::kNear);
@@ -4704,129 +4375,6 @@
 }
 
 
-void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
-  Register context_reg = esi;
-  Register slot_reg = ebx;
-  Register value_reg = eax;
-  Register cell_reg = edi;
-  Register cell_details_reg = edx;
-  Register cell_value_reg = ecx;
-  Label fast_heapobject_case, fast_smi_case, slow_case;
-
-  if (FLAG_debug_code) {
-    __ CompareRoot(value_reg, Heap::kTheHoleValueRootIndex);
-    __ Check(not_equal, kUnexpectedValue);
-  }
-
-  // Go up context chain to the script context.
-  for (int i = 0; i < depth(); ++i) {
-    __ mov(cell_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
-    context_reg = cell_reg;
-  }
-
-  // Load the PropertyCell at the specified slot.
-  __ mov(cell_reg, ContextOperand(context_reg, slot_reg));
-
-  // Load PropertyDetails for the cell (actually only the cell_type and kind).
-  __ mov(cell_details_reg,
-         FieldOperand(cell_reg, PropertyCell::kDetailsOffset));
-  __ SmiUntag(cell_details_reg);
-  __ and_(cell_details_reg,
-          Immediate(PropertyDetails::PropertyCellTypeField::kMask |
-                    PropertyDetails::KindField::kMask |
-                    PropertyDetails::kAttributesReadOnlyMask));
-
-  // Check if PropertyCell holds mutable data.
-  Label not_mutable_data;
-  __ cmp(cell_details_reg,
-         Immediate(PropertyDetails::PropertyCellTypeField::encode(
-                       PropertyCellType::kMutable) |
-                   PropertyDetails::KindField::encode(kData)));
-  __ j(not_equal, &not_mutable_data);
-  __ JumpIfSmi(value_reg, &fast_smi_case);
-  __ bind(&fast_heapobject_case);
-  __ mov(FieldOperand(cell_reg, PropertyCell::kValueOffset), value_reg);
-  __ RecordWriteField(cell_reg, PropertyCell::kValueOffset, value_reg,
-                      cell_details_reg, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  // RecordWriteField clobbers the value register, so we need to reload.
-  __ mov(value_reg, FieldOperand(cell_reg, PropertyCell::kValueOffset));
-  __ Ret();
-  __ bind(&not_mutable_data);
-
-  // Check if PropertyCell value matches the new value (relevant for Constant,
-  // ConstantType and Undefined cells).
-  Label not_same_value;
-  __ mov(cell_value_reg, FieldOperand(cell_reg, PropertyCell::kValueOffset));
-  __ cmp(cell_value_reg, value_reg);
-  __ j(not_equal, &not_same_value,
-       FLAG_debug_code ? Label::kFar : Label::kNear);
-  // Make sure the PropertyCell is not marked READ_ONLY.
-  __ test(cell_details_reg,
-          Immediate(PropertyDetails::kAttributesReadOnlyMask));
-  __ j(not_zero, &slow_case);
-  if (FLAG_debug_code) {
-    Label done;
-    // This can only be true for Constant, ConstantType and Undefined cells,
-    // because we never store the_hole via this stub.
-    __ cmp(cell_details_reg,
-           Immediate(PropertyDetails::PropertyCellTypeField::encode(
-                         PropertyCellType::kConstant) |
-                     PropertyDetails::KindField::encode(kData)));
-    __ j(equal, &done);
-    __ cmp(cell_details_reg,
-           Immediate(PropertyDetails::PropertyCellTypeField::encode(
-                         PropertyCellType::kConstantType) |
-                     PropertyDetails::KindField::encode(kData)));
-    __ j(equal, &done);
-    __ cmp(cell_details_reg,
-           Immediate(PropertyDetails::PropertyCellTypeField::encode(
-                         PropertyCellType::kUndefined) |
-                     PropertyDetails::KindField::encode(kData)));
-    __ Check(equal, kUnexpectedValue);
-    __ bind(&done);
-  }
-  __ Ret();
-  __ bind(&not_same_value);
-
-  // Check if PropertyCell contains data with constant type (and is not
-  // READ_ONLY).
-  __ cmp(cell_details_reg,
-         Immediate(PropertyDetails::PropertyCellTypeField::encode(
-                       PropertyCellType::kConstantType) |
-                   PropertyDetails::KindField::encode(kData)));
-  __ j(not_equal, &slow_case, Label::kNear);
-
-  // Now either both old and new values must be SMIs or both must be heap
-  // objects with same map.
-  Label value_is_heap_object;
-  __ JumpIfNotSmi(value_reg, &value_is_heap_object, Label::kNear);
-  __ JumpIfNotSmi(cell_value_reg, &slow_case, Label::kNear);
-  // Old and new values are SMIs, no need for a write barrier here.
-  __ bind(&fast_smi_case);
-  __ mov(FieldOperand(cell_reg, PropertyCell::kValueOffset), value_reg);
-  __ Ret();
-  __ bind(&value_is_heap_object);
-  __ JumpIfSmi(cell_value_reg, &slow_case, Label::kNear);
-  Register cell_value_map_reg = cell_value_reg;
-  __ mov(cell_value_map_reg,
-         FieldOperand(cell_value_reg, HeapObject::kMapOffset));
-  __ cmp(cell_value_map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
-  __ j(equal, &fast_heapobject_case);
-
-  // Fallback to the runtime.
-  __ bind(&slow_case);
-  __ SmiTag(slot_reg);
-  __ Pop(cell_reg);  // Pop return address.
-  __ Push(slot_reg);
-  __ Push(value_reg);
-  __ Push(cell_reg);  // Push return address.
-  __ TailCallRuntime(is_strict(language_mode())
-                         ? Runtime::kStoreGlobalViaContext_Strict
-                         : Runtime::kStoreGlobalViaContext_Sloppy);
-}
-
-
 // Generates an Operand for saving parameters after PrepareCallApiFunction.
 static Operand ApiParameterOperand(int index) {
   return Operand(esp, index * kPointerSize);
@@ -5160,7 +4708,7 @@
   __ PushRoot(Heap::kUndefinedValueRootIndex);
   __ push(Immediate(ExternalReference::isolate_address(isolate())));
   __ push(holder);
-  __ push(Immediate(Smi::FromInt(0)));  // should_throw_on_error -> false
+  __ push(Immediate(Smi::kZero));  // should_throw_on_error -> false
   __ push(FieldOperand(callback, AccessorInfo::kNameOffset));
   __ push(scratch);  // Restore return address.
 
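
A pattern worth noting in the CallICStub hunks above: the argument count now
stays live in eax, so whenever the stub calls out through an internal frame it
does SmiTag(eax); push(eax) first and pop(eax); SmiUntag(eax) afterwards. Raw
integers on the stack would look like arbitrary pointers to the GC, whereas a
Smi is self-describing. A sketch of the 32-bit Smi encoding this relies on
(tag value 0 with one tag bit, per V8's ia32 convention; illustrative, not the
V8 implementation):

    // ia32 Smi encoding: value << 1, tag bit 0 clear.
    #include <assert.h>
    #include <stdint.h>

    const int kSmiTagSize = 1;

    int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }
    int32_t SmiUntag(int32_t smi) { return smi >> kSmiTagSize; }

    int main() {
      int32_t argc = 3;
      int32_t pushed = SmiTag(argc);   // what SmiTag(eax); push(eax) stores
      assert((pushed & 1) == 0);       // tag bit clear: GC reads it as a Smi
      assert(SmiUntag(pushed) == argc);
      return 0;
    }
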
diff --git a/src/ia32/interface-descriptors-ia32.cc b/src/ia32/interface-descriptors-ia32.cc
index 220484c..8ce7872 100644
--- a/src/ia32/interface-descriptors-ia32.cc
+++ b/src/ia32/interface-descriptors-ia32.cc
@@ -31,6 +31,7 @@
 
 const Register LoadWithVectorDescriptor::VectorRegister() { return ebx; }
 
+const Register LoadICProtoArrayDescriptor::HandlerRegister() { return edi; }
 
 const Register StoreDescriptor::ReceiverRegister() { return edx; }
 const Register StoreDescriptor::NameRegister() { return ecx; }
@@ -40,15 +41,9 @@
 const Register StoreWithVectorDescriptor::VectorRegister() { return ebx; }
 
 const Register StoreTransitionDescriptor::SlotRegister() { return no_reg; }
-
 const Register StoreTransitionDescriptor::VectorRegister() { return ebx; }
-
 const Register StoreTransitionDescriptor::MapRegister() { return edi; }
 
-const Register StoreGlobalViaContextDescriptor::SlotRegister() { return ebx; }
-const Register StoreGlobalViaContextDescriptor::ValueRegister() { return eax; }
-
-
 const Register StringCompareDescriptor::LeftRegister() { return edx; }
 const Register StringCompareDescriptor::RightRegister() { return eax; }
 
@@ -161,7 +156,7 @@
 
 void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {edi, edx, ebx};
+  Register registers[] = {edi, eax, edx, ebx};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
@@ -210,13 +205,6 @@
 }
 
 
-void RegExpConstructResultDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {ecx, ebx, eax};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
 void TransitionElementsKindDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {eax, ebx};
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 2bd8760..2fa9d0e 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -810,20 +810,6 @@
   cmpb(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
 }
 
-
-void MacroAssembler::CheckFastElements(Register map,
-                                       Label* fail,
-                                       Label::Distance distance) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  STATIC_ASSERT(FAST_ELEMENTS == 2);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
-  cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Immediate(Map::kMaximumBitField2FastHoleyElementValue));
-  j(above, fail, distance);
-}
-
-
 void MacroAssembler::CheckFastObjectElements(Register map,
                                              Label* fail,
                                              Label::Distance distance) {
@@ -1296,79 +1282,6 @@
 }
 
 
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
-                                            Register scratch1,
-                                            Register scratch2,
-                                            Label* miss) {
-  Label same_contexts;
-
-  DCHECK(!holder_reg.is(scratch1));
-  DCHECK(!holder_reg.is(scratch2));
-  DCHECK(!scratch1.is(scratch2));
-
-  // Load current lexical context from the active StandardFrame, which
-  // may require crawling past STUB frames.
-  Label load_context;
-  Label has_context;
-  mov(scratch2, ebp);
-  bind(&load_context);
-  mov(scratch1,
-      MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
-  JumpIfNotSmi(scratch1, &has_context);
-  mov(scratch2, MemOperand(scratch2, CommonFrameConstants::kCallerFPOffset));
-  jmp(&load_context);
-  bind(&has_context);
-
-  // When generating debug code, make sure the lexical context is set.
-  if (emit_debug_code()) {
-    cmp(scratch1, Immediate(0));
-    Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
-  }
-  // Load the native context of the current context.
-  mov(scratch1, ContextOperand(scratch1, Context::NATIVE_CONTEXT_INDEX));
-
-  // Check the context is a native context.
-  if (emit_debug_code()) {
-    // Read the first word and compare to native_context_map.
-    cmp(FieldOperand(scratch1, HeapObject::kMapOffset),
-        isolate()->factory()->native_context_map());
-    Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
-  }
-
-  // Check if both contexts are the same.
-  cmp(scratch1, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
-  j(equal, &same_contexts);
-
-  // Compare security tokens, save holder_reg on the stack so we can use it
-  // as a temporary register.
-  //
-  // Check that the security token in the calling global object is
-  // compatible with the security token in the receiving global
-  // object.
-  mov(scratch2,
-      FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
-
-  // Check the context is a native context.
-  if (emit_debug_code()) {
-    cmp(scratch2, isolate()->factory()->null_value());
-    Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
-
-    // Read the first word and compare to native_context_map(),
-    cmp(FieldOperand(scratch2, HeapObject::kMapOffset),
-        isolate()->factory()->native_context_map());
-    Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
-  }
-
-  int token_offset = Context::kHeaderSize +
-                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
-  mov(scratch1, FieldOperand(scratch1, token_offset));
-  cmp(scratch1, FieldOperand(scratch2, token_offset));
-  j(not_equal, miss);
-
-  bind(&same_contexts);
-}
-
-
 // Compute the hash code from the untagged key.  This must be kept in sync with
 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
 // code-stub-hydrogen.cc
@@ -1413,82 +1326,6 @@
   and_(r0, 0x3fffffff);
 }
 
-
-
-void MacroAssembler::LoadFromNumberDictionary(Label* miss,
-                                              Register elements,
-                                              Register key,
-                                              Register r0,
-                                              Register r1,
-                                              Register r2,
-                                              Register result) {
-  // Register use:
-  //
-  // elements - holds the slow-case elements of the receiver and is unchanged.
-  //
-  // key      - holds the smi key on entry and is unchanged.
-  //
-  // Scratch registers:
-  //
-  // r0 - holds the untagged key on entry and holds the hash once computed.
-  //
-  // r1 - used to hold the capacity mask of the dictionary
-  //
-  // r2 - used for the index into the dictionary.
-  //
-  // result - holds the result on exit if the load succeeds and we fall through.
-
-  Label done;
-
-  GetNumberHash(r0, r1);
-
-  // Compute capacity mask.
-  mov(r1, FieldOperand(elements, SeededNumberDictionary::kCapacityOffset));
-  shr(r1, kSmiTagSize);  // convert smi to int
-  dec(r1);
-
-  // Generate an unrolled loop that performs a few probes before giving up.
-  for (int i = 0; i < kNumberDictionaryProbes; i++) {
-    // Use r2 for index calculations and keep the hash intact in r0.
-    mov(r2, r0);
-    // Compute the masked index: (hash + i + i * i) & mask.
-    if (i > 0) {
-      add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
-    }
-    and_(r2, r1);
-
-    // Scale the index by multiplying by the entry size.
-    DCHECK(SeededNumberDictionary::kEntrySize == 3);
-    lea(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3
-
-    // Check if the key matches.
-    cmp(key, FieldOperand(elements,
-                          r2,
-                          times_pointer_size,
-                          SeededNumberDictionary::kElementsStartOffset));
-    if (i != (kNumberDictionaryProbes - 1)) {
-      j(equal, &done);
-    } else {
-      j(not_equal, miss);
-    }
-  }
-
-  bind(&done);
-  // Check that the value is a field property.
-  const int kDetailsOffset =
-      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
-  DCHECK_EQ(DATA, 0);
-  test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
-       Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
-  j(not_zero, miss);
-
-  // Get the value at the masked, scaled index.
-  const int kValueOffset =
-      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
-  mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
-}
-
-
 void MacroAssembler::LoadAllocationTopHelper(Register result,
                                              Register scratch,
                                              AllocationFlags flags) {
@@ -1971,74 +1808,6 @@
   STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
 }
 
-
-// Copy memory, byte-by-byte, from source to destination.  Not optimized for
-// long or aligned copies.  The contents of scratch and length are destroyed.
-// Source and destination are incremented by length.
-// Many variants of movsb, loop unrolling, word moves, and indexed operands
-// have been tried here already, and this is fastest.
-// A simpler loop is faster on small copies, but 30% slower on large ones.
-// The cld() instruction must have been emitted, to set the direction flag(),
-// before calling this function.
-void MacroAssembler::CopyBytes(Register source,
-                               Register destination,
-                               Register length,
-                               Register scratch) {
-  Label short_loop, len4, len8, len12, done, short_string;
-  DCHECK(source.is(esi));
-  DCHECK(destination.is(edi));
-  DCHECK(length.is(ecx));
-  cmp(length, Immediate(4));
-  j(below, &short_string, Label::kNear);
-
-  // Because source is 4-byte aligned in our uses of this function,
-  // we keep source aligned for the rep_movs call by copying the odd bytes
-  // at the end of the ranges.
-  mov(scratch, Operand(source, length, times_1, -4));
-  mov(Operand(destination, length, times_1, -4), scratch);
-
-  cmp(length, Immediate(8));
-  j(below_equal, &len4, Label::kNear);
-  cmp(length, Immediate(12));
-  j(below_equal, &len8, Label::kNear);
-  cmp(length, Immediate(16));
-  j(below_equal, &len12, Label::kNear);
-
-  mov(scratch, ecx);
-  shr(ecx, 2);
-  rep_movs();
-  and_(scratch, Immediate(0x3));
-  add(destination, scratch);
-  jmp(&done, Label::kNear);
-
-  bind(&len12);
-  mov(scratch, Operand(source, 8));
-  mov(Operand(destination, 8), scratch);
-  bind(&len8);
-  mov(scratch, Operand(source, 4));
-  mov(Operand(destination, 4), scratch);
-  bind(&len4);
-  mov(scratch, Operand(source, 0));
-  mov(Operand(destination, 0), scratch);
-  add(destination, length);
-  jmp(&done, Label::kNear);
-
-  bind(&short_string);
-  test(length, length);
-  j(zero, &done, Label::kNear);
-
-  bind(&short_loop);
-  mov_b(scratch, Operand(source, 0));
-  mov_b(Operand(destination, 0), scratch);
-  inc(source);
-  inc(destination);
-  dec(length);
-  j(not_zero, &short_loop);
-
-  bind(&done);
-}
-
-
 void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
                                                 Register end_address,
                                                 Register filler) {
@@ -2153,20 +1922,6 @@
   return has_frame_ || !stub->SometimesSetsUpAFrame();
 }
 
-
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
-  // The assert checks that the constants for the maximum number of digits
-  // for an array index cached in the hash field and the number of bits
-  // reserved for it does not conflict.
-  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
-         (1 << String::kArrayIndexValueBits));
-  if (!index.is(hash)) {
-    mov(index, hash);
-  }
-  DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
-}
-
-
 void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                  int num_arguments,
                                  SaveFPRegsMode save_doubles) {
@@ -3090,7 +2845,7 @@
   cmp(index, FieldOperand(string, String::kLengthOffset));
   Check(less, kIndexIsTooLarge);
 
-  cmp(index, Immediate(Smi::FromInt(0)));
+  cmp(index, Immediate(Smi::kZero));
   Check(greater_equal, kIndexIsNegative);
 
   // Restore the index
@@ -3343,7 +3098,7 @@
 
   // For all objects but the receiver, check that the cache is empty.
   EnumLength(edx, ebx);
-  cmp(edx, Immediate(Smi::FromInt(0)));
+  cmp(edx, Immediate(Smi::kZero));
   j(not_equal, call_runtime);
 
   bind(&start);
@@ -3375,20 +3130,21 @@
   ExternalReference new_space_allocation_top =
       ExternalReference::new_space_allocation_top_address(isolate());
   const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
-  const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+  const int kMementoLastWordOffset =
+      kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
 
   // Bail out if the object is not in new space.
   JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
   // If the object is in new space, we need to check whether it is on the same
   // page as the current top.
-  lea(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+  lea(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
   xor_(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
   test(scratch_reg, Immediate(~Page::kPageAlignmentMask));
   j(zero, &top_check);
   // The object is on a different page than allocation top. Bail out if the
   // object sits on the page boundary as no memento can follow and we cannot
   // touch the memory following it.
-  lea(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+  lea(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
   xor_(scratch_reg, receiver_reg);
   test(scratch_reg, Immediate(~Page::kPageAlignmentMask));
   j(not_zero, no_memento_found);
@@ -3397,9 +3153,9 @@
   // If top is on the same page as the current object, we need to check whether
   // we are below top.
   bind(&top_check);
-  lea(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+  lea(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
   cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
-  j(greater, no_memento_found);
+  j(greater_equal, no_memento_found);
   // Memento map check.
   bind(&map_check);
   mov(scratch_reg, Operand(receiver_reg, kMementoMapOffset));
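
The memento-check hunks above fix a boundary case: the code now tests the
memento's last word (inclusive) instead of its exclusive end, and the top
check uses greater_equal. With the old end offset, an object ending exactly on
a page boundary would probe the first word of the next page. The page
arithmetic, as a standalone check (the page size here is an arbitrary example
value, not necessarily V8's):

    #include <assert.h>
    #include <stdint.h>

    const uintptr_t kPageSize = (uintptr_t)1 << 19;
    const uintptr_t kPageMask = kPageSize - 1;

    int SamePage(uintptr_t a, uintptr_t b) {
      return ((a ^ b) & ~kPageMask) == 0;
    }

    int main() {
      uintptr_t last_word = kPageSize - 8;  // object ends exactly at a page
      uintptr_t excl_end = kPageSize;       // exclusive end: the next page
      assert(!SamePage(last_word, excl_end));
      assert(SamePage(last_word, excl_end - 8));  // last word stays in-page
      return 0;
    }
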
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 2220ca7..e8ff59d 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -391,11 +391,6 @@
   // Compare instance type for map.
   void CmpInstanceType(Register map, InstanceType type);
 
-  // Check if a map for a JSObject indicates that the object has fast elements.
-  // Jump to the specified label if it does not.
-  void CheckFastElements(Register map, Label* fail,
-                         Label::Distance distance = Label::kFar);
-
   // Check if a map for a JSObject indicates that the object can have both smi
   // and HeapObject elements.  Jump to the specified label if it does not.
   void CheckFastObjectElements(Register map, Label* fail,
@@ -604,18 +599,8 @@
   // ---------------------------------------------------------------------------
   // Inline caching support
 
-  // Generate code for checking access rights - used for security checks
-  // on access to global objects across environments. The holder register
-  // is left untouched, but the scratch register is clobbered.
-  void CheckAccessGlobalProxy(Register holder_reg, Register scratch1,
-                              Register scratch2, Label* miss);
-
   void GetNumberHash(Register r0, Register scratch);
 
-  void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
-                                Register r0, Register r1, Register r2,
-                                Register result);
-
   // ---------------------------------------------------------------------------
   // Allocation support
 
@@ -685,12 +670,6 @@
   void AllocateJSValue(Register result, Register constructor, Register value,
                        Register scratch, Label* gc_required);
 
-  // Copy memory, byte-by-byte, from source to destination.  Not optimized for
-  // long or aligned copies.
-  // The contents of index and scratch are destroyed.
-  void CopyBytes(Register source, Register destination, Register length,
-                 Register scratch);
-
   // Initialize fields with filler values.  Fields starting at |current_address|
   // not including |end_address| are overwritten with the value in |filler|.  At
   // the end the loop, |current_address| takes the value of |end_address|.
@@ -723,12 +702,6 @@
   void TryGetFunctionPrototype(Register function, Register result,
                                Register scratch, Label* miss);
 
-  // Picks out an array index from the hash field.
-  // Register use:
-  //   hash - holds the index's hash. Clobbered.
-  //   index - holds the overwritten index on exit.
-  void IndexFromHash(Register hash, Register index);
-
   // ---------------------------------------------------------------------------
   // Runtime calls
 
@@ -821,7 +794,10 @@
   void Drop(int element_count);
 
   void Call(Label* target) { call(target); }
-  void Call(Handle<Code> target, RelocInfo::Mode rmode) { call(target, rmode); }
+  void Call(Handle<Code> target, RelocInfo::Mode rmode,
+            TypeFeedbackId id = TypeFeedbackId::None()) {
+    call(target, rmode, id);
+  }
   void Jump(Handle<Code> target, RelocInfo::Mode rmode) { jmp(target, rmode); }
   void Push(Register src) { push(src); }
   void Push(const Operand& src) { push(src); }
diff --git a/src/ic/access-compiler-data.h b/src/ic/access-compiler-data.h
new file mode 100644
index 0000000..dffcac7
--- /dev/null
+++ b/src/ic/access-compiler-data.h
@@ -0,0 +1,48 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IC_ACCESS_COMPILER_DATA_H_
+#define V8_IC_ACCESS_COMPILER_DATA_H_
+
+#include <memory>
+
+#include "src/allocation.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+class AccessCompilerData {
+ public:
+  AccessCompilerData() {}
+
+  bool IsInitialized() const { return load_calling_convention_ != nullptr; }
+  void Initialize(int load_register_count, const Register* load_registers,
+                  int store_register_count, const Register* store_registers) {
+    load_calling_convention_.reset(NewArray<Register>(load_register_count));
+    for (int i = 0; i < load_register_count; ++i) {
+      load_calling_convention_[i] = load_registers[i];
+    }
+    store_calling_convention_.reset(NewArray<Register>(store_register_count));
+    for (int i = 0; i < store_register_count; ++i) {
+      store_calling_convention_[i] = store_registers[i];
+    }
+  }
+
+  Register* load_calling_convention() { return load_calling_convention_.get(); }
+  Register* store_calling_convention() {
+    return store_calling_convention_.get();
+  }
+
+ private:
+  std::unique_ptr<Register[]> load_calling_convention_;
+  std::unique_ptr<Register[]> store_calling_convention_;
+
+  DISALLOW_COPY_AND_ASSIGN(AccessCompilerData);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_IC_ACCESS_COMPILER_DATA_H_
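
AccessCompilerData replaces the function-local static register tables that
GetCallingConvention used to hand out (removed further below) with per-isolate
storage that is filled in on first use. A self-contained sketch of the same
lazy-initialization pattern, with int standing in for V8's Register type:

    #include <assert.h>
    #include <memory>

    class CallingConventionData {
     public:
      bool IsInitialized() const { return load_ != nullptr; }
      void Initialize(int count, const int* registers) {
        load_.reset(new int[count]);
        for (int i = 0; i < count; ++i) load_[i] = registers[i];
      }
      int* load_calling_convention() { return load_.get(); }

     private:
      std::unique_ptr<int[]> load_;
    };

    int main() {
      CallingConventionData data;    // in V8 this hangs off the Isolate
      const int regs[] = {2, 1, 3};  // stand-ins for receiver, name, scratch
      if (!data.IsInitialized()) data.Initialize(3, regs);  // first use only
      assert(data.IsInitialized());
      assert(data.load_calling_convention()[0] == 2);
      return 0;
    }
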
diff --git a/src/ic/access-compiler.cc b/src/ic/access-compiler.cc
index bb6b5e5..d92f9c0 100644
--- a/src/ic/access-compiler.cc
+++ b/src/ic/access-compiler.cc
@@ -4,7 +4,6 @@
 
 #include "src/ic/access-compiler.h"
 
-
 namespace v8 {
 namespace internal {
 
@@ -42,13 +41,17 @@
   GenerateTailCall(masm, code);
 }
 
-
-Register* PropertyAccessCompiler::GetCallingConvention(Code::Kind kind) {
+Register* PropertyAccessCompiler::GetCallingConvention(Isolate* isolate,
+                                                       Code::Kind kind) {
+  AccessCompilerData* data = isolate->access_compiler_data();
+  if (!data->IsInitialized()) {
+    InitializePlatformSpecific(data);
+  }
   if (kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC) {
-    return load_calling_convention();
+    return data->load_calling_convention();
   }
   DCHECK(kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC);
-  return store_calling_convention();
+  return data->store_calling_convention();
 }
 
 
diff --git a/src/ic/access-compiler.h b/src/ic/access-compiler.h
index ecc5c08..3d488e8 100644
--- a/src/ic/access-compiler.h
+++ b/src/ic/access-compiler.h
@@ -6,13 +6,13 @@
 #define V8_IC_ACCESS_COMPILER_H_
 
 #include "src/code-stubs.h"
+#include "src/ic/access-compiler-data.h"
 #include "src/macro-assembler.h"
 #include "src/objects.h"
 
 namespace v8 {
 namespace internal {
 
-
 class PropertyAccessCompiler BASE_EMBEDDED {
  public:
   static Builtins::Name MissBuiltin(Code::Kind kind) {
@@ -36,7 +36,7 @@
  protected:
   PropertyAccessCompiler(Isolate* isolate, Code::Kind kind,
                          CacheHolderFlag cache_holder)
-      : registers_(GetCallingConvention(kind)),
+      : registers_(GetCallingConvention(isolate, kind)),
         kind_(kind),
         cache_holder_(cache_holder),
         isolate_(isolate),
@@ -59,11 +59,6 @@
   Register scratch1() const { return registers_[2]; }
   Register scratch2() const { return registers_[3]; }
 
-  static Register* GetCallingConvention(Code::Kind);
-  static Register* load_calling_convention();
-  static Register* store_calling_convention();
-  static Register* keyed_store_calling_convention();
-
   Register* registers_;
 
   static void GenerateTailCall(MacroAssembler* masm, Handle<Code> code);
@@ -72,6 +67,9 @@
   Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<Name> name);
 
  private:
+  static Register* GetCallingConvention(Isolate* isolate, Code::Kind kind);
+  static void InitializePlatformSpecific(AccessCompilerData* data);
+
   Code::Kind kind_;
   CacheHolderFlag cache_holder_;
 
diff --git a/src/ic/arm/access-compiler-arm.cc b/src/ic/arm/access-compiler-arm.cc
index 9ce485e..e501cdc 100644
--- a/src/ic/arm/access-compiler-arm.cc
+++ b/src/ic/arm/access-compiler-arm.cc
@@ -17,24 +17,22 @@
   __ Jump(code, RelocInfo::CODE_TARGET);
 }
 
-
-Register* PropertyAccessCompiler::load_calling_convention() {
-  // receiver, name, scratch1, scratch2, scratch3.
+void PropertyAccessCompiler::InitializePlatformSpecific(
+    AccessCompilerData* data) {
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, r3, r0, r4};
-  return registers;
-}
 
+  // Load calling convention.
+  // receiver, name, scratch1, scratch2, scratch3.
+  Register load_registers[] = {receiver, name, r3, r0, r4};
 
-Register* PropertyAccessCompiler::store_calling_convention() {
+  // Store calling convention.
   // receiver, name, scratch1, scratch2.
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, r3, r4};
-  return registers;
-}
+  Register store_registers[] = {receiver, name, r3, r4};
 
+  data->Initialize(arraysize(load_registers), load_registers,
+                   arraysize(store_registers), store_registers);
+}
 
 #undef __
 }  // namespace internal
diff --git a/src/ic/arm/handler-compiler-arm.cc b/src/ic/arm/handler-compiler-arm.cc
index 691fe3d..6145d43 100644
--- a/src/ic/arm/handler-compiler-arm.cc
+++ b/src/ic/arm/handler-compiler-arm.cc
@@ -407,10 +407,34 @@
   }
 }
 
+void PropertyHandlerCompiler::GenerateAccessCheck(
+    Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
+    Label* miss, bool compare_native_contexts_only) {
+  Label done;
+  // Load current native context.
+  __ ldr(scratch1, NativeContextMemOperand());
+  // Load expected native context.
+  __ LoadWeakValue(scratch2, native_context_cell, miss);
+  __ cmp(scratch1, scratch2);
+
+  if (!compare_native_contexts_only) {
+    __ b(eq, &done);
+
+    // Compare security tokens of current and expected native contexts.
+    __ ldr(scratch1,
+           ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+    __ ldr(scratch2,
+           ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+    __ cmp(scratch1, scratch2);
+  }
+  __ b(ne, miss);
+
+  __ bind(&done);
+}
 
 Register PropertyHandlerCompiler::CheckPrototypes(
     Register object_reg, Register holder_reg, Register scratch1,
-    Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+    Register scratch2, Handle<Name> name, Label* miss,
     ReturnHolder return_what) {
   Handle<Map> receiver_map = map();
 
@@ -429,17 +453,6 @@
     __ b(ne, miss);
   }
 
-  // The prototype chain of primitives (and their JSValue wrappers) depends
-  // on the native context, which can't be guarded by validity cells.
-  // |object_reg| holds the native context specific prototype in this case;
-  // we need to check its map.
-  if (check == CHECK_ALL_MAPS) {
-    __ ldr(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
-    Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
-    __ CmpWeakValue(scratch1, cell, scratch2);
-    __ b(ne, miss);
-  }
-
   // Keep track of the current object in register reg.
   Register reg = object_reg;
   int depth = 0;
@@ -449,46 +462,28 @@
     current = isolate()->global_object();
   }
 
-  // Check access rights to the global object.  This has to happen after
-  // the map check so that we know that the object is actually a global
-  // object.
-  // This allows us to install generated handlers for accesses to the
-  // global proxy (as opposed to using slow ICs). See corresponding code
-  // in LookupForRead().
-  if (receiver_map->IsJSGlobalProxyMap()) {
-    __ CheckAccessGlobalProxy(reg, scratch2, miss);
-  }
-
-  Handle<JSObject> prototype = Handle<JSObject>::null();
-  Handle<Map> current_map = receiver_map;
+  Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
+                          isolate());
   Handle<Map> holder_map(holder()->map());
   // Traverse the prototype chain and check the maps in the prototype chain for
   // fast and global objects or do negative lookup for normal objects.
   while (!current_map.is_identical_to(holder_map)) {
     ++depth;
 
-    // Only global objects and objects that do not require access
-    // checks are allowed in stubs.
-    DCHECK(current_map->IsJSGlobalProxyMap() ||
-           !current_map->is_access_check_needed());
-
-    prototype = handle(JSObject::cast(current_map->prototype()));
     if (current_map->IsJSGlobalObjectMap()) {
       GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
                                 name, scratch2, miss);
     } else if (current_map->is_dictionary_map()) {
       DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
-      if (!name->IsUniqueName()) {
-        DCHECK(name->IsString());
-        name = factory()->InternalizeString(Handle<String>::cast(name));
-      }
+      DCHECK(name->IsUniqueName());
       DCHECK(current.is_null() ||
              current->property_dictionary()->FindEntry(name) ==
                  NameDictionary::kNotFound);
 
       if (depth > 1) {
-        // TODO(jkummerow): Cache and re-use weak cell.
-        __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+        Handle<WeakCell> weak_cell =
+            Map::GetOrCreatePrototypeWeakCell(current, isolate());
+        __ LoadWeakValue(reg, weak_cell, miss);
       }
       GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
                                        scratch2);
@@ -496,7 +491,7 @@
 
     reg = holder_reg;  // From now on the object will be in holder_reg.
     // Go to the next object in the prototype chain.
-    current = prototype;
+    current = handle(JSObject::cast(current_map->prototype()));
     current_map = handle(current->map());
   }
 
@@ -507,7 +502,9 @@
 
   bool return_holder = return_what == RETURN_HOLDER;
   if (return_holder && depth != 0) {
-    __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+    Handle<WeakCell> weak_cell =
+        Map::GetOrCreatePrototypeWeakCell(current, isolate());
+    __ LoadWeakValue(reg, weak_cell, miss);
   }
 
   // Return the register containing the holder.
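
The GenerateAccessCheck added above emits a two-stage comparison: first the
execution-time native context against the expected one, then (unless
compare_native_contexts_only is set) their security tokens. A hedged
straight-line sketch of that decision in plain C++, with contexts and tokens
modeled as opaque pointers; returning false corresponds to branching to the
miss label:

    #include <cstdio>

    struct NativeContext {
      const void* security_token;
    };

    // Mirrors the emitted control flow: fall through to "done" on success,
    // branch to "miss" on failure.
    bool AccessCheckPasses(const NativeContext* current,
                           const NativeContext* expected,
                           bool compare_native_contexts_only) {
      if (current == expected) return true;            // __ b(eq, &done)
      if (compare_native_contexts_only) return false;  // __ b(ne, miss)
      // Compare security tokens of current and expected native contexts.
      return current->security_token == expected->security_token;
    }

    int main() {
      int token = 0;
      NativeContext a{&token}, b{&token};
      std::printf("%d\n", AccessCheckPasses(&a, &b, false));  // 1: same token
      std::printf("%d\n", AccessCheckPasses(&a, &b, true));   // 0: contexts differ
      return 0;
    }
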
diff --git a/src/ic/arm/ic-arm.cc b/src/ic/arm/ic-arm.cc
index 10ec578..babf497 100644
--- a/src/ic/arm/ic-arm.cc
+++ b/src/ic/arm/ic-arm.cc
@@ -19,18 +19,6 @@
 
 #define __ ACCESS_MASM(masm)
 
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
-                                            Label* global_object) {
-  // Register usage:
-  //   type: holds the receiver instance type on entry.
-  __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
-  __ b(eq, global_object);
-  __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
-  __ b(eq, global_object);
-}
-
-
 // Helper function used from LoadIC GenerateNormal.
 //
 // elements: Property dictionary. It is not clobbered if a jump to the miss
@@ -126,138 +114,6 @@
                  kDontSaveFPRegs);
 }
 
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS objects.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
-                                           Register receiver, Register map,
-                                           Register scratch,
-                                           int interceptor_bit, Label* slow) {
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, slow);
-  // Get the map of the receiver.
-  __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  // Check bit field.
-  __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
-  __ tst(scratch,
-         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
-  __ b(ne, slow);
-  // Check that the object is some kind of JS object EXCEPT JS Value type.
-  // In the case that the object is a value-wrapper object,
-  // we enter the runtime system to make sure that indexing into string
-  // objects works as intended.
-  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-  __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
-  __ cmp(scratch, Operand(JS_OBJECT_TYPE));
-  __ b(lt, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
-                                  Register key, Register elements,
-                                  Register scratch1, Register scratch2,
-                                  Register result, Label* slow) {
-  // Register use:
-  //
-  // receiver - holds the receiver on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // key      - holds the smi key on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // result   - holds the result on exit if the load succeeded.
-  //            Allowed to be the same as 'receiver' or 'key'.
-  //            Unchanged on bailout so 'receiver' and 'key' can be safely
-  //            used by further computation.
-  //
-  // Scratch registers:
-  //
-  // elements - holds the elements of the receiver and its prototypes.
-  //
-  // scratch1 - used to hold elements length, bit fields, base addresses.
-  //
-  // scratch2 - used to hold maps, prototypes, and the loaded value.
-  Label check_prototypes, check_next_prototype;
-  Label done, in_bounds, absent;
-
-  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ AssertFastElements(elements);
-
-  // Check that the key (index) is within bounds.
-  __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ cmp(key, Operand(scratch1));
-  __ b(lo, &in_bounds);
-  // Out-of-bounds. Check the prototype chain to see if we can just return
-  // 'undefined'.
-  __ cmp(key, Operand(0));
-  __ b(lt, slow);  // Negative keys can't take the fast OOB path.
-  __ bind(&check_prototypes);
-  __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ bind(&check_next_prototype);
-  __ ldr(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
-  // scratch2: current prototype
-  __ CompareRoot(scratch2, Heap::kNullValueRootIndex);
-  __ b(eq, &absent);
-  __ ldr(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
-  __ ldr(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
-  // elements: elements of current prototype
-  // scratch2: map of current prototype
-  __ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE);
-  __ b(lo, slow);
-  __ ldrb(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
-  __ tst(scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
-                           (1 << Map::kHasIndexedInterceptor)));
-  __ b(ne, slow);
-  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
-  __ b(ne, slow);
-  __ jmp(&check_next_prototype);
-
-  __ bind(&absent);
-  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
-  __ jmp(&done);
-
-  __ bind(&in_bounds);
-  // Fast case: Do the load.
-  __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ ldr(scratch2, MemOperand::PointerAddressFromSmiKey(scratch1, key));
-  __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
-  // In case the loaded value is the_hole we have to check the prototype chain.
-  __ b(eq, &check_prototypes);
-  __ mov(result, scratch2);
-  __ bind(&done);
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if a key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
-                                 Register map, Register hash,
-                                 Label* index_string, Label* not_unique) {
-  // The key is not a smi.
-  Label unique;
-  // Is it a name?
-  __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
-  __ b(hi, not_unique);
-  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
-  __ b(eq, &unique);
-
-  // Is the string an array index, with cached numeric value?
-  __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
-  __ tst(hash, Operand(Name::kContainsCachedArrayIndexMask));
-  __ b(eq, index_string);
-
-  // Is the string internalized? We know it's a string, so a single
-  // bit test is enough.
-  // map: key map
-  __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
-  STATIC_ASSERT(kInternalizedTag == 0);
-  __ tst(hash, Operand(kIsNotInternalizedMask));
-  __ b(ne, not_unique);
-
-  __ bind(&unique);
-}
-
 void LoadIC::GenerateNormal(MacroAssembler* masm) {
   Register dictionary = r0;
   DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
@@ -340,106 +196,6 @@
   __ TailCallRuntime(Runtime::kKeyedGetProperty);
 }
 
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
-  // The return address is in lr.
-  Label slow, check_name, index_smi, index_name, property_array_property;
-  Label probe_dictionary, check_number_dictionary;
-
-  Register key = LoadDescriptor::NameRegister();
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  DCHECK(key.is(r2));
-  DCHECK(receiver.is(r1));
-
-  Isolate* isolate = masm->isolate();
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &check_name);
-  __ bind(&index_smi);
-  // Now the key is known to be a smi. This place is also jumped to from below
-  // where a numeric string is converted to a smi.
-
-  GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3,
-                                 Map::kHasIndexedInterceptor, &slow);
-
-  // Check the receiver's map to see if it has fast elements.
-  __ CheckFastElements(r0, r3, &check_number_dictionary);
-
-  GenerateFastArrayLoad(masm, receiver, key, r0, r3, r4, r0, &slow);
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, r4,
-                      r3);
-  __ Ret();
-
-  __ bind(&check_number_dictionary);
-  __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset));
-
-  // Check whether the elements object is a number dictionary.
-  // r3: elements map
-  // r4: elements
-  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
-  __ cmp(r3, ip);
-  __ b(ne, &slow);
-  __ SmiUntag(r0, key);
-  __ LoadFromNumberDictionary(&slow, r4, key, r0, r0, r3, r5);
-  __ Ret();
-
-  // Slow case, key and receiver still in r2 and r1.
-  __ bind(&slow);
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, r4,
-                      r3);
-  GenerateRuntimeGetProperty(masm);
-
-  __ bind(&check_name);
-  GenerateKeyNameCheck(masm, key, r0, r3, &index_name, &slow);
-
-  GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3,
-                                 Map::kHasNamedInterceptor, &slow);
-
-  // If the receiver is a fast-case object, check the stub cache. Otherwise
-  // probe the dictionary.
-  __ ldr(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-  __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
-  __ cmp(r4, ip);
-  __ b(eq, &probe_dictionary);
-
-  // The handlers in the stub cache expect a vector and slot. Since we won't
-  // change the IC from any downstream misses, a dummy vector can be used.
-  Register vector = LoadWithVectorDescriptor::VectorRegister();
-  Register slot = LoadWithVectorDescriptor::SlotRegister();
-  DCHECK(!AreAliased(vector, slot, r4, r5, r6, r9));
-  Handle<TypeFeedbackVector> dummy_vector =
-      TypeFeedbackVector::DummyVector(masm->isolate());
-  int slot_index = dummy_vector->GetIndex(
-      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
-  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
-  __ mov(slot, Operand(Smi::FromInt(slot_index)));
-
-  masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, r4, r5,
-                                                    r6, r9);
-  // Cache miss.
-  GenerateMiss(masm);
-
-  // Do a quick inline probe of the receiver's dictionary, if it
-  // exists.
-  __ bind(&probe_dictionary);
-  // r3: elements
-  __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
-  GenerateGlobalInstanceTypeCheck(masm, r0, &slow);
-  // Load the property to r0.
-  GenerateDictionaryLoad(masm, &slow, r3, key, r0, r5, r4);
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
-                      r4, r3);
-  __ Ret();
-
-  __ bind(&index_name);
-  __ IndexFromHash(r3, key);
-  // Now jump to the place where smi keys are handled.
-  __ jmp(&index_smi);
-}
-
-
 static void StoreIC_PushArgs(MacroAssembler* masm) {
   __ Push(StoreWithVectorDescriptor::ValueRegister(),
           StoreWithVectorDescriptor::SlotRegister(),
diff --git a/src/ic/arm64/access-compiler-arm64.cc b/src/ic/arm64/access-compiler-arm64.cc
index 6273633..8cbb527 100644
--- a/src/ic/arm64/access-compiler-arm64.cc
+++ b/src/ic/arm64/access-compiler-arm64.cc
@@ -25,24 +25,23 @@
 // registers are actually scratch registers, and which are important. For now,
 // we use the same assignments as ARM to remain on the safe side.
 
-Register* PropertyAccessCompiler::load_calling_convention() {
-  // receiver, name, scratch1, scratch2, scratch3.
+void PropertyAccessCompiler::InitializePlatformSpecific(
+    AccessCompilerData* data) {
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, x3, x0, x4};
-  return registers;
+
+  // Load calling convention.
+  // receiver, name, scratch1, scratch2, scratch3.
+  Register load_registers[] = {receiver, name, x3, x0, x4};
+
+  // Store calling convention.
+  // receiver, name, scratch1, scratch2.
+  Register store_registers[] = {receiver, name, x3, x4};
+
+  data->Initialize(arraysize(load_registers), load_registers,
+                   arraysize(store_registers), store_registers);
 }
 
-
-Register* PropertyAccessCompiler::store_calling_convention() {
-  // receiver, name, scratch1, scratch2.
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, x3, x4};
-  return registers;
-}
-
-
 #undef __
 }  // namespace internal
 }  // namespace v8
diff --git a/src/ic/arm64/handler-compiler-arm64.cc b/src/ic/arm64/handler-compiler-arm64.cc
index 3f97fdd..58d0bb7 100644
--- a/src/ic/arm64/handler-compiler-arm64.cc
+++ b/src/ic/arm64/handler-compiler-arm64.cc
@@ -437,10 +437,34 @@
   }
 }
 
+void PropertyHandlerCompiler::GenerateAccessCheck(
+    Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
+    Label* miss, bool compare_native_contexts_only) {
+  Label done;
+  // Load current native context.
+  __ Ldr(scratch1, NativeContextMemOperand());
+  // Load expected native context.
+  __ LoadWeakValue(scratch2, native_context_cell, miss);
+  __ Cmp(scratch1, scratch2);
+
+  if (!compare_native_contexts_only) {
+    __ B(eq, &done);
+
+    // Compare security tokens of current and expected native contexts.
+    __ Ldr(scratch1,
+           ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+    __ Ldr(scratch2,
+           ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+    __ Cmp(scratch1, scratch2);
+  }
+  __ B(ne, miss);
+
+  __ bind(&done);
+}
 
 Register PropertyHandlerCompiler::CheckPrototypes(
     Register object_reg, Register holder_reg, Register scratch1,
-    Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+    Register scratch2, Handle<Name> name, Label* miss,
     ReturnHolder return_what) {
   Handle<Map> receiver_map = map();
 
@@ -454,19 +478,10 @@
     DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
     __ Mov(scratch1, Operand(validity_cell));
     __ Ldr(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
-    __ Cmp(scratch1, Operand(Smi::FromInt(Map::kPrototypeChainValid)));
-    __ B(ne, miss);
-  }
-
-  // The prototype chain of primitives (and their JSValue wrappers) depends
-  // on the native context, which can't be guarded by validity cells.
-  // |object_reg| holds the native context specific prototype in this case;
-  // we need to check its map.
-  if (check == CHECK_ALL_MAPS) {
-    __ Ldr(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
-    Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
-    __ CmpWeakValue(scratch1, cell, scratch2);
-    __ B(ne, miss);
+    // Compare scratch1 against Map::kPrototypeChainValid.
+    static_assert(Map::kPrototypeChainValid == 0,
+                  "Map::kPrototypeChainValid has unexpected value");
+    __ Cbnz(scratch1, miss);
   }
 
   // Keep track of the current object in register reg.
@@ -478,46 +493,27 @@
     current = isolate()->global_object();
   }
 
-  // Check access rights to the global object.  This has to happen after
-  // the map check so that we know that the object is actually a global
-  // object.
-  // This allows us to install generated handlers for accesses to the
-  // global proxy (as opposed to using slow ICs). See corresponding code
-  // in LookupForRead().
-  if (receiver_map->IsJSGlobalProxyMap()) {
-    UseScratchRegisterScope temps(masm());
-    __ CheckAccessGlobalProxy(reg, scratch2, temps.AcquireX(), miss);
-  }
-
-  Handle<JSObject> prototype = Handle<JSObject>::null();
-  Handle<Map> current_map = receiver_map;
+  Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
+                          isolate());
   Handle<Map> holder_map(holder()->map());
   // Traverse the prototype chain and check the maps in the prototype chain for
   // fast and global objects or do negative lookup for normal objects.
   while (!current_map.is_identical_to(holder_map)) {
     ++depth;
 
-    // Only global objects and objects that do not require access
-    // checks are allowed in stubs.
-    DCHECK(current_map->IsJSGlobalProxyMap() ||
-           !current_map->is_access_check_needed());
-
-    prototype = handle(JSObject::cast(current_map->prototype()));
     if (current_map->IsJSGlobalObjectMap()) {
       GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
                                 name, scratch2, miss);
     } else if (current_map->is_dictionary_map()) {
       DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
-      if (!name->IsUniqueName()) {
-        DCHECK(name->IsString());
-        name = factory()->InternalizeString(Handle<String>::cast(name));
-      }
+      DCHECK(name->IsUniqueName());
       DCHECK(current.is_null() || (current->property_dictionary()->FindEntry(
                                        name) == NameDictionary::kNotFound));
 
       if (depth > 1) {
-        // TODO(jkummerow): Cache and re-use weak cell.
-        __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+        Handle<WeakCell> weak_cell =
+            Map::GetOrCreatePrototypeWeakCell(current, isolate());
+        __ LoadWeakValue(reg, weak_cell, miss);
       }
       GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
                                        scratch2);
@@ -525,7 +521,7 @@
 
     reg = holder_reg;  // From now on the object will be in holder_reg.
     // Go to the next object in the prototype chain.
-    current = prototype;
+    current = handle(JSObject::cast(current_map->prototype()));
     current_map = handle(current->map());
   }
 
@@ -536,7 +532,9 @@
 
   bool return_holder = return_what == RETURN_HOLDER;
   if (return_holder && depth != 0) {
-    __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+    Handle<WeakCell> weak_cell =
+        Map::GetOrCreatePrototypeWeakCell(current, isolate());
+    __ LoadWeakValue(reg, weak_cell, miss);
   }
 
   // Return the register containing the holder.
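
In both CheckPrototypes bodies above, Map::GetOrCreatePrototypeWeakCell
replaces factory()->NewWeakCell(current), so every handler guarding the same
prototype now reuses one cached cell instead of allocating a fresh one per
compilation (resolving the old TODO). A standalone sketch of that
get-or-create caching, using std::weak_ptr as a stand-in for V8's WeakCell:

    #include <cassert>
    #include <memory>

    struct Prototype {
      // Cached weak cell; created on first request, reused afterwards.
      std::weak_ptr<Prototype> weak_cell;
    };

    std::weak_ptr<Prototype> GetOrCreateWeakCell(
        const std::shared_ptr<Prototype>& proto) {
      if (proto->weak_cell.expired()) proto->weak_cell = proto;  // create once
      return proto->weak_cell;  // every later caller shares this cell
    }

    int main() {
      auto proto = std::make_shared<Prototype>();
      auto cell1 = GetOrCreateWeakCell(proto);
      auto cell2 = GetOrCreateWeakCell(proto);
      // Both requests observe the same underlying object.
      assert(cell1.lock().get() == cell2.lock().get());
      return 0;
    }
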
diff --git a/src/ic/arm64/ic-arm64.cc b/src/ic/arm64/ic-arm64.cc
index fa9d7c1..0ced207 100644
--- a/src/ic/arm64/ic-arm64.cc
+++ b/src/ic/arm64/ic-arm64.cc
@@ -15,18 +15,6 @@
 
 #define __ ACCESS_MASM(masm)
 
-
-// "type" holds an instance type on entry and is not clobbered.
-// The generated code branches to "global_object" if type is any kind of
-// global JS object.
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
-                                            Label* global_object) {
-  __ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
-  __ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
-  __ B(eq, global_object);
-}
-
-
 // Helper function used from LoadIC GenerateNormal.
 //
 // elements: Property dictionary. It is not clobbered if a jump to the miss
@@ -116,144 +104,6 @@
                  kDontSaveFPRegs);
 }
 
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS objects and returns the map of the
-// receiver in 'map_scratch' if the receiver is not a SMI.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
-                                           Register receiver,
-                                           Register map_scratch,
-                                           Register scratch,
-                                           int interceptor_bit, Label* slow) {
-  DCHECK(!AreAliased(map_scratch, scratch));
-
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, slow);
-  // Get the map of the receiver.
-  __ Ldr(map_scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  // Check bit field.
-  __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kBitFieldOffset));
-  __ Tbnz(scratch, Map::kIsAccessCheckNeeded, slow);
-  __ Tbnz(scratch, interceptor_bit, slow);
-
-  // Check that the object is some kind of JS object EXCEPT JS Value type.
-  // In the case that the object is a value-wrapper object, we enter the
-  // runtime system to make sure that indexing into string objects works
-  // as intended.
-  STATIC_ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-  __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
-  __ Cmp(scratch, JS_OBJECT_TYPE);
-  __ B(lt, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-//
-// receiver - holds the receiver on entry.
-//            Unchanged unless 'result' is the same register.
-//
-// key      - holds the smi key on entry.
-//            Unchanged unless 'result' is the same register.
-//
-// elements - holds the elements of the receiver and its prototypes. Clobbered.
-//
-// result   - holds the result on exit if the load succeeded.
-//            Allowed to be the same as 'receiver' or 'key'.
-//            Unchanged on bailout so 'receiver' and 'key' can be safely
-//            used by further computation.
-static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
-                                  Register key, Register elements,
-                                  Register scratch1, Register scratch2,
-                                  Register result, Label* slow) {
-  DCHECK(!AreAliased(receiver, key, elements, scratch1, scratch2));
-
-  Label check_prototypes, check_next_prototype;
-  Label done, in_bounds, absent;
-
-  // Check for fast array.
-  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ AssertFastElements(elements);
-
-  // Check that the key (index) is within bounds.
-  __ Ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Cmp(key, scratch1);
-  __ B(lo, &in_bounds);
-
-  // Out of bounds. Check the prototype chain to see if we can just return
-  // 'undefined'.
-  __ Cmp(key, Operand(Smi::FromInt(0)));
-  __ B(lt, slow);  // Negative keys can't take the fast OOB path.
-  __ Bind(&check_prototypes);
-  __ Ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ Bind(&check_next_prototype);
-  __ Ldr(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
-  // scratch2: current prototype
-  __ JumpIfRoot(scratch2, Heap::kNullValueRootIndex, &absent);
-  __ Ldr(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
-  __ Ldr(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
-  // elements: elements of current prototype
-  // scratch2: map of current prototype
-  __ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE);
-  __ B(lo, slow);
-  __ Ldrb(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
-  __ Tbnz(scratch1, Map::kIsAccessCheckNeeded, slow);
-  __ Tbnz(scratch1, Map::kHasIndexedInterceptor, slow);
-  __ JumpIfNotRoot(elements, Heap::kEmptyFixedArrayRootIndex, slow);
-  __ B(&check_next_prototype);
-
-  __ Bind(&absent);
-  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
-  __ B(&done);
-
-  __ Bind(&in_bounds);
-  // Fast case: Do the load.
-  __ Add(scratch1, elements, FixedArray::kHeaderSize - kHeapObjectTag);
-  __ SmiUntag(scratch2, key);
-  __ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
-
-  // In case the loaded value is the_hole we have to check the prototype chain.
-  __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, &check_prototypes);
-
-  // Move the value to the result register.
-  // 'result' can alias with 'receiver' or 'key' but these two must be
-  // preserved if we jump to 'slow'.
-  __ Mov(result, scratch2);
-  __ Bind(&done);
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if a key is a unique name.
-// The map of the key is returned in 'map_scratch'.
-// If the jump to 'index_string' is done the hash of the key is left
-// in 'hash_scratch'.
-static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
-                                 Register map_scratch, Register hash_scratch,
-                                 Label* index_string, Label* not_unique) {
-  DCHECK(!AreAliased(key, map_scratch, hash_scratch));
-
-  // Is the key a name?
-  Label unique;
-  __ JumpIfObjectType(key, map_scratch, hash_scratch, LAST_UNIQUE_NAME_TYPE,
-                      not_unique, hi);
-  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
-  __ B(eq, &unique);
-
-  // Is the string an array index with cached numeric value?
-  __ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset));
-  __ TestAndBranchIfAllClear(hash_scratch, Name::kContainsCachedArrayIndexMask,
-                             index_string);
-
-  // Is the string internalized? We know it's a string, so a single bit test is
-  // enough.
-  __ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
-  STATIC_ASSERT(kInternalizedTag == 0);
-  __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique);
-
-  __ Bind(&unique);
-  // Fall through if the key is a unique name.
-}
-
 void LoadIC::GenerateNormal(MacroAssembler* masm) {
   Register dictionary = x0;
   DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
@@ -323,127 +173,6 @@
   __ TailCallRuntime(Runtime::kKeyedGetProperty);
 }
 
-static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm, Register key,
-                                        Register receiver, Register scratch1,
-                                        Register scratch2, Register scratch3,
-                                        Register scratch4, Register scratch5,
-                                        Label* slow) {
-  DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4,
-                     scratch5));
-
-  Isolate* isolate = masm->isolate();
-  Label check_number_dictionary;
-  // If we can load the value, it should be returned in x0.
-  Register result = x0;
-
-  GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2,
-                                 Map::kHasIndexedInterceptor, slow);
-
-  // Check the receiver's map to see if it has fast elements.
-  __ CheckFastElements(scratch1, scratch2, &check_number_dictionary);
-
-  GenerateFastArrayLoad(masm, receiver, key, scratch3, scratch2, scratch1,
-                        result, slow);
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1,
-                      scratch1, scratch2);
-  __ Ret();
-
-  __ Bind(&check_number_dictionary);
-  __ Ldr(scratch3, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ Ldr(scratch2, FieldMemOperand(scratch3, JSObject::kMapOffset));
-
-  // Check whether we have a number dictionary.
-  __ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);
-
-  __ LoadFromNumberDictionary(slow, scratch3, key, result, scratch1, scratch2,
-                              scratch4, scratch5);
-  __ Ret();
-}
-
-static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, Register key,
-                                         Register receiver, Register scratch1,
-                                         Register scratch2, Register scratch3,
-                                         Register scratch4, Register scratch5,
-                                         Label* slow) {
-  DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4,
-                     scratch5));
-
-  Isolate* isolate = masm->isolate();
-  Label probe_dictionary, property_array_property;
-  // If we can load the value, it should be returned in x0.
-  Register result = x0;
-
-  GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2,
-                                 Map::kHasNamedInterceptor, slow);
-
-  // If the receiver is a fast-case object, check the stub cache. Otherwise
-  // probe the dictionary.
-  __ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-  __ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
-  __ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);
-
-  // The handlers in the stub cache expect a vector and slot. Since we won't
-  // change the IC from any downstream misses, a dummy vector can be used.
-  Register vector = LoadWithVectorDescriptor::VectorRegister();
-  Register slot = LoadWithVectorDescriptor::SlotRegister();
-  DCHECK(!AreAliased(vector, slot, scratch1, scratch2, scratch3, scratch4));
-  Handle<TypeFeedbackVector> dummy_vector =
-      TypeFeedbackVector::DummyVector(masm->isolate());
-  int slot_index = dummy_vector->GetIndex(
-      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
-  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
-  __ Mov(slot, Operand(Smi::FromInt(slot_index)));
-
-  masm->isolate()->load_stub_cache()->GenerateProbe(
-      masm, receiver, key, scratch1, scratch2, scratch3, scratch4);
-  // Cache miss.
-  KeyedLoadIC::GenerateMiss(masm);
-
-  // Do a quick inline probe of the receiver's dictionary, if it exists.
-  __ Bind(&probe_dictionary);
-  __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
-  GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
-  // Load the property.
-  GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
-                      scratch1, scratch2);
-  __ Ret();
-}
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
-  // The return address is in lr.
-  Label slow, check_name, index_smi, index_name;
-
-  Register key = LoadDescriptor::NameRegister();
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  DCHECK(key.is(x2));
-  DCHECK(receiver.is(x1));
-
-  __ JumpIfNotSmi(key, &check_name);
-  __ Bind(&index_smi);
-  // Now the key is known to be a smi. This place is also jumped to from below
-  // where a numeric string is converted to a smi.
-  GenerateKeyedLoadWithSmiKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);
-
-  // Slow case.
-  __ Bind(&slow);
-  __ IncrementCounter(masm->isolate()->counters()->ic_keyed_load_generic_slow(),
-                      1, x4, x3);
-  GenerateRuntimeGetProperty(masm);
-
-  __ Bind(&check_name);
-  GenerateKeyNameCheck(masm, key, x0, x3, &index_name, &slow);
-
-  GenerateKeyedLoadWithNameKey(masm, key, receiver, x4, x5, x6, x7, x3, &slow);
-
-  __ Bind(&index_name);
-  __ IndexFromHash(x3, key);
-  // Now jump to the place where smi keys are handled.
-  __ B(&index_smi);
-}
-
-
 static void StoreIC_PushArgs(MacroAssembler* masm) {
   __ Push(StoreWithVectorDescriptor::ValueRegister(),
           StoreWithVectorDescriptor::SlotRegister(),
diff --git a/src/ic/handler-compiler.cc b/src/ic/handler-compiler.cc
index 3b2e115..05e9031 100644
--- a/src/ic/handler-compiler.cc
+++ b/src/ic/handler-compiler.cc
@@ -6,7 +6,7 @@
 
 #include "src/field-type.h"
 #include "src/ic/call-optimization.h"
-#include "src/ic/handler-configuration.h"
+#include "src/ic/handler-configuration-inl.h"
 #include "src/ic/ic-inl.h"
 #include "src/ic/ic.h"
 #include "src/isolate-inl.h"
@@ -65,7 +65,10 @@
   // name specific if there are global objects involved.
   Handle<Code> handler = PropertyHandlerCompiler::Find(
       cache_name, stub_holder_map, Code::LOAD_IC, flag);
-  if (!handler.is_null()) return handler;
+  if (!handler.is_null()) {
+    TRACE_HANDLER_STATS(isolate, LoadIC_HandlerCacheHit_NonExistent);
+    return handler;
+  }
 
   TRACE_HANDLER_STATS(isolate, LoadIC_LoadNonexistent);
   NamedLoadHandlerCompiler compiler(isolate, receiver_map, last, flag);
@@ -95,24 +98,23 @@
                                                   Handle<Name> name,
                                                   Label* miss,
                                                   ReturnHolder return_what) {
-  PrototypeCheckType check_type = SKIP_RECEIVER;
-  int function_index = map()->IsPrimitiveMap()
-                           ? map()->GetConstructorFunctionIndex()
-                           : Map::kNoConstructorFunctionIndex;
-  if (function_index != Map::kNoConstructorFunctionIndex) {
-    GenerateDirectLoadGlobalFunctionPrototype(masm(), function_index,
-                                              scratch1(), miss);
-    Object* function = isolate()->native_context()->get(function_index);
-    Object* prototype = JSFunction::cast(function)->instance_prototype();
-    Handle<Map> map(JSObject::cast(prototype)->map());
-    set_map(map);
-    object_reg = scratch1();
-    check_type = CHECK_ALL_MAPS;
+  if (map()->IsPrimitiveMap() || map()->IsJSGlobalProxyMap()) {
+    // If the receiver is a global proxy and if we get to this point then
+    // the compile-time (current) native context has access to global proxy's
+    // native context. Since access rights revocation is not supported at all,
+    // we can generate a check that an execution-time native context is either
+    // the same as compile-time native context or has the same access token.
+    Handle<Context> native_context = isolate()->native_context();
+    Handle<WeakCell> weak_cell(native_context->self_weak_cell(), isolate());
+
+    bool compare_native_contexts_only = map()->IsPrimitiveMap();
+    GenerateAccessCheck(weak_cell, scratch1(), scratch2(), miss,
+                        compare_native_contexts_only);
   }
 
   // Check that the maps starting from the prototype haven't changed.
   return CheckPrototypes(object_reg, scratch1(), scratch2(), scratch3(), name,
-                         miss, check_type, return_what);
+                         miss, return_what);
 }
 
 
@@ -122,8 +124,14 @@
                                                    Handle<Name> name,
                                                    Label* miss,
                                                    ReturnHolder return_what) {
+  if (map()->IsJSGlobalProxyMap()) {
+    Handle<Context> native_context = isolate()->native_context();
+    Handle<WeakCell> weak_cell(native_context->self_weak_cell(), isolate());
+    GenerateAccessCheck(weak_cell, scratch1(), scratch2(), miss, false);
+  }
+
   return CheckPrototypes(object_reg, this->name(), scratch1(), scratch2(), name,
-                         miss, SKIP_RECEIVER, return_what);
+                         miss, return_what);
 }
 
 
@@ -224,7 +232,7 @@
 
 Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
     Handle<Name> name, Handle<AccessorInfo> callback, Handle<Code> slow_stub) {
-  if (FLAG_runtime_call_stats) {
+  if (V8_UNLIKELY(FLAG_runtime_stats)) {
     GenerateTailCall(masm(), slow_stub);
   }
   Register reg = Frontend(name);
@@ -236,7 +244,7 @@
     Handle<Name> name, const CallOptimization& call_optimization,
     int accessor_index, Handle<Code> slow_stub) {
   DCHECK(call_optimization.is_simple_api_call());
-  if (FLAG_runtime_call_stats) {
+  if (V8_UNLIKELY(FLAG_runtime_stats)) {
     GenerateTailCall(masm(), slow_stub);
   }
   Register holder = Frontend(name);
@@ -590,7 +598,7 @@
     Handle<JSObject> object, Handle<Name> name,
     const CallOptimization& call_optimization, int accessor_index,
     Handle<Code> slow_stub) {
-  if (FLAG_runtime_call_stats) {
+  if (V8_UNLIKELY(FLAG_runtime_stats)) {
     GenerateTailCall(masm(), slow_stub);
   }
   Register holder = Frontend(name);
@@ -633,11 +641,9 @@
   bool is_js_array = instance_type == JS_ARRAY_TYPE;
   if (elements_kind == DICTIONARY_ELEMENTS) {
     if (FLAG_tf_load_ic_stub) {
-      int config = KeyedLoadElementsKind::encode(elements_kind) |
-                   KeyedLoadConvertHole::encode(false) |
-                   KeyedLoadIsJsArray::encode(is_js_array) |
-                   LoadHandlerTypeBit::encode(kLoadICHandlerForElements);
-      return handle(Smi::FromInt(config), isolate);
+      TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadElementDH);
+      return LoadHandler::LoadElement(isolate, elements_kind, false,
+                                      is_js_array);
     }
     TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadDictionaryElementStub);
     return LoadDictionaryElementStub(isolate).GetCode();
@@ -649,11 +655,9 @@
       is_js_array && elements_kind == FAST_HOLEY_ELEMENTS &&
       *receiver_map == isolate->get_initial_js_array_map(elements_kind);
   if (FLAG_tf_load_ic_stub) {
-    int config = KeyedLoadElementsKind::encode(elements_kind) |
-                 KeyedLoadConvertHole::encode(convert_hole_to_undefined) |
-                 KeyedLoadIsJsArray::encode(is_js_array) |
-                 LoadHandlerTypeBit::encode(kLoadICHandlerForElements);
-    return handle(Smi::FromInt(config), isolate);
+    TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadElementDH);
+    return LoadHandler::LoadElement(isolate, elements_kind,
+                                    convert_hole_to_undefined, is_js_array);
   } else {
     TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadFastElementStub);
     return LoadFastElementStub(isolate, is_js_array, elements_kind,
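
The two hunks above swap the hand-assembled bit configurations for
LoadHandler::LoadElement, but the selection logic is unchanged: with
FLAG_tf_load_ic_stub on, both dictionary and fast elements get a data-driven
Smi handler; otherwise they fall back to compiled stubs. A simplified sketch
of that decision (the bit layout here is illustrative, not V8's exact
encoding; -1 stands for the stub fallback):

    #include <cstdio>

    enum ElementsKind { DICTIONARY_ELEMENTS, FAST_HOLEY_ELEMENTS, FAST_ELEMENTS };

    int SelectElementLoadHandler(bool tf_load_ic_stub, ElementsKind kind,
                                 bool is_js_array, bool is_initial_array_map) {
      if (kind == DICTIONARY_ELEMENTS) {
        if (!tf_load_ic_stub) return -1;  // LoadDictionaryElementStub path
        // Dictionary elements never convert the hole to undefined.
        return (kind << 2) | (0 << 1) | (is_js_array ? 1 : 0);
      }
      // Holes may only become undefined for holey initial JSArray maps.
      bool convert_hole = is_js_array && kind == FAST_HOLEY_ELEMENTS &&
                          is_initial_array_map;
      if (!tf_load_ic_stub) return -1;  // LoadFastElementStub path
      return (kind << 2) | ((convert_hole ? 1 : 0) << 1) | (is_js_array ? 1 : 0);
    }

    int main() {
      std::printf("%d\n", SelectElementLoadHandler(true, FAST_HOLEY_ELEMENTS,
                                                   true, true));  // prints 7
      return 0;
    }
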
diff --git a/src/ic/handler-compiler.h b/src/ic/handler-compiler.h
index 63ca050..0dec36a 100644
--- a/src/ic/handler-compiler.h
+++ b/src/ic/handler-compiler.h
@@ -13,7 +13,6 @@
 
 class CallOptimization;
 
-enum PrototypeCheckType { CHECK_ALL_MAPS, SKIP_RECEIVER };
 enum ReturnHolder { RETURN_HOLDER, DONT_RETURN_ANYTHING };
 
 class PropertyHandlerCompiler : public PropertyAccessCompiler {
@@ -84,6 +83,18 @@
                                         Handle<Name> name, Register scratch,
                                         Label* miss);
 
+  // Generates a check that the current native context has the same access
+  // rights as the given |native_context_cell|.
+  // If |compare_native_contexts_only| is true, the access check is considered
+  // passed if the execution-time native context is equal to the contents of
+  // |native_context_cell|.
+  // If |compare_native_contexts_only| is false, the access check is considered
+  // passed if the execution-time native context is equal to the contents of
+  // |native_context_cell| or the security tokens of both contexts are equal.
+  void GenerateAccessCheck(Handle<WeakCell> native_context_cell,
+                           Register scratch1, Register scratch2, Label* miss,
+                           bool compare_native_contexts_only);
+
   // Generates code that verifies that the property holder has not changed
   // (checking maps of objects in the prototype chain for fast and global
   // objects or doing negative lookup for slow objects, ensures that the
@@ -99,7 +110,7 @@
   Register CheckPrototypes(Register object_reg, Register holder_reg,
                            Register scratch1, Register scratch2,
                            Handle<Name> name, Label* miss,
-                           PrototypeCheckType check, ReturnHolder return_what);
+                           ReturnHolder return_what);
 
   Handle<Code> GetCode(Code::Kind kind, Handle<Name> name);
   void set_holder(Handle<JSObject> holder) { holder_ = holder; }
diff --git a/src/ic/handler-configuration-inl.h b/src/ic/handler-configuration-inl.h
new file mode 100644
index 0000000..505d67c
--- /dev/null
+++ b/src/ic/handler-configuration-inl.h
@@ -0,0 +1,145 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IC_HANDLER_CONFIGURATION_INL_H_
+#define V8_IC_HANDLER_CONFIGURATION_INL_H_
+
+#include "src/ic/handler-configuration.h"
+
+#include "src/field-index-inl.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<Object> LoadHandler::LoadField(Isolate* isolate,
+                                      FieldIndex field_index) {
+  int config = KindBits::encode(kForFields) |
+               IsInobjectBits::encode(field_index.is_inobject()) |
+               IsDoubleBits::encode(field_index.is_double()) |
+               FieldOffsetBits::encode(field_index.offset());
+  return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Object> LoadHandler::LoadConstant(Isolate* isolate, int descriptor) {
+  int config = KindBits::encode(kForConstants) |
+               IsAccessorInfoBits::encode(false) |
+               DescriptorValueIndexBits::encode(
+                   DescriptorArray::ToValueIndex(descriptor));
+  return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Object> LoadHandler::LoadApiGetter(Isolate* isolate, int descriptor) {
+  int config = KindBits::encode(kForConstants) |
+               IsAccessorInfoBits::encode(true) |
+               DescriptorValueIndexBits::encode(
+                   DescriptorArray::ToValueIndex(descriptor));
+  return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Object> LoadHandler::EnableAccessCheckOnReceiver(
+    Isolate* isolate, Handle<Object> smi_handler) {
+  int config = Smi::cast(*smi_handler)->value();
+#ifdef DEBUG
+  Kind kind = KindBits::decode(config);
+  DCHECK_NE(kForElements, kind);
+#endif
+  config = DoAccessCheckOnReceiverBits::update(config, true);
+  return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Object> LoadHandler::EnableNegativeLookupOnReceiver(
+    Isolate* isolate, Handle<Object> smi_handler) {
+  int config = Smi::cast(*smi_handler)->value();
+#ifdef DEBUG
+  Kind kind = KindBits::decode(config);
+  DCHECK_NE(kForElements, kind);
+#endif
+  config = DoNegativeLookupOnReceiverBits::update(config, true);
+  return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Object> LoadHandler::LoadNonExistent(
+    Isolate* isolate, bool do_negative_lookup_on_receiver) {
+  int config =
+      KindBits::encode(kForNonExistent) |
+      DoNegativeLookupOnReceiverBits::encode(do_negative_lookup_on_receiver);
+  return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Object> LoadHandler::LoadElement(Isolate* isolate,
+                                        ElementsKind elements_kind,
+                                        bool convert_hole_to_undefined,
+                                        bool is_js_array) {
+  int config = KindBits::encode(kForElements) |
+               ElementsKindBits::encode(elements_kind) |
+               ConvertHoleBits::encode(convert_hole_to_undefined) |
+               IsJsArrayBits::encode(is_js_array);
+  return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Object> StoreHandler::StoreField(Isolate* isolate, Kind kind,
+                                        int descriptor, FieldIndex field_index,
+                                        Representation representation,
+                                        bool extend_storage) {
+  StoreHandler::FieldRepresentation field_rep;
+  switch (representation.kind()) {
+    case Representation::kSmi:
+      field_rep = StoreHandler::kSmi;
+      break;
+    case Representation::kDouble:
+      field_rep = StoreHandler::kDouble;
+      break;
+    case Representation::kHeapObject:
+      field_rep = StoreHandler::kHeapObject;
+      break;
+    case Representation::kTagged:
+      field_rep = StoreHandler::kTagged;
+      break;
+    default:
+      UNREACHABLE();
+      return Handle<Object>::null();
+  }
+  int value_index = DescriptorArray::ToValueIndex(descriptor);
+
+  DCHECK(kind == kStoreField || kind == kTransitionToField);
+  DCHECK_IMPLIES(kind == kStoreField, !extend_storage);
+
+  int config = StoreHandler::KindBits::encode(kind) |
+               StoreHandler::ExtendStorageBits::encode(extend_storage) |
+               StoreHandler::IsInobjectBits::encode(field_index.is_inobject()) |
+               StoreHandler::FieldRepresentationBits::encode(field_rep) |
+               StoreHandler::DescriptorValueIndexBits::encode(value_index) |
+               StoreHandler::FieldOffsetBits::encode(field_index.offset());
+  return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Object> StoreHandler::StoreField(Isolate* isolate, int descriptor,
+                                        FieldIndex field_index,
+                                        Representation representation) {
+  return StoreField(isolate, kStoreField, descriptor, field_index,
+                    representation, false);
+}
+
+Handle<Object> StoreHandler::TransitionToField(Isolate* isolate, int descriptor,
+                                               FieldIndex field_index,
+                                               Representation representation,
+                                               bool extend_storage) {
+  return StoreField(isolate, kTransitionToField, descriptor, field_index,
+                    representation, extend_storage);
+}
+
+Handle<Object> StoreHandler::TransitionToConstant(Isolate* isolate,
+                                                  int descriptor) {
+  int value_index = DescriptorArray::ToValueIndex(descriptor);
+  int config =
+      StoreHandler::KindBits::encode(StoreHandler::kTransitionToConstant) |
+      StoreHandler::DescriptorValueIndexBits::encode(value_index);
+  return handle(Smi::FromInt(config), isolate);
+}
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_IC_HANDLER_CONFIGURATION_INL_H_
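
Every encoder in this file funnels its fields through V8's BitField template
and packs the result into a Smi. A self-contained demo of the same
encode/update/decode mechanics, reusing LoadHandler's first three fields
(simplified; the kSmiValueSize overflow checks are omitted):

    #include <cassert>
    #include <cstdint>

    template <typename T, int kShift, int kBits>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << kBits) - 1) << kShift;
      static constexpr int kNext = kShift + kBits;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static T decode(uint32_t config) {
        return static_cast<T>((config & kMask) >> kShift);
      }
      static uint32_t update(uint32_t config, T value) {
        return (config & ~kMask) | encode(value);
      }
    };

    enum Kind { kForElements, kForFields, kForConstants, kForNonExistent };
    using KindBits = BitField<Kind, 0, 2>;
    using DoAccessCheckBits = BitField<bool, KindBits::kNext, 1>;
    using DoNegativeLookupBits = BitField<bool, DoAccessCheckBits::kNext, 1>;

    int main() {
      // LoadField-style encoding, then the EnableAccessCheckOnReceiver step.
      uint32_t config = KindBits::encode(kForFields);
      config = DoAccessCheckBits::update(config, true);
      assert(KindBits::decode(config) == kForFields);
      assert(DoAccessCheckBits::decode(config));
      assert(!DoNegativeLookupBits::decode(config));
      return 0;
    }
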
diff --git a/src/ic/handler-configuration.h b/src/ic/handler-configuration.h
index bf7c477..a529173 100644
--- a/src/ic/handler-configuration.h
+++ b/src/ic/handler-configuration.h
@@ -6,38 +6,196 @@
 #define V8_IC_HANDLER_CONFIGURATION_H_
 
 #include "src/elements-kind.h"
+#include "src/field-index.h"
 #include "src/globals.h"
 #include "src/utils.h"
 
 namespace v8 {
 namespace internal {
 
-enum LoadHandlerType {
-  kLoadICHandlerForElements = 0,
-  kLoadICHandlerForProperties = 1
+// A set of bit fields representing Smi handlers for loads.
+class LoadHandler {
+ public:
+  enum Kind { kForElements, kForFields, kForConstants, kForNonExistent };
+  class KindBits : public BitField<Kind, 0, 2> {};
+
+  // Defines whether access rights check should be done on receiver object.
+  // Applicable to kForFields, kForConstants and kForNonExistent kinds only when
+  // loading value from prototype chain. Ignored when loading from holder.
+  class DoAccessCheckOnReceiverBits
+      : public BitField<bool, KindBits::kNext, 1> {};
+
+  // Defines whether negative lookup check should be done on receiver object.
+  // Applicable to kForFields, kForConstants and kForNonExistent kinds only when
+  // loading value from prototype chain. Ignored when loading from holder.
+  class DoNegativeLookupOnReceiverBits
+      : public BitField<bool, DoAccessCheckOnReceiverBits::kNext, 1> {};
+
+  //
+  // Encoding when KindBits contains kForConstants.
+  //
+
+  class IsAccessorInfoBits
+      : public BitField<bool, DoNegativeLookupOnReceiverBits::kNext, 1> {};
+  // Index of a value entry in the descriptor array.
+  // +2 here is because each descriptor entry occupies 3 slots in the array.
+  class DescriptorValueIndexBits
+      : public BitField<unsigned, IsAccessorInfoBits::kNext,
+                        kDescriptorIndexBitCount + 2> {};
+  // Make sure we don't overflow the smi.
+  STATIC_ASSERT(DescriptorValueIndexBits::kNext <= kSmiValueSize);
+
+  //
+  // Encoding when KindBits contains kForFields.
+  //
+  class IsInobjectBits
+      : public BitField<bool, DoNegativeLookupOnReceiverBits::kNext, 1> {};
+  class IsDoubleBits : public BitField<bool, IsInobjectBits::kNext, 1> {};
+  // +1 here is to cover all possible JSObject header sizes.
+  class FieldOffsetBits
+      : public BitField<unsigned, IsDoubleBits::kNext,
+                        kDescriptorIndexBitCount + 1 + kPointerSizeLog2> {};
+  // Make sure we don't overflow the smi.
+  STATIC_ASSERT(FieldOffsetBits::kNext <= kSmiValueSize);
+
+  //
+  // Encoding when KindBits contains kForElements.
+  //
+  class IsJsArrayBits : public BitField<bool, KindBits::kNext, 1> {};
+  class ConvertHoleBits : public BitField<bool, IsJsArrayBits::kNext, 1> {};
+  class ElementsKindBits
+      : public BitField<ElementsKind, ConvertHoleBits::kNext, 8> {};
+  // Make sure we don't overflow the smi.
+  STATIC_ASSERT(ElementsKindBits::kNext <= kSmiValueSize);
+
+  // The layout of a Tuple3 handler representing a load of a field from a
+  // prototype when prototype chain checks do not include non-existing lookups
+  // or access checks.
+  static const int kHolderCellOffset = Tuple3::kValue1Offset;
+  static const int kSmiHandlerOffset = Tuple3::kValue2Offset;
+  static const int kValidityCellOffset = Tuple3::kValue3Offset;
+
+  // The layout of an array handler representing a load of a field from a
+  // prototype when prototype chain checks include non-existing lookups and
+  // access checks.
+  static const int kSmiHandlerIndex = 0;
+  static const int kValidityCellIndex = 1;
+  static const int kHolderCellIndex = 2;
+  static const int kFirstPrototypeIndex = 3;
+
+  // Creates a Smi-handler for loading a field from fast object.
+  static inline Handle<Object> LoadField(Isolate* isolate,
+                                         FieldIndex field_index);
+
+  // Creates a Smi-handler for loading a constant from fast object.
+  static inline Handle<Object> LoadConstant(Isolate* isolate, int descriptor);
+
+  // Creates a Smi-handler for loading an Api getter property from fast object.
+  static inline Handle<Object> LoadApiGetter(Isolate* isolate, int descriptor);
+
+  // Sets DoAccessCheckOnReceiverBits in given Smi-handler. The receiver
+  // check is a part of a prototype chain check.
+  static inline Handle<Object> EnableAccessCheckOnReceiver(
+      Isolate* isolate, Handle<Object> smi_handler);
+
+  // Sets DoNegativeLookupOnReceiverBits in given Smi-handler. The receiver
+  // check is a part of a prototype chain check.
+  static inline Handle<Object> EnableNegativeLookupOnReceiver(
+      Isolate* isolate, Handle<Object> smi_handler);
+
+  // Creates a Smi-handler for loading a non-existent property. Works only as
+  // a part of prototype chain check.
+  static inline Handle<Object> LoadNonExistent(
+      Isolate* isolate, bool do_negative_lookup_on_receiver);
+
+  // Creates a Smi-handler for loading an element.
+  static inline Handle<Object> LoadElement(Isolate* isolate,
+                                           ElementsKind elements_kind,
+                                           bool convert_hole_to_undefined,
+                                           bool is_js_array);
 };
 
-class LoadHandlerTypeBit : public BitField<bool, 0, 1> {};
+// A set of bit fields representing Smi handlers for stores.
+class StoreHandler {
+ public:
+  enum Kind {
+    kStoreElement,
+    kStoreField,
+    kTransitionToField,
+    kTransitionToConstant
+  };
+  class KindBits : public BitField<Kind, 0, 2> {};
 
-// Encoding for configuration Smis for property loads:
-class FieldOffsetIsInobject
-    : public BitField<bool, LoadHandlerTypeBit::kNext, 1> {};
-class FieldOffsetIsDouble
-    : public BitField<bool, FieldOffsetIsInobject::kNext, 1> {};
-class FieldOffsetOffset : public BitField<int, FieldOffsetIsDouble::kNext, 27> {
-};
-// Make sure we don't overflow into the sign bit.
-STATIC_ASSERT(FieldOffsetOffset::kNext <= kSmiValueSize - 1);
+  enum FieldRepresentation { kSmi, kDouble, kHeapObject, kTagged };
 
-// Encoding for configuration Smis for elements loads:
-class KeyedLoadIsJsArray : public BitField<bool, LoadHandlerTypeBit::kNext, 1> {
+  // Applicable to kStoreField, kTransitionToField and kTransitionToConstant
+  // kinds.
+
+  // Index of a value entry in the descriptor array.
+  // The +2 is because each descriptor entry occupies 3 slots in the array.
+  class DescriptorValueIndexBits
+      : public BitField<unsigned, KindBits::kNext,
+                        kDescriptorIndexBitCount + 2> {};
+  // Make sure we don't overflow the smi.
+  STATIC_ASSERT(DescriptorValueIndexBits::kNext <= kSmiValueSize);
+
+  //
+  // Encoding when KindBits contains kTransitionToConstant.
+  // (No extra fields are needed; the descriptor value index above suffices.)
+  //
+
+  //
+  // Encoding when KindBits contains kStoreField or kTransitionToField.
+  //
+  class ExtendStorageBits
+      : public BitField<bool, DescriptorValueIndexBits::kNext, 1> {};
+  class IsInobjectBits : public BitField<bool, ExtendStorageBits::kNext, 1> {};
+  class FieldRepresentationBits
+      : public BitField<FieldRepresentation, IsInobjectBits::kNext, 2> {};
+  // +1 here is to cover all possible JSObject header sizes.
+  class FieldOffsetBits
+      : public BitField<unsigned, FieldRepresentationBits::kNext,
+                        kDescriptorIndexBitCount + 1 + kPointerSizeLog2> {};
+  // Make sure we don't overflow the smi.
+  STATIC_ASSERT(FieldOffsetBits::kNext <= kSmiValueSize);
+
+  // The layout of a Tuple3 handler representing a transitioning store
+  // when the prototype chain checks include neither non-existent lookups nor
+  // access checks.
+  static const int kTransitionCellOffset = Tuple3::kValue1Offset;
+  static const int kSmiHandlerOffset = Tuple3::kValue2Offset;
+  static const int kValidityCellOffset = Tuple3::kValue3Offset;
+
+  // The layout of an array handler representing a transitioning store
+  // when the prototype chain checks include non-existent lookups and access
+  // checks.
+  static const int kSmiHandlerIndex = 0;
+  static const int kValidityCellIndex = 1;
+  static const int kTransitionCellIndex = 2;
+  static const int kFirstPrototypeIndex = 3;
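+  // I.e.: [0] smi handler, [1] validity cell, [2] transition cell,
+  // [3..] weak cells for the prototypes that need checking.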
+
+  // Creates a Smi-handler for storing a field to a fast object.
+  static inline Handle<Object> StoreField(Isolate* isolate, int descriptor,
+                                          FieldIndex field_index,
+                                          Representation representation);
+
+  // Creates a Smi-handler for a transitioning store to a field.
+  static inline Handle<Object> TransitionToField(Isolate* isolate,
+                                                 int descriptor,
+                                                 FieldIndex field_index,
+                                                 Representation representation,
+                                                 bool extend_storage);
+
+  // Creates a Smi-handler for a transitioning store to a constant field (in
+  // this case the only thing that needs to be done is an update of the map).
+  static inline Handle<Object> TransitionToConstant(Isolate* isolate,
+                                                    int descriptor);
+
+ private:
+  static inline Handle<Object> StoreField(Isolate* isolate, Kind kind,
+                                          int descriptor,
+                                          FieldIndex field_index,
+                                          Representation representation,
+                                          bool extend_storage);
 };
-class KeyedLoadConvertHole
-    : public BitField<bool, KeyedLoadIsJsArray::kNext, 1> {};
-class KeyedLoadElementsKind
-    : public BitField<ElementsKind, KeyedLoadConvertHole::kNext, 8> {};
-// Make sure we don't overflow into the sign bit.
-STATIC_ASSERT(KeyedLoadElementsKind::kNext <= kSmiValueSize - 1);
 
 }  // namespace internal
 }  // namespace v8
diff --git a/src/ic/ia32/access-compiler-ia32.cc b/src/ic/ia32/access-compiler-ia32.cc
index 3219f3d..411c744 100644
--- a/src/ic/ia32/access-compiler-ia32.cc
+++ b/src/ic/ia32/access-compiler-ia32.cc
@@ -16,22 +16,21 @@
   __ jmp(code, RelocInfo::CODE_TARGET);
 }
 
-
-Register* PropertyAccessCompiler::load_calling_convention() {
-  // receiver, name, scratch1, scratch2, scratch3.
+void PropertyAccessCompiler::InitializePlatformSpecific(
+    AccessCompilerData* data) {
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, ebx, eax, edi};
-  return registers;
-}
 
+  // Load calling convention.
+  // receiver, name, scratch1, scratch2, scratch3.
+  Register load_registers[] = {receiver, name, ebx, eax, edi};
 
-Register* PropertyAccessCompiler::store_calling_convention() {
+  // Store calling convention.
   // receiver, name, scratch1, scratch2.
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, ebx, edi};
-  return registers;
+  Register store_registers[] = {receiver, name, ebx, edi};
+
+  data->Initialize(arraysize(load_registers), load_registers,
+                   arraysize(store_registers), store_registers);
 }
 
 #undef __
diff --git a/src/ic/ia32/handler-compiler-ia32.cc b/src/ic/ia32/handler-compiler-ia32.cc
index 06c58b8..68fd1b9 100644
--- a/src/ic/ia32/handler-compiler-ia32.cc
+++ b/src/ic/ia32/handler-compiler-ia32.cc
@@ -411,10 +411,32 @@
   }
 }
 
+void PropertyHandlerCompiler::GenerateAccessCheck(
+    Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
+    Label* miss, bool compare_native_contexts_only) {
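+  // Jump to |miss| unless the current native context equals the expected one
+  // or, when compare_native_contexts_only is false, unless their security
+  // tokens match. A cleared |native_context_cell| also goes to |miss|.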
+  Label done;
+  // Load current native context.
+  __ mov(scratch1, NativeContextOperand());
+  // Load expected native context.
+  __ LoadWeakValue(scratch2, native_context_cell, miss);
+  __ cmp(scratch1, scratch2);
+
+  if (!compare_native_contexts_only) {
+    __ j(equal, &done);
+
+    // Compare security tokens of current and expected native contexts.
+    __ mov(scratch1, ContextOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+    __ mov(scratch2, ContextOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+    __ cmp(scratch1, scratch2);
+  }
+  __ j(not_equal, miss);
+
+  __ bind(&done);
+}
 
 Register PropertyHandlerCompiler::CheckPrototypes(
     Register object_reg, Register holder_reg, Register scratch1,
-    Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+    Register scratch2, Handle<Name> name, Label* miss,
     ReturnHolder return_what) {
   Handle<Map> receiver_map = map();
 
@@ -433,17 +455,6 @@
     __ j(not_equal, miss);
   }
 
-  // The prototype chain of primitives (and their JSValue wrappers) depends
-  // on the native context, which can't be guarded by validity cells.
-  // |object_reg| holds the native context specific prototype in this case;
-  // we need to check its map.
-  if (check == CHECK_ALL_MAPS) {
-    __ mov(scratch1, FieldOperand(object_reg, HeapObject::kMapOffset));
-    Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
-    __ CmpWeakValue(scratch1, cell, scratch2);
-    __ j(not_equal, miss);
-  }
-
   // Keep track of the current object in register reg.
   Register reg = object_reg;
   int depth = 0;
@@ -453,46 +464,28 @@
     current = isolate()->global_object();
   }
 
-  // Check access rights to the global object.  This has to happen after
-  // the map check so that we know that the object is actually a global
-  // object.
-  // This allows us to install generated handlers for accesses to the
-  // global proxy (as opposed to using slow ICs). See corresponding code
-  // in LookupForRead().
-  if (receiver_map->IsJSGlobalProxyMap()) {
-    __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
-  }
-
-  Handle<JSObject> prototype = Handle<JSObject>::null();
-  Handle<Map> current_map = receiver_map;
+  Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
+                          isolate());
   Handle<Map> holder_map(holder()->map());
   // Traverse the prototype chain and check the maps in the prototype chain for
   // fast and global objects or do negative lookup for normal objects.
   while (!current_map.is_identical_to(holder_map)) {
     ++depth;
 
-    // Only global objects and objects that do not require access
-    // checks are allowed in stubs.
-    DCHECK(current_map->IsJSGlobalProxyMap() ||
-           !current_map->is_access_check_needed());
-
-    prototype = handle(JSObject::cast(current_map->prototype()));
     if (current_map->IsJSGlobalObjectMap()) {
       GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
                                 name, scratch2, miss);
     } else if (current_map->is_dictionary_map()) {
       DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
-      if (!name->IsUniqueName()) {
-        DCHECK(name->IsString());
-        name = factory()->InternalizeString(Handle<String>::cast(name));
-      }
+      DCHECK(name->IsUniqueName());
       DCHECK(current.is_null() ||
              current->property_dictionary()->FindEntry(name) ==
                  NameDictionary::kNotFound);
 
       if (depth > 1) {
-        // TODO(jkummerow): Cache and re-use weak cell.
-        __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+        Handle<WeakCell> weak_cell =
+            Map::GetOrCreatePrototypeWeakCell(current, isolate());
+        __ LoadWeakValue(reg, weak_cell, miss);
       }
       GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
                                        scratch2);
@@ -500,7 +493,7 @@
 
     reg = holder_reg;  // From now on the object will be in holder_reg.
     // Go to the next object in the prototype chain.
-    current = prototype;
+    current = handle(JSObject::cast(current_map->prototype()));
     current_map = handle(current->map());
   }
 
@@ -511,7 +504,9 @@
 
   bool return_holder = return_what == RETURN_HOLDER;
   if (return_holder && depth != 0) {
-    __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+    Handle<WeakCell> weak_cell =
+        Map::GetOrCreatePrototypeWeakCell(current, isolate());
+    __ LoadWeakValue(reg, weak_cell, miss);
   }
 
   // Return the register containing the holder.
diff --git a/src/ic/ia32/ic-ia32.cc b/src/ic/ia32/ic-ia32.cc
index b7496d4..44a5b9f 100644
--- a/src/ic/ia32/ic-ia32.cc
+++ b/src/ic/ia32/ic-ia32.cc
@@ -18,18 +18,6 @@
 
 #define __ ACCESS_MASM(masm)
 
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
-                                            Label* global_object) {
-  // Register usage:
-  //   type: holds the receiver instance type on entry.
-  __ cmp(type, JS_GLOBAL_OBJECT_TYPE);
-  __ j(equal, global_object);
-  __ cmp(type, JS_GLOBAL_PROXY_TYPE);
-  __ j(equal, global_object);
-}
-
-
 // Helper function used to load a property from a dictionary backing
 // storage. This function may fail to load a property even though it is
 // in the dictionary, so code at miss_label must always call a backup
@@ -132,238 +120,6 @@
   __ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
 }
 
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
-                                           Register receiver, Register map,
-                                           int interceptor_bit, Label* slow) {
-  // Register use:
-  //   receiver - holds the receiver and is unchanged.
-  // Scratch registers:
-  //   map - used to hold the map of the receiver.
-
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, slow);
-
-  // Get the map of the receiver.
-  __ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
-
-  // Check bit field.
-  __ test_b(
-      FieldOperand(map, Map::kBitFieldOffset),
-      Immediate((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
-  __ j(not_zero, slow);
-  // Check that the object is some kind of JS object EXCEPT JS Value type. In
-  // the case that the object is a value-wrapper object, we enter the runtime
-  // system to make sure that indexing into string objects works as intended.
-  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-
-  __ CmpInstanceType(map, JS_OBJECT_TYPE);
-  __ j(below, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
-                                  Register key, Register scratch,
-                                  Register scratch2, Register result,
-                                  Label* slow) {
-  // Register use:
-  //   receiver - holds the receiver and is unchanged.
-  //   key - holds the key and is unchanged (must be a smi).
-  // Scratch registers:
-  //   scratch - used to hold elements of the receiver and the loaded value.
-  //   scratch2 - holds maps and prototypes during prototype chain check.
-  //   result - holds the result on exit if the load succeeds and
-  //            we fall through.
-  Label check_prototypes, check_next_prototype;
-  Label done, in_bounds, absent;
-
-  __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
-  __ AssertFastElements(scratch);
-
-  // Check that the key (index) is within bounds.
-  __ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
-  __ j(below, &in_bounds);
-  // Out-of-bounds. Check the prototype chain to see if we can just return
-  // 'undefined'.
-  __ cmp(key, 0);
-  __ j(less, slow);  // Negative keys can't take the fast OOB path.
-  __ bind(&check_prototypes);
-  __ mov(scratch2, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ bind(&check_next_prototype);
-  __ mov(scratch2, FieldOperand(scratch2, Map::kPrototypeOffset));
-  // scratch2: current prototype
-  __ cmp(scratch2, masm->isolate()->factory()->null_value());
-  __ j(equal, &absent);
-  __ mov(scratch, FieldOperand(scratch2, JSObject::kElementsOffset));
-  __ mov(scratch2, FieldOperand(scratch2, HeapObject::kMapOffset));
-  // scratch: elements of current prototype
-  // scratch2: map of current prototype
-  __ CmpInstanceType(scratch2, JS_OBJECT_TYPE);
-  __ j(below, slow);
-  __ test_b(FieldOperand(scratch2, Map::kBitFieldOffset),
-            Immediate((1 << Map::kIsAccessCheckNeeded) |
-                      (1 << Map::kHasIndexedInterceptor)));
-  __ j(not_zero, slow);
-  __ cmp(scratch, masm->isolate()->factory()->empty_fixed_array());
-  __ j(not_equal, slow);
-  __ jmp(&check_next_prototype);
-
-  __ bind(&absent);
-  __ mov(result, masm->isolate()->factory()->undefined_value());
-  __ jmp(&done);
-
-  __ bind(&in_bounds);
-  // Fast case: Do the load.
-  STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
-  __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
-  __ cmp(scratch, Immediate(masm->isolate()->factory()->the_hole_value()));
-  // In case the loaded value is the_hole we have to check the prototype chain.
-  __ j(equal, &check_prototypes);
-  __ Move(result, scratch);
-  __ bind(&done);
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if the key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
-                                 Register map, Register hash,
-                                 Label* index_string, Label* not_unique) {
-  // Register use:
-  //   key - holds the key and is unchanged. Assumed to be non-smi.
-  // Scratch registers:
-  //   map - used to hold the map of the key.
-  //   hash - used to hold the hash of the key.
-  Label unique;
-  __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
-  __ j(above, not_unique);
-  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
-  __ j(equal, &unique);
-
-  // Is the string an array index, with cached numeric value?
-  __ mov(hash, FieldOperand(key, Name::kHashFieldOffset));
-  __ test(hash, Immediate(Name::kContainsCachedArrayIndexMask));
-  __ j(zero, index_string);
-
-  // Is the string internalized? We already know it's a string so a single
-  // bit test is enough.
-  STATIC_ASSERT(kNotInternalizedTag != 0);
-  __ test_b(FieldOperand(map, Map::kInstanceTypeOffset),
-            Immediate(kIsNotInternalizedMask));
-  __ j(not_zero, not_unique);
-
-  __ bind(&unique);
-}
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
-  // The return address is on the stack.
-  Label slow, check_name, index_smi, index_name, property_array_property;
-  Label probe_dictionary, check_number_dictionary;
-
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register key = LoadDescriptor::NameRegister();
-  DCHECK(receiver.is(edx));
-  DCHECK(key.is(ecx));
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &check_name);
-  __ bind(&index_smi);
-  // Now the key is known to be a smi. This place is also jumped to from
-  // where a numeric string is converted to a smi.
-
-  GenerateKeyedLoadReceiverCheck(masm, receiver, eax,
-                                 Map::kHasIndexedInterceptor, &slow);
-
-  // Check the receiver's map to see if it has fast elements.
-  __ CheckFastElements(eax, &check_number_dictionary);
-
-  GenerateFastArrayLoad(masm, receiver, key, eax, ebx, eax, &slow);
-  Isolate* isolate = masm->isolate();
-  Counters* counters = isolate->counters();
-  __ IncrementCounter(counters->ic_keyed_load_generic_smi(), 1);
-  __ ret(0);
-
-  __ bind(&check_number_dictionary);
-  __ mov(ebx, key);
-  __ SmiUntag(ebx);
-  __ mov(eax, FieldOperand(receiver, JSObject::kElementsOffset));
-
-  // Check whether the elements is a number dictionary.
-  // ebx: untagged index
-  // eax: elements
-  __ CheckMap(eax, isolate->factory()->hash_table_map(), &slow,
-              DONT_DO_SMI_CHECK);
-  Label slow_pop_receiver;
-  // Push receiver on the stack to free up a register for the dictionary
-  // probing.
-  __ push(receiver);
-  __ LoadFromNumberDictionary(&slow_pop_receiver, eax, key, ebx, edx, edi, eax);
-  // Pop receiver before returning.
-  __ pop(receiver);
-  __ ret(0);
-
-  __ bind(&slow_pop_receiver);
-  // Pop the receiver from the stack and jump to runtime.
-  __ pop(receiver);
-
-  __ bind(&slow);
-  // Slow case: jump to runtime.
-  __ IncrementCounter(counters->ic_keyed_load_generic_slow(), 1);
-  GenerateRuntimeGetProperty(masm);
-
-  __ bind(&check_name);
-  GenerateKeyNameCheck(masm, key, eax, ebx, &index_name, &slow);
-
-  GenerateKeyedLoadReceiverCheck(masm, receiver, eax, Map::kHasNamedInterceptor,
-                                 &slow);
-
-  // If the receiver is a fast-case object, check the stub cache. Otherwise
-  // probe the dictionary.
-  __ mov(ebx, FieldOperand(receiver, JSObject::kPropertiesOffset));
-  __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
-         Immediate(isolate->factory()->hash_table_map()));
-  __ j(equal, &probe_dictionary);
-
-  // The handlers in the stub cache expect a vector and slot. Since we won't
-  // change the IC from any downstream misses, a dummy vector can be used.
-  Handle<TypeFeedbackVector> dummy_vector =
-      TypeFeedbackVector::DummyVector(isolate);
-  int slot = dummy_vector->GetIndex(
-      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
-  __ push(Immediate(Smi::FromInt(slot)));
-  __ push(Immediate(dummy_vector));
-
-  masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, ebx,
-                                                    edi);
-
-  __ pop(LoadWithVectorDescriptor::VectorRegister());
-  __ pop(LoadDescriptor::SlotRegister());
-
-  // Cache miss.
-  GenerateMiss(masm);
-
-  // Do a quick inline probe of the receiver's dictionary, if it
-  // exists.
-  __ bind(&probe_dictionary);
-
-  __ mov(eax, FieldOperand(receiver, JSObject::kMapOffset));
-  __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
-  GenerateGlobalInstanceTypeCheck(masm, eax, &slow);
-
-  GenerateDictionaryLoad(masm, &slow, ebx, key, eax, edi, eax);
-  __ IncrementCounter(counters->ic_keyed_load_generic_symbol(), 1);
-  __ ret(0);
-
-  __ bind(&index_name);
-  __ IndexFromHash(ebx, key);
-  // Now jump to the place where smi keys are handled.
-  __ jmp(&index_smi);
-}
-
-
 static void KeyedStoreGenerateMegamorphicHelper(
     MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
     KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
diff --git a/src/ic/ic-compiler.cc b/src/ic/ic-compiler.cc
index 2f0633e..750c88d 100644
--- a/src/ic/ic-compiler.cc
+++ b/src/ic/ic-compiler.cc
@@ -56,9 +56,11 @@
     // Tracking to do a better job of ensuring the data types are what they need
     // to be. Not all the elements are in place yet, pessimistic elements
     // transitions are still important for performance.
-    bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
-    ElementsKind elements_kind = receiver_map->elements_kind();
     if (!transitioned_map.is_null()) {
+      bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+      ElementsKind elements_kind = receiver_map->elements_kind();
+      TRACE_HANDLER_STATS(isolate(),
+                          KeyedStoreIC_ElementsTransitionAndStoreStub);
       cached_stub =
           ElementsTransitionAndStoreStub(isolate(), elements_kind,
                                          transitioned_map->elements_kind(),
@@ -66,19 +68,11 @@
     } else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
       // TODO(mvstanton): Consider embedding store_mode in the state of the slow
       // keyed store ic for uniformity.
+      TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_SlowStub);
       cached_stub = isolate()->builtins()->KeyedStoreIC_Slow();
     } else {
-      if (IsSloppyArgumentsElements(elements_kind)) {
-        cached_stub =
-            KeyedStoreSloppyArgumentsStub(isolate(), store_mode).GetCode();
-      } else if (receiver_map->has_fast_elements() ||
-                 receiver_map->has_fixed_typed_array_elements()) {
-        cached_stub = StoreFastElementStub(isolate(), is_js_array,
-                                           elements_kind, store_mode).GetCode();
-      } else {
-        cached_stub =
-            StoreElementStub(isolate(), elements_kind, store_mode).GetCode();
-      }
+      cached_stub =
+          CompileKeyedStoreMonomorphicHandler(receiver_map, store_mode);
     }
     DCHECK(!cached_stub.is_null());
     handlers->Add(cached_stub);
diff --git a/src/ic/ic-inl.h b/src/ic/ic-inl.h
index 4fc8ada..1b5d063 100644
--- a/src/ic/ic-inl.h
+++ b/src/ic/ic-inl.h
@@ -92,6 +92,12 @@
   return GetTargetAtAddress(address(), constant_pool());
 }
 
+bool IC::IsHandler(Object* object) {
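+  // Note: Smi::FromInt(0) is bit-identical to nullptr (kSmiTag is 0), so the
+  // nullptr comparison rejects the zero Smi, which is not a valid handler.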
+  return (object->IsSmi() && (object != nullptr)) || object->IsTuple3() ||
+         object->IsFixedArray() ||
+         (object->IsCode() && Code::cast(object)->is_handler());
+}
+
 Handle<Map> IC::GetHandlerCacheHolder(Handle<Map> receiver_map,
                                       bool receiver_is_holder, Isolate* isolate,
                                       CacheHolderFlag* flag) {
diff --git a/src/ic/ic-state.cc b/src/ic/ic-state.cc
index ea1f16c..f948036 100644
--- a/src/ic/ic-state.cc
+++ b/src/ic/ic-state.cc
@@ -17,7 +17,7 @@
 
 
 std::ostream& operator<<(std::ostream& os, const CallICState& s) {
-  return os << "(args(" << s.argc() << "), " << s.convert_mode() << ", ";
+  return os << "(" << s.convert_mode() << ", " << s.tail_call_mode() << ")";
 }
 
 
@@ -256,10 +256,10 @@
 
   if (old_extra_ic_state == GetExtraICState()) {
     // Tagged operations can lead to non-truncating HChanges
-    if (left->IsUndefined(isolate_) || left->IsBoolean()) {
+    if (left->IsOddball()) {
       left_kind_ = GENERIC;
     } else {
-      DCHECK(right->IsUndefined(isolate_) || right->IsBoolean());
+      DCHECK(right->IsOddball());
       right_kind_ = GENERIC;
     }
   }
@@ -270,8 +270,8 @@
                                                   Kind kind) const {
   Kind new_kind = GENERIC;
   bool is_truncating = Token::IsTruncatingBinaryOp(op());
-  if (object->IsBoolean() && is_truncating) {
-    // Booleans will be automatically truncated by HChange.
+  if (object->IsOddball() && is_truncating) {
+    // Oddballs will be automatically truncated by HChange.
     new_kind = INT32;
   } else if (object->IsUndefined(isolate_)) {
     // Undefined will be automatically truncated by HChange.
diff --git a/src/ic/ic-state.h b/src/ic/ic-state.h
index 38be57a..1ba37b9 100644
--- a/src/ic/ic-state.h
+++ b/src/ic/ic-state.h
@@ -26,10 +26,8 @@
  public:
   explicit CallICState(ExtraICState extra_ic_state)
       : bit_field_(extra_ic_state) {}
-  CallICState(int argc, ConvertReceiverMode convert_mode,
-              TailCallMode tail_call_mode)
-      : bit_field_(ArgcBits::encode(argc) |
-                   ConvertModeBits::encode(convert_mode) |
+  CallICState(ConvertReceiverMode convert_mode, TailCallMode tail_call_mode)
+      : bit_field_(ConvertModeBits::encode(convert_mode) |
                    TailCallModeBits::encode(tail_call_mode)) {}
 
   ExtraICState GetExtraICState() const { return bit_field_; }
@@ -38,7 +36,6 @@
                                   void (*Generate)(Isolate*,
                                                    const CallICState&));
 
-  int argc() const { return ArgcBits::decode(bit_field_); }
   ConvertReceiverMode convert_mode() const {
     return ConvertModeBits::decode(bit_field_);
   }
@@ -47,8 +44,7 @@
   }
 
  private:
-  typedef BitField<int, 0, Code::kArgumentsBits> ArgcBits;
-  typedef BitField<ConvertReceiverMode, ArgcBits::kNext, 2> ConvertModeBits;
+  typedef BitField<ConvertReceiverMode, 0, 2> ConvertModeBits;
   typedef BitField<TailCallMode, ConvertModeBits::kNext, 1> TailCallModeBits;
 
   int const bit_field_;
diff --git a/src/ic/ic.cc b/src/ic/ic.cc
index 0e751bd..7e0cefd 100644
--- a/src/ic/ic.cc
+++ b/src/ic/ic.cc
@@ -4,6 +4,8 @@
 
 #include "src/ic/ic.h"
 
+#include <iostream>
+
 #include "src/accessors.h"
 #include "src/api-arguments-inl.h"
 #include "src/api.h"
@@ -16,6 +18,7 @@
 #include "src/frames-inl.h"
 #include "src/ic/call-optimization.h"
 #include "src/ic/handler-compiler.h"
+#include "src/ic/handler-configuration-inl.h"
 #include "src/ic/ic-compiler.h"
 #include "src/ic/ic-inl.h"
 #include "src/ic/stub-cache.h"
@@ -98,38 +101,51 @@
 
 void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
                  State new_state) {
-  if (FLAG_trace_ic) {
-    PrintF("[%s%s in ", is_keyed() ? "Keyed" : "", type);
+  if (!FLAG_trace_ic) return;
+  PrintF("[%s%s in ", is_keyed() ? "Keyed" : "", type);
 
-    // TODO(jkummerow): Add support for "apply". The logic is roughly:
-    // marker = [fp_ + kMarkerOffset];
-    // if marker is smi and marker.value == INTERNAL and
-    //     the frame's code == builtin(Builtins::kFunctionApply):
-    // then print "apply from" and advance one frame
+  // TODO(jkummerow): Add support for "apply". The logic is roughly:
+  // marker = [fp_ + kMarkerOffset];
+  // if marker is smi and marker.value == INTERNAL and
+  //     the frame's code == builtin(Builtins::kFunctionApply):
+  // then print "apply from" and advance one frame
 
-    Object* maybe_function =
-        Memory::Object_at(fp_ + JavaScriptFrameConstants::kFunctionOffset);
-    if (maybe_function->IsJSFunction()) {
-      JSFunction* function = JSFunction::cast(maybe_function);
-      JavaScriptFrame::PrintFunctionAndOffset(function, function->code(), pc(),
-                                              stdout, true);
+  Object* maybe_function =
+      Memory::Object_at(fp_ + JavaScriptFrameConstants::kFunctionOffset);
+  if (maybe_function->IsJSFunction()) {
+    JSFunction* function = JSFunction::cast(maybe_function);
+    int code_offset = 0;
+    if (function->IsInterpreted()) {
+      code_offset = InterpretedFrame::GetBytecodeOffset(fp());
+    } else {
+      code_offset =
+          static_cast<int>(pc() - function->code()->instruction_start());
     }
-
-    const char* modifier = "";
-    if (kind() == Code::KEYED_STORE_IC) {
-      KeyedAccessStoreMode mode =
-          casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
-      modifier = GetTransitionMarkModifier(mode);
-    }
-    void* map = nullptr;
-    if (!receiver_map().is_null()) {
-      map = reinterpret_cast<void*>(*receiver_map());
-    }
-    PrintF(" (%c->%c%s) map=%p ", TransitionMarkFromState(old_state),
-           TransitionMarkFromState(new_state), modifier, map);
-    name->ShortPrint(stdout);
-    PrintF("]\n");
+    JavaScriptFrame::PrintFunctionAndOffset(function, function->abstract_code(),
+                                            code_offset, stdout, true);
   }
+
+  const char* modifier = "";
+  if (kind() == Code::KEYED_STORE_IC) {
+    KeyedAccessStoreMode mode =
+        casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
+    modifier = GetTransitionMarkModifier(mode);
+  }
+  Map* map = nullptr;
+  if (!receiver_map().is_null()) {
+    map = *receiver_map();
+  }
+  PrintF(" (%c->%c%s) map=(%p", TransitionMarkFromState(old_state),
+         TransitionMarkFromState(new_state), modifier,
+         reinterpret_cast<void*>(map));
+  if (map != nullptr) {
+    PrintF(" dict=%u own=%u type=", map->is_dictionary_map(),
+           map->NumberOfOwnDescriptors());
+    std::cout << map->instance_type();
+  }
+  PrintF(") ");
+  name->ShortPrint(stdout);
+  PrintF("]\n");
 }
 
 
@@ -171,6 +187,16 @@
   StackFrame* frame = it.frame();
   DCHECK(fp == frame->fp() && pc_address == frame->pc_address());
 #endif
+  // For interpreted functions, some bytecode handlers construct a
+  // frame. We have to skip the constructed frame to find the interpreted
+  // function's frame. Check if there is an additional frame, and if so, skip
+  // it. However, the pc should not be updated, since the calls to ICs happen
+  // from the bytecode handlers themselves.
+  Object* frame_type =
+      Memory::Object_at(fp + TypedFrameConstants::kFrameTypeOffset);
+  if (frame_type == Smi::FromInt(StackFrame::STUB)) {
+    fp = Memory::Address_at(fp + TypedFrameConstants::kCallerFPOffset);
+  }
   fp_ = fp;
   if (FLAG_enable_embedded_constant_pool) {
     constant_pool_address_ = constant_pool;
@@ -224,11 +250,6 @@
   // corresponding to the frame.
   StackFrameIterator it(isolate());
   while (it.frame()->fp() != this->fp()) it.Advance();
-  if (FLAG_ignition && it.frame()->type() == StackFrame::STUB) {
-    // Advance over bytecode handler frame.
-    // TODO(rmcilroy): Remove this once bytecode handlers don't need a frame.
-    it.Advance();
-  }
   JavaScriptFrame* frame = JavaScriptFrame::cast(it.frame());
   // Find the function on the stack and both the active code for the
   // function and the original code.
@@ -504,19 +525,6 @@
   PatchInlinedSmiCode(isolate, address, DISABLE_INLINED_SMI_CHECK);
 }
 
-
-// static
-Handle<Code> KeyedLoadIC::ChooseMegamorphicStub(Isolate* isolate,
-                                                ExtraICState extra_state) {
-  // TODO(ishell): remove extra_ic_state
-  if (FLAG_compiled_keyed_generic_loads) {
-    return KeyedLoadGenericStub(isolate).GetCode();
-  } else {
-    return isolate->builtins()->KeyedLoadIC_Megamorphic();
-  }
-}
-
-
 static bool MigrateDeprecated(Handle<Object> object) {
   if (!object->IsJSObject()) return false;
   Handle<JSObject> receiver = Handle<JSObject>::cast(object);
@@ -562,11 +570,11 @@
     nexus->ConfigureMonomorphic(name, map, handler);
   } else if (kind() == Code::STORE_IC) {
     StoreICNexus* nexus = casted_nexus<StoreICNexus>();
-    nexus->ConfigureMonomorphic(map, Handle<Code>::cast(handler));
+    nexus->ConfigureMonomorphic(map, handler);
   } else {
     DCHECK(kind() == Code::KEYED_STORE_IC);
     KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
-    nexus->ConfigureMonomorphic(name, map, Handle<Code>::cast(handler));
+    nexus->ConfigureMonomorphic(name, map, handler);
   }
 
   vector_set_ = true;
@@ -691,11 +699,8 @@
   return true;
 }
 
-bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Object> code) {
-  DCHECK(code->IsSmi() || code->IsCode());
-  if (!code->IsSmi() && !Code::cast(*code)->is_handler()) {
-    return false;
-  }
+bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Object> handler) {
+  DCHECK(IsHandler(*handler));
   if (is_keyed() && state() != RECOMPUTE_HANDLER) return false;
   Handle<Map> map = receiver_map();
   MapHandleList maps;
@@ -735,16 +740,16 @@
   number_of_valid_maps++;
   if (number_of_valid_maps > 1 && is_keyed()) return false;
   if (number_of_valid_maps == 1) {
-    ConfigureVectorState(name, receiver_map(), code);
+    ConfigureVectorState(name, receiver_map(), handler);
   } else {
     if (handler_to_overwrite >= 0) {
-      handlers.Set(handler_to_overwrite, code);
+      handlers.Set(handler_to_overwrite, handler);
       if (!map.is_identical_to(maps.at(handler_to_overwrite))) {
         maps.Set(handler_to_overwrite, map);
       }
     } else {
       maps.Add(map);
-      handlers.Add(code);
+      handlers.Add(handler);
     }
 
     ConfigureVectorState(name, &maps, &handlers);
@@ -754,8 +759,7 @@
 }
 
 void IC::UpdateMonomorphicIC(Handle<Object> handler, Handle<Name> name) {
-  DCHECK(handler->IsSmi() ||
-         (handler->IsCode() && Handle<Code>::cast(handler)->is_handler()));
+  DCHECK(IsHandler(*handler));
   ConfigureVectorState(name, receiver_map(), handler);
 }
 
@@ -786,24 +790,28 @@
   return transitioned_map == target_map;
 }
 
-void IC::PatchCache(Handle<Name> name, Handle<Object> code) {
-  DCHECK(code->IsCode() || (code->IsSmi() && (kind() == Code::LOAD_IC ||
-                                              kind() == Code::KEYED_LOAD_IC)));
+void IC::PatchCache(Handle<Name> name, Handle<Object> handler) {
+  DCHECK(IsHandler(*handler));
+  // Currently only load and store ICs support non-code handlers.
+  DCHECK_IMPLIES(!handler->IsCode(), kind() == Code::LOAD_IC ||
+                                         kind() == Code::KEYED_LOAD_IC ||
+                                         kind() == Code::STORE_IC ||
+                                         kind() == Code::KEYED_STORE_IC);
   switch (state()) {
     case UNINITIALIZED:
     case PREMONOMORPHIC:
-      UpdateMonomorphicIC(code, name);
+      UpdateMonomorphicIC(handler, name);
       break;
     case RECOMPUTE_HANDLER:
     case MONOMORPHIC:
       if (kind() == Code::LOAD_GLOBAL_IC) {
-        UpdateMonomorphicIC(code, name);
+        UpdateMonomorphicIC(handler, name);
         break;
       }
     // Fall through.
     case POLYMORPHIC:
       if (!is_keyed() || state() == RECOMPUTE_HANDLER) {
-        if (UpdatePolymorphicIC(name, code)) break;
+        if (UpdatePolymorphicIC(name, handler)) break;
         // For keyed stubs, we can't know whether old handlers were for the
         // same key.
         CopyICToMegamorphicCache(name);
@@ -812,7 +820,7 @@
       ConfigureVectorState(MEGAMORPHIC, name);
     // Fall through.
     case MEGAMORPHIC:
-      UpdateMegamorphicCache(*receiver_map(), *name, *code);
+      UpdateMegamorphicCache(*receiver_map(), *name, *handler);
       // Indicate that we've handled this case.
       DCHECK(UseVector());
       vector_set_ = true;
@@ -825,6 +833,7 @@
 
 Handle<Code> KeyedStoreIC::ChooseMegamorphicStub(Isolate* isolate,
                                                  ExtraICState extra_state) {
+  DCHECK(!FLAG_tf_store_ic_stub);
   LanguageMode mode = StoreICState::GetLanguageMode(extra_state);
   return is_strict(mode)
              ? isolate->builtins()->KeyedStoreIC_Megamorphic_Strict()
@@ -833,13 +842,186 @@
 
 Handle<Object> LoadIC::SimpleFieldLoad(FieldIndex index) {
   if (FLAG_tf_load_ic_stub) {
-    return handle(Smi::FromInt(index.GetLoadByFieldOffset()), isolate());
+    TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldDH);
+    return LoadHandler::LoadField(isolate(), index);
   }
   TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldStub);
   LoadFieldStub stub(isolate(), index);
   return stub.GetCode();
 }
 
+namespace {
+
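+// InitPrototypeChecks is used in two passes: instantiated with
+// fill_array == false it only counts the required prototype checks (see
+// GetPrototypeCheckCount below); with fill_array == true it also stores the
+// corresponding weak cells into |array| starting at |first_index|.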
+template <bool fill_array = true>
+int InitPrototypeChecks(Isolate* isolate, Handle<Map> receiver_map,
+                        Handle<JSObject> holder, Handle<Name> name,
+                        Handle<FixedArray> array, int first_index) {
+  DCHECK(holder.is_null() || holder->HasFastProperties());
+
+  // We don't encode the requirement to check access rights because we already
+  // passed the access check for the current native context and the access
+  // can't be revoked.
+
+  HandleScope scope(isolate);
+  int checks_count = 0;
+
+  if (receiver_map->IsPrimitiveMap() || receiver_map->IsJSGlobalProxyMap()) {
+    // The validity cell check for primitive and global proxy receivers does
+    // not guarantee that a given native context ever had access to another
+    // native context. However, a handler created for one native context could
+    // be used in another native context through the megamorphic stub cache.
+    // So we record the original native context to which this handler
+    // corresponds.
+    if (fill_array) {
+      Handle<Context> native_context = isolate->native_context();
+      array->set(LoadHandler::kFirstPrototypeIndex + checks_count,
+                 native_context->self_weak_cell());
+    }
+    checks_count++;
+
+  } else if (receiver_map->IsJSGlobalObjectMap()) {
+    if (fill_array) {
+      Handle<JSGlobalObject> global = isolate->global_object();
+      Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
+          global, name, PropertyCellType::kInvalidated);
+      DCHECK(cell->value()->IsTheHole(isolate));
+      Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
+      array->set(LoadHandler::kFirstPrototypeIndex + checks_count, *weak_cell);
+    }
+    checks_count++;
+  }
+
+  // Create/count entries for each global or dictionary prototype that appears
+  // in the prototype chain from the receiver to the holder.
+  PrototypeIterator::WhereToEnd end = name->IsPrivate()
+                                          ? PrototypeIterator::END_AT_NON_HIDDEN
+                                          : PrototypeIterator::END_AT_NULL;
+  for (PrototypeIterator iter(receiver_map, end); !iter.IsAtEnd();
+       iter.Advance()) {
+    Handle<JSObject> current = PrototypeIterator::GetCurrent<JSObject>(iter);
+    if (holder.is_identical_to(current)) break;
+    Handle<Map> current_map(current->map(), isolate);
+
+    if (current_map->IsJSGlobalObjectMap()) {
+      if (fill_array) {
+        Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(current);
+        Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
+            global, name, PropertyCellType::kInvalidated);
+        DCHECK(cell->value()->IsTheHole(isolate));
+        Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
+        array->set(first_index + checks_count, *weak_cell);
+      }
+      checks_count++;
+
+    } else if (current_map->is_dictionary_map()) {
+      DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
+      if (fill_array) {
+        DCHECK_EQ(NameDictionary::kNotFound,
+                  current->property_dictionary()->FindEntry(name));
+        Handle<WeakCell> weak_cell =
+            Map::GetOrCreatePrototypeWeakCell(current, isolate);
+        array->set(first_index + checks_count, *weak_cell);
+      }
+      checks_count++;
+    }
+  }
+  return checks_count;
+}
+
+// Returns 0 if the validity cell check is enough to ensure that the
+// prototype chain from |receiver_map| to |holder| did not change, the
+// number of required prototype checks otherwise, or -1 if a handler has to
+// be compiled.
+// If |holder| is an empty handle then the full prototype chain is checked.
+int GetPrototypeCheckCount(Isolate* isolate, Handle<Map> receiver_map,
+                           Handle<JSObject> holder, Handle<Name> name) {
+  return InitPrototypeChecks<false>(isolate, receiver_map, holder, name,
+                                    Handle<FixedArray>(), 0);
+}
+
+}  // namespace
+
+Handle<Object> LoadIC::LoadFromPrototype(Handle<Map> receiver_map,
+                                         Handle<JSObject> holder,
+                                         Handle<Name> name,
+                                         Handle<Object> smi_handler) {
+  int checks_count =
+      GetPrototypeCheckCount(isolate(), receiver_map, holder, name);
+  DCHECK_LE(0, checks_count);
+
+  if (receiver_map->IsPrimitiveMap() || receiver_map->IsJSGlobalProxyMap()) {
+    DCHECK(!receiver_map->is_dictionary_map());
+    DCHECK_LE(1, checks_count);  // For native context.
+    smi_handler =
+        LoadHandler::EnableAccessCheckOnReceiver(isolate(), smi_handler);
+  } else if (receiver_map->is_dictionary_map() &&
+             !receiver_map->IsJSGlobalObjectMap()) {
+    smi_handler =
+        LoadHandler::EnableNegativeLookupOnReceiver(isolate(), smi_handler);
+  }
+
+  Handle<Cell> validity_cell =
+      Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+  DCHECK(!validity_cell.is_null());
+
+  Handle<WeakCell> holder_cell =
+      Map::GetOrCreatePrototypeWeakCell(holder, isolate());
+
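+  // No extra prototype checks are needed, so the compact Tuple3
+  // representation (holder cell, smi handler, validity cell) suffices.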
+  if (checks_count == 0) {
+    return isolate()->factory()->NewTuple3(holder_cell, smi_handler,
+                                           validity_cell);
+  }
+  Handle<FixedArray> handler_array(isolate()->factory()->NewFixedArray(
+      LoadHandler::kFirstPrototypeIndex + checks_count, TENURED));
+  handler_array->set(LoadHandler::kSmiHandlerIndex, *smi_handler);
+  handler_array->set(LoadHandler::kValidityCellIndex, *validity_cell);
+  handler_array->set(LoadHandler::kHolderCellIndex, *holder_cell);
+  InitPrototypeChecks(isolate(), receiver_map, holder, name, handler_array,
+                      LoadHandler::kFirstPrototypeIndex);
+  return handler_array;
+}
+
+Handle<Object> LoadIC::LoadNonExistent(Handle<Map> receiver_map,
+                                       Handle<Name> name) {
+  Handle<JSObject> holder;  // null handle
+  int checks_count =
+      GetPrototypeCheckCount(isolate(), receiver_map, holder, name);
+  DCHECK_LE(0, checks_count);
+
+  bool do_negative_lookup_on_receiver =
+      receiver_map->is_dictionary_map() && !receiver_map->IsJSGlobalObjectMap();
+  Handle<Object> smi_handler =
+      LoadHandler::LoadNonExistent(isolate(), do_negative_lookup_on_receiver);
+
+  if (receiver_map->IsPrimitiveMap() || receiver_map->IsJSGlobalProxyMap()) {
+    DCHECK(!receiver_map->is_dictionary_map());
+    DCHECK_LE(1, checks_count);  // For native context.
+    smi_handler =
+        LoadHandler::EnableAccessCheckOnReceiver(isolate(), smi_handler);
+  }
+
+  Handle<Object> validity_cell =
+      Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+  if (validity_cell.is_null()) {
+    DCHECK_EQ(0, checks_count);
+    validity_cell = handle(Smi::FromInt(0), isolate());
+  }
+
+  Factory* factory = isolate()->factory();
+  if (checks_count == 0) {
+    return factory->NewTuple3(factory->null_value(), smi_handler,
+                              validity_cell);
+  }
+  Handle<FixedArray> handler_array(factory->NewFixedArray(
+      LoadHandler::kFirstPrototypeIndex + checks_count, TENURED));
+  handler_array->set(LoadHandler::kSmiHandlerIndex, *smi_handler);
+  handler_array->set(LoadHandler::kValidityCellIndex, *validity_cell);
+  handler_array->set(LoadHandler::kHolderCellIndex, *factory->null_value());
+  InitPrototypeChecks(isolate(), receiver_map, holder, name, handler_array,
+                      LoadHandler::kFirstPrototypeIndex);
+  return handler_array;
+}
 
 bool IsCompatibleReceiver(LookupIterator* lookup, Handle<Map> receiver_map) {
   DCHECK(lookup->state() == LookupIterator::ACCESSOR);
@@ -884,6 +1066,7 @@
   if (state() == UNINITIALIZED && kind() != Code::LOAD_GLOBAL_IC) {
     // This is the first time we execute this inline cache. Set the target to
     // the pre monomorphic stub to delay setting the monomorphic state.
+    TRACE_HANDLER_STATS(isolate(), LoadIC_Premonomorphic);
     ConfigureVectorState(PREMONOMORPHIC, Handle<Object>());
     TRACE_IC("LoadIC", lookup->name());
     return;
@@ -894,7 +1077,10 @@
       lookup->state() == LookupIterator::ACCESS_CHECK) {
     code = slow_stub();
   } else if (!lookup->IsFound()) {
-    if (kind() == Code::LOAD_IC || kind() == Code::LOAD_GLOBAL_IC) {
+    if (kind() == Code::LOAD_IC) {
+      TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNonexistentDH);
+      code = LoadNonExistent(receiver_map(), lookup->name());
+    } else if (kind() == Code::LOAD_GLOBAL_IC) {
       code = NamedLoadHandlerCompiler::ComputeLoadNonexistent(lookup->name(),
                                                               receiver_map());
       // TODO(jkummerow/verwaest): Introduce a builtin that handles this case.
@@ -964,30 +1150,80 @@
   return nullptr;
 }
 
-void IC::UpdateMegamorphicCache(Map* map, Name* name, Object* code) {
-  if (code->IsSmi()) {
-    // TODO(jkummerow): Support Smis in the code cache.
-    Handle<Map> map_handle(map, isolate());
-    Handle<Name> name_handle(name, isolate());
-    FieldIndex index =
-        FieldIndex::ForLoadByFieldOffset(map, Smi::cast(code)->value());
-    TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldStub);
-    LoadFieldStub stub(isolate(), index);
-    Code* handler = *stub.GetCode();
-    stub_cache()->Set(*name_handle, *map_handle, handler);
-    return;
+void IC::UpdateMegamorphicCache(Map* map, Name* name, Object* handler) {
+  stub_cache()->Set(name, map, handler);
+}
+
+void IC::TraceHandlerCacheHitStats(LookupIterator* lookup) {
+  if (!FLAG_runtime_call_stats) return;
+
+  if (kind() == Code::LOAD_IC || kind() == Code::LOAD_GLOBAL_IC ||
+      kind() == Code::KEYED_LOAD_IC) {
+    switch (lookup->state()) {
+      case LookupIterator::ACCESS_CHECK:
+        TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_AccessCheck);
+        break;
+      case LookupIterator::INTEGER_INDEXED_EXOTIC:
+        TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_Exotic);
+        break;
+      case LookupIterator::INTERCEPTOR:
+        TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_Interceptor);
+        break;
+      case LookupIterator::JSPROXY:
+        TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_JSProxy);
+        break;
+      case LookupIterator::NOT_FOUND:
+        TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_NonExistent);
+        break;
+      case LookupIterator::ACCESSOR:
+        TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_Accessor);
+        break;
+      case LookupIterator::DATA:
+        TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_Data);
+        break;
+      case LookupIterator::TRANSITION:
+        TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_Transition);
+        break;
+    }
+  } else if (kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC) {
+    switch (lookup->state()) {
+      case LookupIterator::ACCESS_CHECK:
+        TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_AccessCheck);
+        break;
+      case LookupIterator::INTEGER_INDEXED_EXOTIC:
+        TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_Exotic);
+        break;
+      case LookupIterator::INTERCEPTOR:
+        TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_Interceptor);
+        break;
+      case LookupIterator::JSPROXY:
+        TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_JSProxy);
+        break;
+      case LookupIterator::NOT_FOUND:
+        TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_NonExistent);
+        break;
+      case LookupIterator::ACCESSOR:
+        TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_Accessor);
+        break;
+      case LookupIterator::DATA:
+        TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_Data);
+        break;
+      case LookupIterator::TRANSITION:
+        TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_Transition);
+        break;
+    }
+  } else {
+    TRACE_HANDLER_STATS(isolate(), IC_HandlerCacheHit);
   }
-  DCHECK(code->IsCode());
-  stub_cache()->Set(name, map, Code::cast(code));
 }
 
 Handle<Object> IC::ComputeHandler(LookupIterator* lookup,
                                   Handle<Object> value) {
   // Try to find a globally shared handler stub.
-  Handle<Object> handler_or_index = GetMapIndependentHandler(lookup);
-  if (!handler_or_index.is_null()) {
-    DCHECK(handler_or_index->IsCode() || handler_or_index->IsSmi());
-    return handler_or_index;
+  Handle<Object> shared_handler = GetMapIndependentHandler(lookup);
+  if (!shared_handler.is_null()) {
+    DCHECK(IC::IsHandler(*shared_handler));
+    return shared_handler;
   }
 
   // Otherwise check the map's handler cache for a map-specific handler, and
@@ -1007,16 +1243,16 @@
     stub_holder_map = receiver_map();
   }
 
-  Handle<Code> code = PropertyHandlerCompiler::Find(
+  Handle<Object> handler = PropertyHandlerCompiler::Find(
       lookup->name(), stub_holder_map, kind(), flag);
   // Use the cached value if it exists, and if it is different from the
   // handler that just missed.
-  if (!code.is_null()) {
-    Handle<Object> handler;
-    if (maybe_handler_.ToHandle(&handler)) {
-      if (!handler.is_identical_to(code)) {
-        TRACE_HANDLER_STATS(isolate(), IC_HandlerCacheHit);
-        return code;
+  if (!handler.is_null()) {
+    Handle<Object> current_handler;
+    if (maybe_handler_.ToHandle(&current_handler)) {
+      if (!current_handler.is_identical_to(handler)) {
+        TraceHandlerCacheHitStats(lookup);
+        return handler;
       }
     } else {
       // maybe_handler_ is only populated for MONOMORPHIC and POLYMORPHIC ICs.
@@ -1024,24 +1260,27 @@
       // cache (which just missed) is different from the cached handler.
       if (state() == MEGAMORPHIC && lookup->GetReceiver()->IsHeapObject()) {
         Map* map = Handle<HeapObject>::cast(lookup->GetReceiver())->map();
-        Code* megamorphic_cached_code = stub_cache()->Get(*lookup->name(), map);
-        if (megamorphic_cached_code != *code) {
-          TRACE_HANDLER_STATS(isolate(), IC_HandlerCacheHit);
-          return code;
+        Object* megamorphic_cached_handler =
+            stub_cache()->Get(*lookup->name(), map);
+        if (megamorphic_cached_handler != *handler) {
+          TraceHandlerCacheHitStats(lookup);
+          return handler;
         }
       } else {
-        TRACE_HANDLER_STATS(isolate(), IC_HandlerCacheHit);
-        return code;
+        TraceHandlerCacheHitStats(lookup);
+        return handler;
       }
     }
   }
 
-  code = CompileHandler(lookup, value, flag);
-  DCHECK(code->is_handler());
-  DCHECK(Code::ExtractCacheHolderFromFlags(code->flags()) == flag);
-  Map::UpdateCodeCache(stub_holder_map, lookup->name(), code);
-
-  return code;
+  handler = CompileHandler(lookup, value, flag);
+  DCHECK(IC::IsHandler(*handler));
+  if (handler->IsCode()) {
+    Handle<Code> code = Handle<Code>::cast(handler);
+    DCHECK_EQ(Code::ExtractCacheHolderFromFlags(code->flags()), flag);
+    Map::UpdateCodeCache(stub_holder_map, lookup->name(), code);
+  }
+  return handler;
 }
 
 Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
@@ -1111,17 +1350,33 @@
           }
           // Ruled out by IsCompatibleReceiver() above.
           DCHECK(AccessorInfo::IsCompatibleReceiverMap(isolate(), info, map));
-          if (!holder->HasFastProperties()) return slow_stub();
-          if (receiver_is_holder) {
-            TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterStub);
-            int index = lookup->GetAccessorIndex();
-            LoadApiGetterStub stub(isolate(), true, index);
-            return stub.GetCode();
-          }
-          if (info->is_sloppy() && !receiver->IsJSReceiver()) {
+          if (!holder->HasFastProperties() ||
+              (info->is_sloppy() && !receiver->IsJSReceiver())) {
+            DCHECK(!holder->HasFastProperties() || !receiver_is_holder);
             TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
             return slow_stub();
           }
+          if (FLAG_tf_load_ic_stub) {
+            Handle<Object> smi_handler = LoadHandler::LoadApiGetter(
+                isolate(), lookup->GetAccessorIndex());
+            if (receiver_is_holder) {
+              TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterDH);
+              return smi_handler;
+            }
+            if (kind() != Code::LOAD_GLOBAL_IC) {
+              TRACE_HANDLER_STATS(isolate(),
+                                  LoadIC_LoadApiGetterFromPrototypeDH);
+              return LoadFromPrototype(map, holder, lookup->name(),
+                                       smi_handler);
+            }
+          } else {
+            if (receiver_is_holder) {
+              TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterStub);
+              int index = lookup->GetAccessorIndex();
+              LoadApiGetterStub stub(isolate(), true, index);
+              return stub.GetCode();
+            }
+          }
           break;  // Custom-compiled handler.
         }
       }
@@ -1153,18 +1408,36 @@
       // -------------- Fields --------------
       if (lookup->property_details().type() == DATA) {
         FieldIndex field = lookup->GetFieldIndex();
+        Handle<Object> smi_handler = SimpleFieldLoad(field);
         if (receiver_is_holder) {
-          return SimpleFieldLoad(field);
+          return smi_handler;
+        }
+        if (FLAG_tf_load_ic_stub && kind() != Code::LOAD_GLOBAL_IC) {
+          TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldFromPrototypeDH);
+          return LoadFromPrototype(map, holder, lookup->name(), smi_handler);
         }
         break;  // Custom-compiled handler.
       }
 
       // -------------- Constant properties --------------
       DCHECK(lookup->property_details().type() == DATA_CONSTANT);
-      if (receiver_is_holder) {
-        TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantStub);
-        LoadConstantStub stub(isolate(), lookup->GetConstantIndex());
-        return stub.GetCode();
+      if (FLAG_tf_load_ic_stub) {
+        Handle<Object> smi_handler =
+            LoadHandler::LoadConstant(isolate(), lookup->GetConstantIndex());
+        if (receiver_is_holder) {
+          TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantDH);
+          return smi_handler;
+        }
+        if (kind() != Code::LOAD_GLOBAL_IC) {
+          TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantFromPrototypeDH);
+          return LoadFromPrototype(map, holder, lookup->name(), smi_handler);
+        }
+      } else {
+        if (receiver_is_holder) {
+          TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantStub);
+          LoadConstantStub stub(isolate(), lookup->GetConstantIndex());
+          return stub.GetCode();
+        }
       }
       break;  // Custom-compiled handler.
     }
@@ -1182,9 +1455,9 @@
   return Handle<Code>::null();
 }
 
-Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
-                                    Handle<Object> unused,
-                                    CacheHolderFlag cache_holder) {
+Handle<Object> LoadIC::CompileHandler(LookupIterator* lookup,
+                                      Handle<Object> unused,
+                                      CacheHolderFlag cache_holder) {
   Handle<JSObject> holder = lookup->GetHolder<JSObject>();
 #ifdef DEBUG
   // Only used by DCHECKs below.
@@ -1229,6 +1502,10 @@
       DCHECK(IsCompatibleReceiver(lookup, map));
       Handle<Object> accessors = lookup->GetAccessors();
       if (accessors->IsAccessorPair()) {
+        if (lookup->TryLookupCachedProperty()) {
+          DCHECK_EQ(LookupIterator::DATA, lookup->state());
+          return ComputeHandler(lookup);
+        }
         DCHECK(holder->HasFastProperties());
         DCHECK(!GetSharedFunctionInfo()->HasDebugInfo());
         Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
@@ -1421,7 +1698,9 @@
     if ((object->IsJSObject() && key->IsSmi()) ||
         (object->IsString() && key->IsNumber())) {
       UpdateLoadElement(Handle<HeapObject>::cast(object));
-      TRACE_IC("LoadIC", key);
+      if (is_vector_set()) {
+        TRACE_IC("LoadIC", key);
+      }
     }
   }
 
@@ -1580,6 +1859,7 @@
   if (state() == UNINITIALIZED) {
     // This is the first time we execute this inline cache. Set the target to
     // the premonomorphic stub to delay setting the monomorphic state.
+    TRACE_HANDLER_STATS(isolate(), StoreIC_Premonomorphic);
     ConfigureVectorState(PREMONOMORPHIC, Handle<Object>());
     TRACE_IC("StoreIC", lookup->name());
     return;
@@ -1589,13 +1869,72 @@
   if (!use_ic) {
     TRACE_GENERIC_IC(isolate(), "StoreIC", "LookupForWrite said 'false'");
   }
-  Handle<Code> code =
-      use_ic ? Handle<Code>::cast(ComputeHandler(lookup, value)) : slow_stub();
+  Handle<Object> handler = use_ic ? ComputeHandler(lookup, value)
+                                  : Handle<Object>::cast(slow_stub());
 
-  PatchCache(lookup->name(), code);
+  PatchCache(lookup->name(), handler);
   TRACE_IC("StoreIC", lookup->name());
 }
 
+Handle<Object> StoreIC::StoreTransition(Handle<Map> receiver_map,
+                                        Handle<JSObject> holder,
+                                        Handle<Map> transition,
+                                        Handle<Name> name) {
+  int descriptor = transition->LastAdded();
+  Handle<DescriptorArray> descriptors(transition->instance_descriptors());
+  PropertyDetails details = descriptors->GetDetails(descriptor);
+  Representation representation = details.representation();
+  DCHECK(!representation.IsNone());
+
+  // Declarative handlers don't support access checks.
+  DCHECK(!transition->is_access_check_needed());
+
+  Handle<Object> smi_handler;
+  if (details.type() == DATA_CONSTANT) {
+    smi_handler = StoreHandler::TransitionToConstant(isolate(), descriptor);
+
+  } else {
+    DCHECK_EQ(DATA, details.type());
+    bool extend_storage =
+        Map::cast(transition->GetBackPointer())->unused_property_fields() == 0;
+
+    FieldIndex index = FieldIndex::ForDescriptor(*transition, descriptor);
+    smi_handler = StoreHandler::TransitionToField(
+        isolate(), descriptor, index, representation, extend_storage);
+  }
+  // |holder| is either the receiver (if the property is non-existent) or
+  // one of its prototypes.
+  DCHECK(!holder.is_null());
+  bool is_nonexistent = holder->map() == transition->GetBackPointer();
+  if (is_nonexistent) holder = Handle<JSObject>::null();
+
+  int checks_count =
+      GetPrototypeCheckCount(isolate(), receiver_map, holder, name);
+  DCHECK_LE(0, checks_count);
+  DCHECK(!receiver_map->IsJSGlobalObjectMap());
+
+  Handle<Object> validity_cell =
+      Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+  if (validity_cell.is_null()) {
+    DCHECK_EQ(0, checks_count);
+    validity_cell = handle(Smi::FromInt(0), isolate());
+  }
+
+  Handle<WeakCell> transition_cell = Map::WeakCellForMap(transition);
+
+  Factory* factory = isolate()->factory();
+  if (checks_count == 0) {
+    return factory->NewTuple3(transition_cell, smi_handler, validity_cell);
+  }
+  Handle<FixedArray> handler_array(factory->NewFixedArray(
+      StoreHandler::kFirstPrototypeIndex + checks_count, TENURED));
+  handler_array->set(StoreHandler::kSmiHandlerIndex, *smi_handler);
+  handler_array->set(StoreHandler::kValidityCellIndex, *validity_cell);
+  handler_array->set(StoreHandler::kTransitionCellIndex, *transition_cell);
+  InitPrototypeChecks(isolate(), receiver_map, holder, name, handler_array,
+                      StoreHandler::kFirstPrototypeIndex);
+  return handler_array;
+}
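
StoreTransition packages its result in one of two shapes: when no prototype
checks are needed, a compact Tuple3 of (transition cell, smi handler,
validity cell); otherwise a FixedArray whose leading slots are fixed and
whose tail carries one weak cell per checked prototype. A sketch of that
array layout, reusing the constant names from this patch; the numeric slot
values are assumptions.

    // Slot layout of the FixedArray store handler when checks_count > 0.
    enum StoreHandlerSlot {
      kSmiHandlerIndex = 0,      // encoded store action (field or transition)
      kValidityCellIndex = 1,    // cell guarding the prototype chain's shape
      kTransitionCellIndex = 2,  // weak cell holding the transition map
      kFirstPrototypeIndex = 3,  // weak cells for checked prototypes follow
    };

    // Total length of the handler array for a given number of checks.
    constexpr int HandlerArrayLength(int checks_count) {
      return kFirstPrototypeIndex + checks_count;
    }
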
 
 static Handle<Code> PropertyCellStoreHandler(
     Isolate* isolate, Handle<JSObject> receiver, Handle<JSGlobalObject> holder,
@@ -1632,8 +1971,13 @@
         TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
         return slow_stub();
       }
-
       DCHECK(lookup->IsCacheableTransition());
+      if (FLAG_tf_store_ic_stub) {
+        Handle<Map> transition = lookup->transition_map();
+        TRACE_HANDLER_STATS(isolate(), StoreIC_StoreTransitionDH);
+        return StoreTransition(receiver_map(), holder, transition,
+                               lookup->name());
+      }
       break;  // Custom-compiled handler.
     }
 
@@ -1711,17 +2055,25 @@
 
       // -------------- Fields --------------
       if (lookup->property_details().type() == DATA) {
-        bool use_stub = true;
-        if (lookup->representation().IsHeapObject()) {
-          // Only use a generic stub if no types need to be tracked.
-          Handle<FieldType> field_type = lookup->GetFieldType();
-          use_stub = !field_type->IsClass();
-        }
-        if (use_stub) {
-          TRACE_HANDLER_STATS(isolate(), StoreIC_StoreFieldStub);
-          StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
-                              lookup->representation());
-          return stub.GetCode();
+        if (FLAG_tf_store_ic_stub) {
+          TRACE_HANDLER_STATS(isolate(), StoreIC_StoreFieldDH);
+          int descriptor = lookup->GetFieldDescriptorIndex();
+          FieldIndex index = lookup->GetFieldIndex();
+          return StoreHandler::StoreField(isolate(), descriptor, index,
+                                          lookup->representation());
+        } else {
+          bool use_stub = true;
+          if (lookup->representation().IsHeapObject()) {
+            // Only use a generic stub if no types need to be tracked.
+            Handle<FieldType> field_type = lookup->GetFieldType();
+            use_stub = !field_type->IsClass();
+          }
+          if (use_stub) {
+            TRACE_HANDLER_STATS(isolate(), StoreIC_StoreFieldStub);
+            StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
+                                lookup->representation());
+            return stub.GetCode();
+          }
         }
         break;  // Custom-compiled handler.
       }
@@ -1742,9 +2094,9 @@
   return Handle<Code>::null();
 }
 
-Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
-                                     Handle<Object> value,
-                                     CacheHolderFlag cache_holder) {
+Handle<Object> StoreIC::CompileHandler(LookupIterator* lookup,
+                                       Handle<Object> value,
+                                       CacheHolderFlag cache_holder) {
   DCHECK_NE(LookupIterator::JSPROXY, lookup->state());
 
   // This is currently guaranteed by checks in StoreIC::Store.
@@ -1765,6 +2117,7 @@
         cell->set_value(isolate()->heap()->the_hole_value());
         return code;
       }
+      DCHECK(!FLAG_tf_store_ic_stub);
       Handle<Map> transition = lookup->transition_map();
       // Currently not handled by CompileStoreTransition.
       DCHECK(holder->HasFastProperties());
@@ -1836,6 +2189,7 @@
 
       // -------------- Fields --------------
       if (lookup->property_details().type() == DATA) {
+        DCHECK(!FLAG_tf_store_ic_stub);
 #ifdef DEBUG
         bool use_stub = true;
         if (lookup->representation().IsHeapObject()) {
@@ -1981,7 +2335,6 @@
     }
   }
 
-  TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_Polymorphic);
   MapHandleList transitioned_maps(target_receiver_maps.length());
   CodeHandleList handlers(target_receiver_maps.length());
   PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
@@ -2241,7 +2594,6 @@
 
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(Runtime_CallIC_Miss) {
-  TimerEventScope<TimerEventIcMiss> timer(isolate);
   HandleScope scope(isolate);
   DCHECK_EQ(3, args.length());
   // Runtime functions don't follow the IC's calling convention.
@@ -2258,7 +2610,6 @@
 
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
-  TimerEventScope<TimerEventIcMiss> timer(isolate);
   HandleScope scope(isolate);
   DCHECK_EQ(4, args.length());
   // Runtime functions don't follow the IC's calling convention.
@@ -2279,7 +2630,7 @@
 
   } else if (kind == FeedbackVectorSlotKind::LOAD_GLOBAL_IC) {
     Handle<Name> key(vector->GetName(vector_slot), isolate);
-    DCHECK_NE(*key, *isolate->factory()->empty_string());
+    DCHECK_NE(*key, isolate->heap()->empty_string());
     DCHECK_EQ(*isolate->global_object(), *receiver);
     LoadGlobalICNexus nexus(vector, vector_slot);
     LoadGlobalIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
@@ -2298,7 +2649,6 @@
 
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Miss) {
-  TimerEventScope<TimerEventIcMiss> timer(isolate);
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
   // Runtime functions don't follow the IC's calling convention.
@@ -2309,7 +2659,7 @@
   DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC,
             vector->GetKind(vector_slot));
   Handle<String> name(vector->GetName(vector_slot), isolate);
-  DCHECK_NE(*name, *isolate->factory()->empty_string());
+  DCHECK_NE(*name, isolate->heap()->empty_string());
 
   LoadGlobalICNexus nexus(vector, vector_slot);
   LoadGlobalIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
@@ -2330,7 +2680,7 @@
   DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC,
             vector->GetKind(vector_slot));
   Handle<String> name(vector->GetName(vector_slot), isolate);
-  DCHECK_NE(*name, *isolate->factory()->empty_string());
+  DCHECK_NE(*name, isolate->heap()->empty_string());
 
   Handle<JSGlobalObject> global = isolate->global_object();
 
@@ -2343,7 +2693,7 @@
         script_contexts, lookup_result.context_index);
     Handle<Object> result =
         FixedArray::get(*script_context, lookup_result.slot_index, isolate);
-    if (*result == *isolate->factory()->the_hole_value()) {
+    if (*result == isolate->heap()->the_hole_value()) {
       THROW_NEW_ERROR_RETURN_FAILURE(
           isolate, NewReferenceError(MessageTemplate::kNotDefined, name));
     }
@@ -2370,7 +2720,6 @@
 
 // Used from ic-<arch>.cc
 RUNTIME_FUNCTION(Runtime_KeyedLoadIC_Miss) {
-  TimerEventScope<TimerEventIcMiss> timer(isolate);
   HandleScope scope(isolate);
   DCHECK_EQ(4, args.length());
   // Runtime functions don't follow the IC's calling convention.
@@ -2387,7 +2736,6 @@
 
 
 RUNTIME_FUNCTION(Runtime_KeyedLoadIC_MissFromStubFailure) {
-  TimerEventScope<TimerEventIcMiss> timer(isolate);
   HandleScope scope(isolate);
   typedef LoadWithVectorDescriptor Descriptor;
   DCHECK_EQ(Descriptor::kParameterCount, args.length());
@@ -2406,7 +2754,6 @@
 
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
-  TimerEventScope<TimerEventIcMiss> timer(isolate);
   HandleScope scope(isolate);
   DCHECK_EQ(5, args.length());
   // Runtime functions don't follow the IC's calling convention.
@@ -2434,7 +2781,6 @@
 
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
-  TimerEventScope<TimerEventIcMiss> timer(isolate);
   HandleScope scope(isolate);
   DCHECK_EQ(5, args.length());
   // Runtime functions don't follow the IC's calling convention.
@@ -2470,7 +2816,6 @@
 
 
 RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
-  TimerEventScope<TimerEventIcMiss> timer(isolate);
   HandleScope scope(isolate);
   // Runtime functions don't follow the IC's calling convention.
   Handle<Object> object = args.at<Object>(0);
@@ -2609,7 +2954,6 @@
 
 
 RUNTIME_FUNCTION(Runtime_BinaryOpIC_Miss) {
-  TimerEventScope<TimerEventIcMiss> timer(isolate);
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
   typedef BinaryOpDescriptor Descriptor;
@@ -2622,7 +2966,6 @@
 
 
 RUNTIME_FUNCTION(Runtime_BinaryOpIC_MissWithAllocationSite) {
-  TimerEventScope<TimerEventIcMiss> timer(isolate);
   HandleScope scope(isolate);
   DCHECK_EQ(3, args.length());
   typedef BinaryOpWithAllocationSiteDescriptor Descriptor;
@@ -2686,7 +3029,6 @@
 
 // Used from CompareICStub::GenerateMiss in code-stubs-<arch>.cc.
 RUNTIME_FUNCTION(Runtime_CompareIC_Miss) {
-  TimerEventScope<TimerEventIcMiss> timer(isolate);
   HandleScope scope(isolate);
   DCHECK(args.length() == 3);
   CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2)));
@@ -2711,7 +3053,6 @@
 
 
 RUNTIME_FUNCTION(Runtime_ToBooleanIC_Miss) {
-  TimerEventScope<TimerEventIcMiss> timer(isolate);
   DCHECK(args.length() == 1);
   HandleScope scope(isolate);
   Handle<Object> object = args.at<Object>(0);
@@ -2729,7 +3070,7 @@
   CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 5);
   HandleScope scope(isolate);
 
-  if (FLAG_runtime_call_stats) {
+  if (V8_UNLIKELY(FLAG_runtime_stats)) {
     RETURN_RESULT_OR_FAILURE(
         isolate, Runtime::SetObjectProperty(isolate, receiver, name, value,
                                             language_mode));
diff --git a/src/ic/ic.h b/src/ic/ic.h
index bf395f1..9e69cc8 100644
--- a/src/ic/ic.h
+++ b/src/ic/ic.h
@@ -81,6 +81,8 @@
 
   static InlineCacheState StateFromCode(Code* code);
 
+  static inline bool IsHandler(Object* object);
+
  protected:
   Address fp() const { return fp_; }
   Address pc() const { return *pc_address_; }
@@ -138,6 +140,8 @@
   static void OnTypeFeedbackChanged(Isolate* isolate, Code* host);
   static void PostPatching(Address address, Code* target, Code* old_target);
 
+  void TraceHandlerCacheHitStats(LookupIterator* lookup);
+
   // Compute the handler either by compiling or by retrieving a cached version.
   Handle<Object> ComputeHandler(LookupIterator* lookup,
                                 Handle<Object> value = Handle<Code>::null());
@@ -145,11 +149,11 @@
     UNREACHABLE();
     return Handle<Code>::null();
   }
-  virtual Handle<Code> CompileHandler(LookupIterator* lookup,
-                                      Handle<Object> value,
-                                      CacheHolderFlag cache_holder) {
+  virtual Handle<Object> CompileHandler(LookupIterator* lookup,
+                                        Handle<Object> value,
+                                        CacheHolderFlag cache_holder) {
     UNREACHABLE();
-    return Handle<Code>::null();
+    return Handle<Object>::null();
   }
 
   void UpdateMonomorphicIC(Handle<Object> handler, Handle<Name> name);
@@ -303,12 +307,23 @@
 
   Handle<Object> GetMapIndependentHandler(LookupIterator* lookup) override;
 
-  Handle<Code> CompileHandler(LookupIterator* lookup, Handle<Object> unused,
-                              CacheHolderFlag cache_holder) override;
+  Handle<Object> CompileHandler(LookupIterator* lookup, Handle<Object> unused,
+                                CacheHolderFlag cache_holder) override;
 
  private:
+  // Creates a data handler that represents a load of the field at the given
+  // index.
   Handle<Object> SimpleFieldLoad(FieldIndex index);
 
+  // Creates a data handler that represents a prototype chain check followed
+  // by the given Smi-handler, which encodes a load from the holder.
+  // Can be used only if GetPrototypeCheckCount() returns a non-negative
+  // value.
+  Handle<Object> LoadFromPrototype(Handle<Map> receiver_map,
+                                   Handle<JSObject> holder, Handle<Name> name,
+                                   Handle<Object> smi_handler);
+
+  // Creates a data handler that represents a load of a non-existent property.
+  Handle<Object> LoadNonExistent(Handle<Map> receiver_map, Handle<Name> name);
+
   friend class IC;
 };
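
Both LoadFromPrototype and LoadNonExistent yield handlers whose
preconditions must be re-established at dispatch time: the validity cell
must still report the prototype chain as unchanged, and every cached
prototype weak cell must still be alive. A reduced sketch of those checks;
all types and names here are stand-ins, not V8's.

    #include <optional>
    #include <vector>

    struct WeakCell { bool cleared; };       // stand-in for a V8 weak cell
    constexpr int kPrototypeChainValid = 0;  // plays Map::kPrototypeChainValid's role

    struct PrototypeLoadHandler {
      int validity_cell;                 // Smi payload; 0 means "unchanged"
      std::vector<WeakCell> prototypes;  // cells to re-check at dispatch
      int smi_handler;                   // load action to run if checks pass
    };

    std::optional<int> TryGetSmiHandler(const PrototypeLoadHandler& h) {
      if (h.validity_cell != kPrototypeChainValid) return std::nullopt;
      for (const WeakCell& cell : h.prototypes)
        if (cell.cleared) return std::nullopt;  // a cached map died: miss
      return h.smi_handler;
    }
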
 
@@ -341,10 +356,6 @@
   // Code generator routines.
   static void GenerateMiss(MacroAssembler* masm);
   static void GenerateRuntimeGetProperty(MacroAssembler* masm);
-  static void GenerateMegamorphic(MacroAssembler* masm);
-
-  static Handle<Code> ChooseMegamorphicStub(Isolate* isolate,
-                                            ExtraICState extra_state);
 
   static void Clear(Isolate* isolate, Code* host, KeyedLoadICNexus* nexus);
 
@@ -402,10 +413,14 @@
   void UpdateCaches(LookupIterator* lookup, Handle<Object> value,
                     JSReceiver::StoreFromKeyed store_mode);
   Handle<Object> GetMapIndependentHandler(LookupIterator* lookup) override;
-  Handle<Code> CompileHandler(LookupIterator* lookup, Handle<Object> value,
-                              CacheHolderFlag cache_holder) override;
+  Handle<Object> CompileHandler(LookupIterator* lookup, Handle<Object> value,
+                                CacheHolderFlag cache_holder) override;
 
  private:
+  Handle<Object> StoreTransition(Handle<Map> receiver_map,
+                                 Handle<JSObject> holder,
+                                 Handle<Map> transition, Handle<Name> name);
+
   friend class IC;
 };
 
diff --git a/src/ic/keyed-store-generic.cc b/src/ic/keyed-store-generic.cc
new file mode 100644
index 0000000..30faba8
--- /dev/null
+++ b/src/ic/keyed-store-generic.cc
@@ -0,0 +1,549 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/ic/keyed-store-generic.h"
+
+#include "src/compiler/code-assembler.h"
+#include "src/contexts.h"
+#include "src/isolate.h"
+
+namespace v8 {
+namespace internal {
+
+using compiler::Node;
+
+class KeyedStoreGenericAssembler : public CodeStubAssembler {
+ public:
+  void KeyedStoreGeneric(const StoreICParameters* p,
+                         LanguageMode language_mode);
+
+ private:
+  enum UpdateLength {
+    kDontChangeLength,
+    kIncrementLengthByOne,
+    kBumpLengthWithGap
+  };
+
+  void EmitGenericElementStore(Node* receiver, Node* receiver_map,
+                               Node* instance_type, Node* intptr_index,
+                               Node* value, Node* context, Label* slow);
+
+  void EmitGenericPropertyStore(Node* receiver, Node* receiver_map,
+                                const StoreICParameters* p, Label* slow);
+
+  void BranchIfPrototypesHaveNonFastElements(Node* receiver_map,
+                                             Label* non_fast_elements,
+                                             Label* only_fast_elements);
+
+  void TryRewriteElements(Node* receiver, Node* receiver_map, Node* elements,
+                          Node* native_context, ElementsKind from_kind,
+                          ElementsKind to_kind, Label* bailout);
+
+  void StoreElementWithCapacity(Node* receiver, Node* receiver_map,
+                                Node* elements, Node* elements_kind,
+                                Node* intptr_index, Node* value, Node* context,
+                                Label* slow, UpdateLength update_length);
+
+  void MaybeUpdateLengthAndReturn(Node* receiver, Node* index, Node* value,
+                                  UpdateLength update_length);
+
+  void TryChangeToHoleyMapHelper(Node* receiver, Node* receiver_map,
+                                 Node* native_context, ElementsKind packed_kind,
+                                 ElementsKind holey_kind, Label* done,
+                                 Label* map_mismatch, Label* bailout);
+  void TryChangeToHoleyMap(Node* receiver, Node* receiver_map,
+                           Node* current_elements_kind, Node* context,
+                           ElementsKind packed_kind, Label* bailout);
+  void TryChangeToHoleyMapMulti(Node* receiver, Node* receiver_map,
+                                Node* current_elements_kind, Node* context,
+                                ElementsKind packed_kind,
+                                ElementsKind packed_kind_2, Label* bailout);
+
+  // Do not add fields, so that a CodeStubAssembler can safely be
+  // reinterpret_cast to this class.
+};
+
+void KeyedStoreGenericGenerator::Generate(
+    CodeStubAssembler* assembler, const CodeStubAssembler::StoreICParameters* p,
+    LanguageMode language_mode) {
+  STATIC_ASSERT(sizeof(CodeStubAssembler) ==
+                sizeof(KeyedStoreGenericAssembler));
+  auto assm = reinterpret_cast<KeyedStoreGenericAssembler*>(assembler);
+  assm->KeyedStoreGeneric(p, language_mode);
+}
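
Generate works only because KeyedStoreGenericAssembler adds no data members,
which is what the STATIC_ASSERT on the sizes is guarding: it lets a
CodeStubAssembler be viewed as the subclass. The pattern in miniature
(formally undefined behavior in ISO C++; the assert is what makes it
defensible in practice):

    #include <cassert>

    struct Base { int x = 1; };

    struct Helper : Base {  // methods only, no extra state
      int Doubled() const { return x * 2; }
    };
    static_assert(sizeof(Base) == sizeof(Helper),
                  "adding a field to Helper would break the cast below");

    int main() {
      Base b;
      auto* h = reinterpret_cast<Helper*>(&b);  // mirrors Generate()'s cast
      assert(h->Doubled() == 2);
    }
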
+
+void KeyedStoreGenericAssembler::BranchIfPrototypesHaveNonFastElements(
+    Node* receiver_map, Label* non_fast_elements, Label* only_fast_elements) {
+  Variable var_map(this, MachineRepresentation::kTagged);
+  var_map.Bind(receiver_map);
+  Label loop_body(this, &var_map);
+  Goto(&loop_body);
+
+  Bind(&loop_body);
+  {
+    Node* map = var_map.value();
+    Node* prototype = LoadMapPrototype(map);
+    GotoIf(WordEqual(prototype, NullConstant()), only_fast_elements);
+    Node* prototype_map = LoadMap(prototype);
+    var_map.Bind(prototype_map);
+    Node* instance_type = LoadMapInstanceType(prototype_map);
+    STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
+    STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
+    GotoIf(Int32LessThanOrEqual(instance_type,
+                                Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
+           non_fast_elements);
+    Node* elements_kind = LoadMapElementsKind(prototype_map);
+    STATIC_ASSERT(FIRST_ELEMENTS_KIND == FIRST_FAST_ELEMENTS_KIND);
+    GotoIf(Int32LessThanOrEqual(elements_kind,
+                                Int32Constant(LAST_FAST_ELEMENTS_KIND)),
+           &loop_body);
+    GotoIf(Word32Equal(elements_kind, Int32Constant(NO_ELEMENTS)), &loop_body);
+    Goto(non_fast_elements);
+  }
+}
+
+void KeyedStoreGenericAssembler::TryRewriteElements(
+    Node* receiver, Node* receiver_map, Node* elements, Node* native_context,
+    ElementsKind from_kind, ElementsKind to_kind, Label* bailout) {
+  DCHECK(IsFastPackedElementsKind(from_kind));
+  ElementsKind holey_from_kind = GetHoleyElementsKind(from_kind);
+  ElementsKind holey_to_kind = GetHoleyElementsKind(to_kind);
+  if (AllocationSite::GetMode(from_kind, to_kind) == TRACK_ALLOCATION_SITE) {
+    TrapAllocationMemento(receiver, bailout);
+  }
+  Label perform_transition(this), check_holey_map(this);
+  Variable var_target_map(this, MachineType::PointerRepresentation());
+  // Check if the receiver has the default |from_kind| map.
+  {
+    Node* packed_map =
+        LoadContextElement(native_context, Context::ArrayMapIndex(from_kind));
+    GotoIf(WordNotEqual(receiver_map, packed_map), &check_holey_map);
+    var_target_map.Bind(
+        LoadContextElement(native_context, Context::ArrayMapIndex(to_kind)));
+    Goto(&perform_transition);
+  }
+
+  // Check if the receiver has the default |holey_from_kind| map.
+  Bind(&check_holey_map);
+  {
+    Node* holey_map = LoadContextElement(
+        native_context, Context::ArrayMapIndex(holey_from_kind));
+    GotoIf(WordNotEqual(receiver_map, holey_map), bailout);
+    var_target_map.Bind(LoadContextElement(
+        native_context, Context::ArrayMapIndex(holey_to_kind)));
+    Goto(&perform_transition);
+  }
+
+  // Found a supported transition target map, perform the transition!
+  Bind(&perform_transition);
+  {
+    if (IsFastDoubleElementsKind(from_kind) !=
+        IsFastDoubleElementsKind(to_kind)) {
+      Node* capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
+      GrowElementsCapacity(receiver, elements, from_kind, to_kind, capacity,
+                           capacity, INTPTR_PARAMETERS, bailout);
+    }
+    StoreObjectField(receiver, JSObject::kMapOffset, var_target_map.value());
+  }
+}
+
+void KeyedStoreGenericAssembler::TryChangeToHoleyMapHelper(
+    Node* receiver, Node* receiver_map, Node* native_context,
+    ElementsKind packed_kind, ElementsKind holey_kind, Label* done,
+    Label* map_mismatch, Label* bailout) {
+  Node* packed_map =
+      LoadContextElement(native_context, Context::ArrayMapIndex(packed_kind));
+  GotoIf(WordNotEqual(receiver_map, packed_map), map_mismatch);
+  if (AllocationSite::GetMode(packed_kind, holey_kind) ==
+      TRACK_ALLOCATION_SITE) {
+    TrapAllocationMemento(receiver, bailout);
+  }
+  Node* holey_map =
+      LoadContextElement(native_context, Context::ArrayMapIndex(holey_kind));
+  StoreObjectField(receiver, JSObject::kMapOffset, holey_map);
+  Goto(done);
+}
+
+void KeyedStoreGenericAssembler::TryChangeToHoleyMap(
+    Node* receiver, Node* receiver_map, Node* current_elements_kind,
+    Node* context, ElementsKind packed_kind, Label* bailout) {
+  ElementsKind holey_kind = GetHoleyElementsKind(packed_kind);
+  Label already_holey(this);
+
+  GotoIf(Word32Equal(current_elements_kind, Int32Constant(holey_kind)),
+         &already_holey);
+  Node* native_context = LoadNativeContext(context);
+  TryChangeToHoleyMapHelper(receiver, receiver_map, native_context, packed_kind,
+                            holey_kind, &already_holey, bailout, bailout);
+  Bind(&already_holey);
+}
+
+void KeyedStoreGenericAssembler::TryChangeToHoleyMapMulti(
+    Node* receiver, Node* receiver_map, Node* current_elements_kind,
+    Node* context, ElementsKind packed_kind, ElementsKind packed_kind_2,
+    Label* bailout) {
+  ElementsKind holey_kind = GetHoleyElementsKind(packed_kind);
+  ElementsKind holey_kind_2 = GetHoleyElementsKind(packed_kind_2);
+  Label already_holey(this), check_other_kind(this);
+
+  GotoIf(Word32Equal(current_elements_kind, Int32Constant(holey_kind)),
+         &already_holey);
+  GotoIf(Word32Equal(current_elements_kind, Int32Constant(holey_kind_2)),
+         &already_holey);
+
+  Node* native_context = LoadNativeContext(context);
+  TryChangeToHoleyMapHelper(receiver, receiver_map, native_context, packed_kind,
+                            holey_kind, &already_holey, &check_other_kind,
+                            bailout);
+  Bind(&check_other_kind);
+  TryChangeToHoleyMapHelper(receiver, receiver_map, native_context,
+                            packed_kind_2, holey_kind_2, &already_holey,
+                            bailout, bailout);
+  Bind(&already_holey);
+}
+
+void KeyedStoreGenericAssembler::MaybeUpdateLengthAndReturn(
+    Node* receiver, Node* index, Node* value, UpdateLength update_length) {
+  if (update_length != kDontChangeLength) {
+    Node* new_length = SmiTag(IntPtrAdd(index, IntPtrConstant(1)));
+    StoreObjectFieldNoWriteBarrier(receiver, JSArray::kLengthOffset, new_length,
+                                   MachineRepresentation::kTagged);
+  }
+  Return(value);
+}
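
The three UpdateLength modes correspond to ordinary JS array semantics: an
in-bounds overwrite leaves the length alone, appending at index == length
grows it by one, and storing past the end grows it while leaving holes
behind. A plain C++ analogue, with a vector standing in for the elements
backing store (whose capacity is assumed sufficient, as it is by the time
this helper runs):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    enum UpdateLength {
      kDontChangeLength,
      kIncrementLengthByOne,
      kBumpLengthWithGap
    };

    void StoreAndMaybeUpdateLength(std::vector<int>& elements, size_t& length,
                                   size_t index, int value, UpdateLength mode) {
      elements[index] = value;
      // Both growing modes set length to index + 1, exactly as the stub does.
      if (mode != kDontChangeLength) length = index + 1;
    }

    int main() {
      std::vector<int> elements(8);
      size_t length = 3;
      StoreAndMaybeUpdateLength(elements, length, 3, 42, kIncrementLengthByOne);
      assert(length == 4);
      StoreAndMaybeUpdateLength(elements, length, 6, 7, kBumpLengthWithGap);
      assert(length == 7);  // indices 4 and 5 are now holes
    }
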
+
+void KeyedStoreGenericAssembler::StoreElementWithCapacity(
+    Node* receiver, Node* receiver_map, Node* elements, Node* elements_kind,
+    Node* intptr_index, Node* value, Node* context, Label* slow,
+    UpdateLength update_length) {
+  if (update_length != kDontChangeLength) {
+    CSA_ASSERT(this, Word32Equal(LoadMapInstanceType(receiver_map),
+                                 Int32Constant(JS_ARRAY_TYPE)));
+  }
+  STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
+  const int kHeaderSize = FixedArray::kHeaderSize - kHeapObjectTag;
+
+  Label check_double_elements(this), check_cow_elements(this);
+  Node* elements_map = LoadMap(elements);
+  GotoIf(WordNotEqual(elements_map, LoadRoot(Heap::kFixedArrayMapRootIndex)),
+         &check_double_elements);
+
+  // FixedArray backing store -> Smi or object elements.
+  {
+    Node* offset = ElementOffsetFromIndex(intptr_index, FAST_ELEMENTS,
+                                          INTPTR_PARAMETERS, kHeaderSize);
+    // Check if we're about to overwrite the hole. We can safely do that
+    // only if there can be no setters on the prototype chain.
+    // If we know that we're storing beyond the previous array length, we
+    // can skip the hole check (and always assume the hole).
+    {
+      Label hole_check_passed(this);
+      if (update_length == kDontChangeLength) {
+        Node* element = Load(MachineType::AnyTagged(), elements, offset);
+        GotoIf(WordNotEqual(element, TheHoleConstant()), &hole_check_passed);
+      }
+      BranchIfPrototypesHaveNonFastElements(receiver_map, slow,
+                                            &hole_check_passed);
+      Bind(&hole_check_passed);
+    }
+
+    // Check if the value we're storing matches the elements_kind. Smis
+    // can always be stored.
+    {
+      Label non_smi_value(this);
+      GotoUnless(TaggedIsSmi(value), &non_smi_value);
+      // If we're about to introduce holes, ensure holey elements.
+      if (update_length == kBumpLengthWithGap) {
+        TryChangeToHoleyMapMulti(receiver, receiver_map, elements_kind, context,
+                                 FAST_SMI_ELEMENTS, FAST_ELEMENTS, slow);
+      }
+      StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
+                          value);
+      MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
+
+      Bind(&non_smi_value);
+    }
+
+    // Check if we already have object elements; just do the store if so.
+    {
+      Label must_transition(this);
+      STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+      STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+      GotoIf(Int32LessThanOrEqual(elements_kind,
+                                  Int32Constant(FAST_HOLEY_SMI_ELEMENTS)),
+             &must_transition);
+      if (update_length == kBumpLengthWithGap) {
+        TryChangeToHoleyMap(receiver, receiver_map, elements_kind, context,
+                            FAST_ELEMENTS, slow);
+      }
+      Store(MachineRepresentation::kTagged, elements, offset, value);
+      MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
+
+      Bind(&must_transition);
+    }
+
+    // Transition to the required ElementsKind.
+    {
+      Label transition_to_double(this), transition_to_object(this);
+      Node* native_context = LoadNativeContext(context);
+      Branch(WordEqual(LoadMap(value), LoadRoot(Heap::kHeapNumberMapRootIndex)),
+             &transition_to_double, &transition_to_object);
+      Bind(&transition_to_double);
+      {
+        // If we're adding holes at the end, always transition to a holey
+        // elements kind, otherwise try to remain packed.
+        ElementsKind target_kind = update_length == kBumpLengthWithGap
+                                       ? FAST_HOLEY_DOUBLE_ELEMENTS
+                                       : FAST_DOUBLE_ELEMENTS;
+        TryRewriteElements(receiver, receiver_map, elements, native_context,
+                           FAST_SMI_ELEMENTS, target_kind, slow);
+        // Reload migrated elements.
+        Node* double_elements = LoadElements(receiver);
+        Node* double_offset = ElementOffsetFromIndex(
+            intptr_index, FAST_DOUBLE_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize);
+        // Make sure we do not store signalling NaNs into double arrays.
+        Node* double_value = Float64SilenceNaN(LoadHeapNumberValue(value));
+        StoreNoWriteBarrier(MachineRepresentation::kFloat64, double_elements,
+                            double_offset, double_value);
+        MaybeUpdateLengthAndReturn(receiver, intptr_index, value,
+                                   update_length);
+      }
+
+      Bind(&transition_to_object);
+      {
+        // If we're adding holes at the end, always transition to a holey
+        // elements kind, otherwise try to remain packed.
+        ElementsKind target_kind = update_length == kBumpLengthWithGap
+                                       ? FAST_HOLEY_ELEMENTS
+                                       : FAST_ELEMENTS;
+        TryRewriteElements(receiver, receiver_map, elements, native_context,
+                           FAST_SMI_ELEMENTS, target_kind, slow);
+        // The elements backing store didn't change, no reload necessary.
+        CSA_ASSERT(this, WordEqual(elements, LoadElements(receiver)));
+        Store(MachineRepresentation::kTagged, elements, offset, value);
+        MaybeUpdateLengthAndReturn(receiver, intptr_index, value,
+                                   update_length);
+      }
+    }
+  }
+
+  Bind(&check_double_elements);
+  Node* fixed_double_array_map = LoadRoot(Heap::kFixedDoubleArrayMapRootIndex);
+  GotoIf(WordNotEqual(elements_map, fixed_double_array_map),
+         &check_cow_elements);
+  // FixedDoubleArray backing store -> double elements.
+  {
+    Node* offset = ElementOffsetFromIndex(intptr_index, FAST_DOUBLE_ELEMENTS,
+                                          INTPTR_PARAMETERS, kHeaderSize);
+    // Check if we're about to overwrite the hole. We can safely do that
+    // only if there can be no setters on the prototype chain.
+    {
+      Label hole_check_passed(this);
+      // If we know that we're storing beyond the previous array length, we
+      // can skip the hole check (and always assume the hole).
+      if (update_length == kDontChangeLength) {
+        Label found_hole(this);
+        LoadDoubleWithHoleCheck(elements, offset, &found_hole,
+                                MachineType::None());
+        Goto(&hole_check_passed);
+        Bind(&found_hole);
+      }
+      BranchIfPrototypesHaveNonFastElements(receiver_map, slow,
+                                            &hole_check_passed);
+      Bind(&hole_check_passed);
+    }
+
+    // Try to store the value as a double.
+    {
+      Label non_number_value(this);
+      Node* double_value = PrepareValueForWrite(value, Representation::Double(),
+                                                &non_number_value);
+      // Make sure we do not store signalling NaNs into double arrays.
+      double_value = Float64SilenceNaN(double_value);
+      // If we're about to introduce holes, ensure holey elements.
+      if (update_length == kBumpLengthWithGap) {
+        TryChangeToHoleyMap(receiver, receiver_map, elements_kind, context,
+                            FAST_DOUBLE_ELEMENTS, slow);
+      }
+      StoreNoWriteBarrier(MachineRepresentation::kFloat64, elements, offset,
+                          double_value);
+      MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
+
+      Bind(&non_number_value);
+    }
+
+    // Transition to object elements.
+    {
+      Node* native_context = LoadNativeContext(context);
+      ElementsKind target_kind = update_length == kBumpLengthWithGap
+                                     ? FAST_HOLEY_ELEMENTS
+                                     : FAST_ELEMENTS;
+      TryRewriteElements(receiver, receiver_map, elements, native_context,
+                         FAST_DOUBLE_ELEMENTS, target_kind, slow);
+      // Reload migrated elements.
+      Node* fast_elements = LoadElements(receiver);
+      Node* fast_offset = ElementOffsetFromIndex(
+          intptr_index, FAST_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize);
+      Store(MachineRepresentation::kTagged, fast_elements, fast_offset, value);
+      MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
+    }
+  }
+
+  Bind(&check_cow_elements);
+  {
+    // TODO(jkummerow): Use GrowElementsCapacity instead of bailing out.
+    Goto(slow);
+  }
+}
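
The Float64SilenceNaN calls above exist because a FixedDoubleArray must
never hold a signalling NaN: the hole is itself encoded as a special NaN
bit pattern, so an arbitrary signalling NaN could be mistaken for it (or
trap on some hardware). Conceptually the fix is to set the quiet bit of any
NaN before the raw store; a sketch of that idea (V8's actual implementation
may differ):

    #include <cstdint>
    #include <cstring>

    double SilenceNaN(double v) {
      if (v == v) return v;  // only NaN compares unequal to itself
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof bits);
      bits |= uint64_t{1} << 51;  // quiet bit: top mantissa bit in binary64
      std::memcpy(&v, &bits, sizeof v);
      return v;
    }
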
+
+void KeyedStoreGenericAssembler::EmitGenericElementStore(
+    Node* receiver, Node* receiver_map, Node* instance_type, Node* intptr_index,
+    Node* value, Node* context, Label* slow) {
+  Label if_in_bounds(this), if_increment_length_by_one(this),
+      if_bump_length_with_gap(this), if_grow(this), if_nonfast(this),
+      if_typed_array(this), if_dictionary(this);
+  Node* elements = LoadElements(receiver);
+  Node* elements_kind = LoadMapElementsKind(receiver_map);
+  GotoIf(
+      Int32GreaterThan(elements_kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
+      &if_nonfast);
+
+  Label if_array(this);
+  GotoIf(Word32Equal(instance_type, Int32Constant(JS_ARRAY_TYPE)), &if_array);
+  {
+    Node* capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
+    Branch(UintPtrLessThan(intptr_index, capacity), &if_in_bounds, &if_grow);
+  }
+  Bind(&if_array);
+  {
+    Node* length = SmiUntag(LoadJSArrayLength(receiver));
+    GotoIf(UintPtrLessThan(intptr_index, length), &if_in_bounds);
+    Node* capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
+    GotoIf(UintPtrGreaterThanOrEqual(intptr_index, capacity), &if_grow);
+    Branch(WordEqual(intptr_index, length), &if_increment_length_by_one,
+           &if_bump_length_with_gap);
+  }
+
+  Bind(&if_in_bounds);
+  {
+    StoreElementWithCapacity(receiver, receiver_map, elements, elements_kind,
+                             intptr_index, value, context, slow,
+                             kDontChangeLength);
+  }
+
+  Bind(&if_increment_length_by_one);
+  {
+    StoreElementWithCapacity(receiver, receiver_map, elements, elements_kind,
+                             intptr_index, value, context, slow,
+                             kIncrementLengthByOne);
+  }
+
+  Bind(&if_bump_length_with_gap);
+  {
+    StoreElementWithCapacity(receiver, receiver_map, elements, elements_kind,
+                             intptr_index, value, context, slow,
+                             kBumpLengthWithGap);
+  }
+
+  // Out-of-capacity accesses (index >= capacity) jump here. Additionally,
+  // an ElementsKind transition might be necessary.
+  Bind(&if_grow);
+  {
+    Comment("Grow backing store");
+    // TODO(jkummerow): Support inline backing store growth.
+    Goto(slow);
+  }
+
+  // Any ElementsKind > LAST_FAST_ELEMENTS_KIND jumps here for further dispatch.
+  Bind(&if_nonfast);
+  {
+    STATIC_ASSERT(LAST_ELEMENTS_KIND == LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
+    GotoIf(Int32GreaterThanOrEqual(
+               elements_kind,
+               Int32Constant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND)),
+           &if_typed_array);
+    GotoIf(Word32Equal(elements_kind, Int32Constant(DICTIONARY_ELEMENTS)),
+           &if_dictionary);
+    Goto(slow);
+  }
+
+  Bind(&if_dictionary);
+  {
+    Comment("Dictionary");
+    // TODO(jkummerow): Support storing to dictionary elements.
+    Goto(slow);
+  }
+
+  Bind(&if_typed_array);
+  {
+    Comment("Typed array");
+    // TODO(jkummerow): Support typed arrays.
+    Goto(slow);
+  }
+}
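
For a JS array, the dispatch above classifies the store by comparing the
index against both the array length and the backing store capacity;
anything it cannot prove fast falls through to the runtime. The same
classification as straight-line C++ (names assumed):

    #include <cstddef>

    enum StoreMode {
      kInBounds,              // index < length: plain overwrite
      kIncrementLengthByOne,  // index == length: append
      kBumpLengthWithGap,     // length < index < capacity: append with holes
      kGrow                   // index >= capacity: backing store must grow
    };

    StoreMode ClassifyJSArrayStore(size_t index, size_t length,
                                   size_t capacity) {
      if (index < length) return kInBounds;
      if (index >= capacity) return kGrow;
      return index == length ? kIncrementLengthByOne : kBumpLengthWithGap;
    }
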
+
+void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
+    Node* receiver, Node* receiver_map, const StoreICParameters* p,
+    Label* slow) {
+  Comment("stub cache probe");
+  // TODO(jkummerow): Don't rely on the stub cache as much.
+  // - existing properties can be overwritten inline (unless readonly).
+  // - for dictionary mode receivers, we can even add properties inline
+  //   (unless the prototype chain prevents it).
+  Variable var_handler(this, MachineRepresentation::kTagged);
+  Label found_handler(this, &var_handler), stub_cache_miss(this);
+  TryProbeStubCache(isolate()->store_stub_cache(), receiver, p->name,
+                    &found_handler, &var_handler, &stub_cache_miss);
+  Bind(&found_handler);
+  {
+    Comment("KeyedStoreGeneric found handler");
+    HandleStoreICHandlerCase(p, var_handler.value(), slow);
+  }
+  Bind(&stub_cache_miss);
+  {
+    Comment("KeyedStoreGeneric_miss");
+    TailCallRuntime(Runtime::kKeyedStoreIC_Miss, p->context, p->value, p->slot,
+                    p->vector, p->receiver, p->name);
+  }
+}
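
EmitGenericPropertyStore is deliberately thin for now, as its TODO notes:
probe the store stub cache for a handler cached under (receiver map, name),
run it on a hit, and tail-call the KeyedStoreIC miss runtime otherwise. A
toy model of that probe-or-miss flow; real V8 probes a fixed-size table
twice (primary and secondary) rather than using a std::map.

    #include <functional>
    #include <map>
    #include <string>
    #include <utility>

    using Key = std::pair<int, std::string>;  // (map id, property name)
    using Handler = std::function<void()>;

    bool StoreProperty(std::map<Key, Handler>& stub_cache, int map_id,
                       const std::string& name, const Handler& miss) {
      auto it = stub_cache.find({map_id, name});
      if (it != stub_cache.end()) {
        it->second();  // found_handler: run the cached store handler
        return true;
      }
      miss();  // stub_cache_miss: defer to the runtime
      return false;
    }
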
+
+void KeyedStoreGenericAssembler::KeyedStoreGeneric(const StoreICParameters* p,
+                                                   LanguageMode language_mode) {
+  Variable var_index(this, MachineType::PointerRepresentation());
+  Label if_index(this), if_unique_name(this), slow(this);
+
+  Node* receiver = p->receiver;
+  GotoIf(TaggedIsSmi(receiver), &slow);
+  Node* receiver_map = LoadMap(receiver);
+  Node* instance_type = LoadMapInstanceType(receiver_map);
+  // Receivers requiring non-standard element accesses (interceptors, access
+  // checks, strings and string wrappers, proxies) are handled in the runtime.
+  GotoIf(Int32LessThanOrEqual(instance_type,
+                              Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
+         &slow);
+
+  TryToName(p->name, &if_index, &var_index, &if_unique_name, &slow);
+
+  Bind(&if_index);
+  {
+    Comment("integer index");
+    EmitGenericElementStore(receiver, receiver_map, instance_type,
+                            var_index.value(), p->value, p->context, &slow);
+  }
+
+  Bind(&if_unique_name);
+  {
+    Comment("key is unique name");
+    EmitGenericPropertyStore(receiver, receiver_map, p, &slow);
+  }
+
+  Bind(&slow);
+  {
+    Comment("KeyedStoreGeneric_slow");
+    TailCallRuntime(Runtime::kSetProperty, p->context, p->receiver, p->name,
+                    p->value, SmiConstant(language_mode));
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/ic/keyed-store-generic.h b/src/ic/keyed-store-generic.h
new file mode 100644
index 0000000..daeb61f
--- /dev/null
+++ b/src/ic/keyed-store-generic.h
@@ -0,0 +1,23 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SRC_IC_KEYED_STORE_GENERIC_H_
+#define V8_SRC_IC_KEYED_STORE_GENERIC_H_
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class KeyedStoreGenericGenerator {
+ public:
+  static void Generate(CodeStubAssembler* assembler,
+                       const CodeStubAssembler::StoreICParameters* p,
+                       LanguageMode language_mode);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_SRC_IC_KEYED_STORE_GENERIC_H_
diff --git a/src/ic/mips/access-compiler-mips.cc b/src/ic/mips/access-compiler-mips.cc
index 2aa0283..1c97ca3 100644
--- a/src/ic/mips/access-compiler-mips.cc
+++ b/src/ic/mips/access-compiler-mips.cc
@@ -17,24 +17,22 @@
   __ Jump(code, RelocInfo::CODE_TARGET);
 }
 
-
-Register* PropertyAccessCompiler::load_calling_convention() {
-  // receiver, name, scratch1, scratch2, scratch3.
+void PropertyAccessCompiler::InitializePlatformSpecific(
+    AccessCompilerData* data) {
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, a3, a0, t0};
-  return registers;
-}
 
+  // Load calling convention.
+  // receiver, name, scratch1, scratch2, scratch3.
+  Register load_registers[] = {receiver, name, a3, a0, t0};
 
-Register* PropertyAccessCompiler::store_calling_convention() {
+  // Store calling convention.
   // receiver, name, scratch1, scratch2.
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, a3, t0};
-  return registers;
-}
+  Register store_registers[] = {receiver, name, a3, t0};
 
+  data->Initialize(arraysize(load_registers), load_registers,
+                   arraysize(store_registers), store_registers);
+}
 
 #undef __
 }  // namespace internal
diff --git a/src/ic/mips/handler-compiler-mips.cc b/src/ic/mips/handler-compiler-mips.cc
index df7a0df..b2ddea5 100644
--- a/src/ic/mips/handler-compiler-mips.cc
+++ b/src/ic/mips/handler-compiler-mips.cc
@@ -393,10 +393,30 @@
   }
 }
 
+void PropertyHandlerCompiler::GenerateAccessCheck(
+    Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
+    Label* miss, bool compare_native_contexts_only) {
+  Label done;
+  // Load current native context.
+  __ lw(scratch1, NativeContextMemOperand());
+  // Load expected native context.
+  __ LoadWeakValue(scratch2, native_context_cell, miss);
+
+  if (!compare_native_contexts_only) {
+    __ Branch(&done, eq, scratch1, Operand(scratch2));
+
+    // Compare security tokens of current and expected native contexts.
+    __ lw(scratch1, ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+    __ lw(scratch2, ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+  }
+  __ Branch(miss, ne, scratch1, Operand(scratch2));
+
+  __ bind(&done);
+}
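
GenerateAccessCheck encodes a two-tier test: identical native contexts
always pass; otherwise, unless the caller asked for context comparison
only, two distinct contexts still pass when they share a security token.
The same decision in plain C++ (types are stand-ins):

    struct NativeContext {
      const void* security_token;  // the Context::SECURITY_TOKEN_INDEX slot
    };

    bool AccessCheckPasses(const NativeContext* current,
                           const NativeContext* expected,
                           bool compare_native_contexts_only) {
      if (current == expected) return true;
      if (compare_native_contexts_only) return false;
      // Distinct contexts that share a security token trust each other.
      return current->security_token == expected->security_token;
    }
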
 
 Register PropertyHandlerCompiler::CheckPrototypes(
     Register object_reg, Register holder_reg, Register scratch1,
-    Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+    Register scratch2, Handle<Name> name, Label* miss,
     ReturnHolder return_what) {
   Handle<Map> receiver_map = map();
 
@@ -415,17 +435,6 @@
               Operand(Smi::FromInt(Map::kPrototypeChainValid)));
   }
 
-  // The prototype chain of primitives (and their JSValue wrappers) depends
-  // on the native context, which can't be guarded by validity cells.
-  // |object_reg| holds the native context specific prototype in this case;
-  // we need to check its map.
-  if (check == CHECK_ALL_MAPS) {
-    __ lw(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
-    Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
-    __ GetWeakValue(scratch2, cell);
-    __ Branch(miss, ne, scratch1, Operand(scratch2));
-  }
-
   // Keep track of the current object in register reg.
   Register reg = object_reg;
   int depth = 0;
@@ -435,46 +444,28 @@
     current = isolate()->global_object();
   }
 
-  // Check access rights to the global object.  This has to happen after
-  // the map check so that we know that the object is actually a global
-  // object.
-  // This allows us to install generated handlers for accesses to the
-  // global proxy (as opposed to using slow ICs). See corresponding code
-  // in LookupForRead().
-  if (receiver_map->IsJSGlobalProxyMap()) {
-    __ CheckAccessGlobalProxy(reg, scratch2, miss);
-  }
-
-  Handle<JSObject> prototype = Handle<JSObject>::null();
-  Handle<Map> current_map = receiver_map;
+  Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
+                          isolate());
   Handle<Map> holder_map(holder()->map());
   // Traverse the prototype chain and check the maps in the prototype chain for
   // fast and global objects or do negative lookup for normal objects.
   while (!current_map.is_identical_to(holder_map)) {
     ++depth;
 
-    // Only global objects and objects that do not require access
-    // checks are allowed in stubs.
-    DCHECK(current_map->IsJSGlobalProxyMap() ||
-           !current_map->is_access_check_needed());
-
-    prototype = handle(JSObject::cast(current_map->prototype()));
     if (current_map->IsJSGlobalObjectMap()) {
       GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
                                 name, scratch2, miss);
     } else if (current_map->is_dictionary_map()) {
       DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
-      if (!name->IsUniqueName()) {
-        DCHECK(name->IsString());
-        name = factory()->InternalizeString(Handle<String>::cast(name));
-      }
+      DCHECK(name->IsUniqueName());
       DCHECK(current.is_null() ||
              current->property_dictionary()->FindEntry(name) ==
                  NameDictionary::kNotFound);
 
       if (depth > 1) {
-        // TODO(jkummerow): Cache and re-use weak cell.
-        __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+        Handle<WeakCell> weak_cell =
+            Map::GetOrCreatePrototypeWeakCell(current, isolate());
+        __ LoadWeakValue(reg, weak_cell, miss);
       }
       GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
                                        scratch2);
@@ -482,7 +473,7 @@
 
     reg = holder_reg;  // From now on the object will be in holder_reg.
     // Go to the next object in the prototype chain.
-    current = prototype;
+    current = handle(JSObject::cast(current_map->prototype()));
     current_map = handle(current->map());
   }
 
@@ -493,7 +484,9 @@
 
   bool return_holder = return_what == RETURN_HOLDER;
   if (return_holder && depth != 0) {
-    __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+    Handle<WeakCell> weak_cell =
+        Map::GetOrCreatePrototypeWeakCell(current, isolate());
+    __ LoadWeakValue(reg, weak_cell, miss);
   }
 
   // Return the register containing the holder.
diff --git a/src/ic/mips/ic-mips.cc b/src/ic/mips/ic-mips.cc
index ce9e3d9..561c9d3 100644
--- a/src/ic/mips/ic-mips.cc
+++ b/src/ic/mips/ic-mips.cc
@@ -19,16 +19,6 @@
 
 #define __ ACCESS_MASM(masm)
 
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
-                                            Label* global_object) {
-  // Register usage:
-  //   type: holds the receiver instance type on entry.
-  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
-  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
-}
-
-
 // Helper function used from LoadIC GenerateNormal.
 //
 // elements: Property dictionary. It is not clobbered if a jump to the miss
@@ -129,141 +119,6 @@
                  kDontSaveFPRegs);
 }
 
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
-                                           Register receiver, Register map,
-                                           Register scratch,
-                                           int interceptor_bit, Label* slow) {
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, slow);
-  // Get the map of the receiver.
-  __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  // Check bit field.
-  __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
-  __ And(at, scratch,
-         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
-  __ Branch(slow, ne, at, Operand(zero_reg));
-  // Check that the object is some kind of JS object EXCEPT JS Value type.
-  // In the case that the object is a value-wrapper object,
-  // we enter the runtime system to make sure that indexing into string
-  // objects work as intended.
-  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-  __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
-  __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
-}
-
-
-// Loads an indexed element from a fast case array.
-static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
-                                  Register key, Register elements,
-                                  Register scratch1, Register scratch2,
-                                  Register result, Label* slow) {
-  // Register use:
-  //
-  // receiver - holds the receiver on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // key      - holds the smi key on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // result   - holds the result on exit if the load succeeded.
-  //            Allowed to be the same as 'receiver' or 'key'.
-  //            Unchanged on bailout so 'receiver' and 'key' can be safely
-  //            used by further computation.
-  //
-  // Scratch registers:
-  //
-  // elements - holds the elements of the receiver and its prototypes.
-  //
-  // scratch1 - used to hold elements length, bit fields, base addresses.
-  //
-  // scratch2 - used to hold maps, prototypes, and the loaded value.
-  Label check_prototypes, check_next_prototype;
-  Label done, in_bounds, absent;
-
-  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ AssertFastElements(elements);
-
-  // Check that the key (index) is within bounds.
-  __ lw(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Branch(&in_bounds, lo, key, Operand(scratch1));
-  // Out-of-bounds. Check the prototype chain to see if we can just return
-  // 'undefined'.
-  // Negative keys can't take the fast OOB path.
-  __ Branch(slow, lt, key, Operand(zero_reg));
-  __ bind(&check_prototypes);
-  __ lw(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ bind(&check_next_prototype);
-  __ lw(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
-  // scratch2: current prototype
-  __ LoadRoot(at, Heap::kNullValueRootIndex);
-  __ Branch(&absent, eq, scratch2, Operand(at));
-  __ lw(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
-  __ lw(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
-  // elements: elements of current prototype
-  // scratch2: map of current prototype
-  __ lbu(scratch1, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
-  __ Branch(slow, lo, scratch1, Operand(JS_OBJECT_TYPE));
-  __ lbu(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
-  __ And(at, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
-                               (1 << Map::kHasIndexedInterceptor)));
-  __ Branch(slow, ne, at, Operand(zero_reg));
-  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
-  __ Branch(slow, ne, elements, Operand(at));
-  __ Branch(&check_next_prototype);
-
-  __ bind(&absent);
-  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
-  __ Branch(&done);
-
-  __ bind(&in_bounds);
-  // Fast case: Do the load.
-  __ Addu(scratch1, elements,
-          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  // The key is a smi.
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-  __ Lsa(at, scratch1, key, kPointerSizeLog2 - kSmiTagSize);
-  __ lw(scratch2, MemOperand(at));
-
-  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-  // In case the loaded value is the_hole we have to check the prototype chain.
-  __ Branch(&check_prototypes, eq, scratch2, Operand(at));
-  __ Move(result, scratch2);
-  __ bind(&done);
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if a key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
-                                 Register map, Register hash,
-                                 Label* index_string, Label* not_unique) {
-  // The key is not a smi.
-  Label unique;
-  // Is it a name?
-  __ GetObjectType(key, map, hash);
-  __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
-  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
-  __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));
-
-  // Is the string an array index, with cached numeric value?
-  __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
-  __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
-  __ Branch(index_string, eq, at, Operand(zero_reg));
-
-  // Is the string internalized? We know it's a string, so a single
-  // bit test is enough.
-  // map: key map
-  __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
-  STATIC_ASSERT(kInternalizedTag == 0);
-  __ And(at, hash, Operand(kIsNotInternalizedMask));
-  __ Branch(not_unique, ne, at, Operand(zero_reg));
-
-  __ bind(&unique);
-}
-
 void LoadIC::GenerateNormal(MacroAssembler* masm) {
   Register dictionary = a0;
   DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
@@ -345,105 +200,6 @@
   __ TailCallRuntime(Runtime::kKeyedGetProperty);
 }
 
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
-  // The return address is in ra.
-  Label slow, check_name, index_smi, index_name, property_array_property;
-  Label probe_dictionary, check_number_dictionary;
-
-  Register key = LoadDescriptor::NameRegister();
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  DCHECK(key.is(a2));
-  DCHECK(receiver.is(a1));
-
-  Isolate* isolate = masm->isolate();
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &check_name);
-  __ bind(&index_smi);
-  // Now the key is known to be a smi. This place is also jumped to from below
-  // where a numeric string is converted to a smi.
-
-  GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
-                                 Map::kHasIndexedInterceptor, &slow);
-
-  // Check the receiver's map to see if it has fast elements.
-  __ CheckFastElements(a0, a3, &check_number_dictionary);
-
-  GenerateFastArrayLoad(masm, receiver, key, a0, a3, t0, v0, &slow);
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, t0,
-                      a3);
-  __ Ret();
-
-  __ bind(&check_number_dictionary);
-  __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ lw(a3, FieldMemOperand(t0, JSObject::kMapOffset));
-
-  // Check whether the elements is a number dictionary.
-  // a3: elements map
-  // t0: elements
-  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
-  __ Branch(&slow, ne, a3, Operand(at));
-  __ sra(a0, key, kSmiTagSize);
-  __ LoadFromNumberDictionary(&slow, t0, key, v0, a0, a3, t1);
-  __ Ret();
-
-  // Slow case, key and receiver still in a2 and a1.
-  __ bind(&slow);
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, t0,
-                      a3);
-  GenerateRuntimeGetProperty(masm);
-
-  __ bind(&check_name);
-  GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);
-
-  GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
-                                 Map::kHasNamedInterceptor, &slow);
-
-
-  // If the receiver is a fast-case object, check the stub cache. Otherwise
-  // probe the dictionary.
-  __ lw(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-  __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
-  __ Branch(&probe_dictionary, eq, t0, Operand(at));
-
-  // The handlers in the stub cache expect a vector and slot. Since we won't
-  // change the IC from any downstream misses, a dummy vector can be used.
-  Register vector = LoadWithVectorDescriptor::VectorRegister();
-  Register slot = LoadWithVectorDescriptor::SlotRegister();
-  DCHECK(!AreAliased(vector, slot, t0, t1, t2, t5));
-  Handle<TypeFeedbackVector> dummy_vector =
-      TypeFeedbackVector::DummyVector(masm->isolate());
-  int slot_index = dummy_vector->GetIndex(
-      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
-  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
-  __ li(slot, Operand(Smi::FromInt(slot_index)));
-
-  masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, t0, t1,
-                                                    t2, t5);
-  // Cache miss.
-  GenerateMiss(masm);
-
-  // Do a quick inline probe of the receiver's dictionary, if it
-  // exists.
-  __ bind(&probe_dictionary);
-  // a3: elements
-  __ lw(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
-  GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
-  // Load the property to v0.
-  GenerateDictionaryLoad(masm, &slow, a3, key, v0, t1, t0);
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
-                      t0, a3);
-  __ Ret();
-
-  __ bind(&index_name);
-  __ IndexFromHash(a3, key);
-  // Now jump to the place where smi keys are handled.
-  __ Branch(&index_smi);
-}
-
-
 static void KeyedStoreGenerateMegamorphicHelper(
     MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
     KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
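The hunk above deletes KeyedLoadIC::GenerateMegamorphic and its helpers for mips; the same deletion repeats below for mips64, ppc and s390, so the control flow is worth stating once. A self-contained C++ model of the dispatch order the stub encoded, with stand-in types rather than real V8 classes (smi keys try fast elements, then a number dictionary, then the runtime; name keys probe the stub cache for fast-case receivers and the property dictionary for dictionary-mode ones):

#include <cstdint>
#include <optional>
#include <unordered_map>
#include <vector>

struct Value { int64_t bits = 0; };

// Stand-in receiver: fast elements with holes, plus a slow "number
// dictionary" backing store, as in the deleted stub.
struct Receiver {
  bool has_fast_elements = true;
  std::vector<std::optional<Value>> fast_elements;
  std::unordered_map<uint32_t, Value> number_dictionary;
};

// Smi-key path; a std::nullopt result corresponds to the stub
// tail-calling Runtime::kKeyedGetProperty.
std::optional<Value> KeyedLoadSmi(const Receiver& r, uint32_t index) {
  if (r.has_fast_elements) {
    if (index < r.fast_elements.size() && r.fast_elements[index])
      return r.fast_elements[index];
    // Out of bounds or a hole: the real stub walks the prototype chain
    // first (see the FastOutOfBoundsLoad sketch further down).
  }
  auto it = r.number_dictionary.find(index);
  if (it != r.number_dictionary.end()) return it->second;
  return std::nullopt;  // slow case
}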
diff --git a/src/ic/mips64/access-compiler-mips64.cc b/src/ic/mips64/access-compiler-mips64.cc
index bf6c73e..16d7a3d 100644
--- a/src/ic/mips64/access-compiler-mips64.cc
+++ b/src/ic/mips64/access-compiler-mips64.cc
@@ -17,24 +17,22 @@
   __ Jump(code, RelocInfo::CODE_TARGET);
 }
 
-
-Register* PropertyAccessCompiler::load_calling_convention() {
-  // receiver, name, scratch1, scratch2, scratch3.
+void PropertyAccessCompiler::InitializePlatformSpecific(
+    AccessCompilerData* data) {
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, a3, a0, a4};
-  return registers;
-}
 
+  // Load calling convention.
+  // receiver, name, scratch1, scratch2, scratch3.
+  Register load_registers[] = {receiver, name, a3, a0, a4};
 
-Register* PropertyAccessCompiler::store_calling_convention() {
+  // Store calling convention.
   // receiver, name, scratch1, scratch2.
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, a3, a4};
-  return registers;
-}
+  Register store_registers[] = {receiver, name, a3, a4};
 
+  data->Initialize(arraysize(load_registers), load_registers,
+                   arraysize(store_registers), store_registers);
+}
 
 #undef __
 }  // namespace internal
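Each access-compiler port in this commit replaces the two convention getters, which returned pointers to function-local static Register arrays, with a single InitializePlatformSpecific() that copies both lists into an AccessCompilerData object. A minimal sketch of the shape that object needs for the calls visible in this diff; kMax, IsInitialized() and the field names are assumptions, only Initialize()'s signature is taken from the code above:

#include <cassert>
#include <cstring>

struct Register { int code; };

class AccessCompilerData {
 public:
  void Initialize(int load_count, const Register* load_regs,
                  int store_count, const Register* store_regs) {
    assert(!initialized_ && load_count <= kMax && store_count <= kMax);
    std::memcpy(load_, load_regs, load_count * sizeof(Register));
    std::memcpy(store_, store_regs, store_count * sizeof(Register));
    load_count_ = load_count;
    store_count_ = store_count;
    initialized_ = true;
  }
  bool IsInitialized() const { return initialized_; }

 private:
  static const int kMax = 5;  // receiver, name, up to three scratches
  Register load_[kMax];
  Register store_[kMax];
  int load_count_ = 0;
  int store_count_ = 0;
  bool initialized_ = false;
};

Copying into owned storage, presumably, is what makes the calling convention plain per-isolate data instead of pointers into function-local statics.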
diff --git a/src/ic/mips64/handler-compiler-mips64.cc b/src/ic/mips64/handler-compiler-mips64.cc
index 2190f6d..249f8fe 100644
--- a/src/ic/mips64/handler-compiler-mips64.cc
+++ b/src/ic/mips64/handler-compiler-mips64.cc
@@ -393,10 +393,30 @@
   }
 }
 
+void PropertyHandlerCompiler::GenerateAccessCheck(
+    Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
+    Label* miss, bool compare_native_contexts_only) {
+  Label done;
+  // Load current native context.
+  __ ld(scratch1, NativeContextMemOperand());
+  // Load expected native context.
+  __ LoadWeakValue(scratch2, native_context_cell, miss);
+
+  if (!compare_native_contexts_only) {
+    __ Branch(&done, eq, scratch1, Operand(scratch2));
+
+    // Compare security tokens of current and expected native contexts.
+    __ ld(scratch1, ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+    __ ld(scratch2, ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+  }
+  __ Branch(miss, ne, scratch1, Operand(scratch2));
+
+  __ bind(&done);
+}
 
 Register PropertyHandlerCompiler::CheckPrototypes(
     Register object_reg, Register holder_reg, Register scratch1,
-    Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+    Register scratch2, Handle<Name> name, Label* miss,
     ReturnHolder return_what) {
   Handle<Map> receiver_map = map();
 
@@ -415,17 +435,6 @@
               Operand(Smi::FromInt(Map::kPrototypeChainValid)));
   }
 
-  // The prototype chain of primitives (and their JSValue wrappers) depends
-  // on the native context, which can't be guarded by validity cells.
-  // |object_reg| holds the native context specific prototype in this case;
-  // we need to check its map.
-  if (check == CHECK_ALL_MAPS) {
-    __ ld(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
-    Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
-    __ GetWeakValue(scratch2, cell);
-    __ Branch(miss, ne, scratch1, Operand(scratch2));
-  }
-
   // Keep track of the current object in register reg.
   Register reg = object_reg;
   int depth = 0;
@@ -435,46 +444,28 @@
     current = isolate()->global_object();
   }
 
-  // Check access rights to the global object.  This has to happen after
-  // the map check so that we know that the object is actually a global
-  // object.
-  // This allows us to install generated handlers for accesses to the
-  // global proxy (as opposed to using slow ICs). See corresponding code
-  // in LookupForRead().
-  if (receiver_map->IsJSGlobalProxyMap()) {
-    __ CheckAccessGlobalProxy(reg, scratch2, miss);
-  }
-
-  Handle<JSObject> prototype = Handle<JSObject>::null();
-  Handle<Map> current_map = receiver_map;
+  Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
+                          isolate());
   Handle<Map> holder_map(holder()->map());
   // Traverse the prototype chain and check the maps in the prototype chain for
   // fast and global objects or do negative lookup for normal objects.
   while (!current_map.is_identical_to(holder_map)) {
     ++depth;
 
-    // Only global objects and objects that do not require access
-    // checks are allowed in stubs.
-    DCHECK(current_map->IsJSGlobalProxyMap() ||
-           !current_map->is_access_check_needed());
-
-    prototype = handle(JSObject::cast(current_map->prototype()));
     if (current_map->IsJSGlobalObjectMap()) {
       GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
                                 name, scratch2, miss);
     } else if (current_map->is_dictionary_map()) {
       DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
-      if (!name->IsUniqueName()) {
-        DCHECK(name->IsString());
-        name = factory()->InternalizeString(Handle<String>::cast(name));
-      }
+      DCHECK(name->IsUniqueName());
       DCHECK(current.is_null() ||
              current->property_dictionary()->FindEntry(name) ==
                  NameDictionary::kNotFound);
 
       if (depth > 1) {
-        // TODO(jkummerow): Cache and re-use weak cell.
-        __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+        Handle<WeakCell> weak_cell =
+            Map::GetOrCreatePrototypeWeakCell(current, isolate());
+        __ LoadWeakValue(reg, weak_cell, miss);
       }
       GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
                                        scratch2);
@@ -482,7 +473,7 @@
 
     reg = holder_reg;  // From now on the object will be in holder_reg.
     // Go to the next object in the prototype chain.
-    current = prototype;
+    current = handle(JSObject::cast(current_map->prototype()));
     current_map = handle(current->map());
   }
 
@@ -493,7 +484,9 @@
 
   bool return_holder = return_what == RETURN_HOLDER;
   if (return_holder && depth != 0) {
-    __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+    Handle<WeakCell> weak_cell =
+        Map::GetOrCreatePrototypeWeakCell(current, isolate());
+    __ LoadWeakValue(reg, weak_cell, miss);
   }
 
   // Return the register containing the holder.
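GenerateAccessCheck replaces the old CheckAccessGlobalProxy call that CheckPrototypes used to emit. What the generated code checks, as plain C++ over stand-in types; note that the weak-cell load additionally jumps to miss if the expected native context has been collected:

struct NativeContext {
  const void* security_token;
};

// Mirrors the branch structure above: identical native contexts always
// pass; otherwise, unless the caller asked for context identity only,
// matching security tokens are enough.
bool AccessAllowed(const NativeContext* current,
                   const NativeContext* expected,
                   bool compare_native_contexts_only) {
  if (current == expected) return true;
  if (compare_native_contexts_only) return false;
  return current->security_token == expected->security_token;
}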
diff --git a/src/ic/mips64/ic-mips64.cc b/src/ic/mips64/ic-mips64.cc
index c2f3cb6..57efa35 100644
--- a/src/ic/mips64/ic-mips64.cc
+++ b/src/ic/mips64/ic-mips64.cc
@@ -19,16 +19,6 @@
 
 #define __ ACCESS_MASM(masm)
 
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
-                                            Label* global_object) {
-  // Register usage:
-  //   type: holds the receiver instance type on entry.
-  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
-  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
-}
-
-
 // Helper function used from LoadIC GenerateNormal.
 //
 // elements: Property dictionary. It is not clobbered if a jump to the miss
@@ -128,142 +118,6 @@
                  kDontSaveFPRegs);
 }
 
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
-                                           Register receiver, Register map,
-                                           Register scratch,
-                                           int interceptor_bit, Label* slow) {
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, slow);
-  // Get the map of the receiver.
-  __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  // Check bit field.
-  __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
-  __ And(at, scratch,
-         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
-  __ Branch(slow, ne, at, Operand(zero_reg));
-  // Check that the object is some kind of JS object EXCEPT JS Value type.
-  // In the case that the object is a value-wrapper object,
-  // we enter the runtime system to make sure that indexing into string
-  // objects works as intended.
-  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-  __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
-  __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
-}
-
-
-// Loads an indexed element from a fast case array.
-static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
-                                  Register key, Register elements,
-                                  Register scratch1, Register scratch2,
-                                  Register result, Label* slow) {
-  // Register use:
-  //
-  // receiver - holds the receiver on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // key      - holds the smi key on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // result   - holds the result on exit if the load succeeded.
-  //            Allowed to be the same as 'receiver' or 'key'.
-  //            Unchanged on bailout so 'receiver' and 'key' can be safely
-  //            used by further computation.
-  //
-  // Scratch registers:
-  //
-  // elements - holds the elements of the receiver and its prototypes.
-  //
-  // scratch1 - used to hold elements length, bit fields, base addresses.
-  //
-  // scratch2 - used to hold maps, prototypes, and the loaded value.
-  Label check_prototypes, check_next_prototype;
-  Label done, in_bounds, absent;
-
-  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ AssertFastElements(elements);
-
-  // Check that the key (index) is within bounds.
-  __ ld(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Branch(&in_bounds, lo, key, Operand(scratch1));
-  // Out-of-bounds. Check the prototype chain to see if we can just return
-  // 'undefined'.
-  // Negative keys can't take the fast OOB path.
-  __ Branch(slow, lt, key, Operand(zero_reg));
-  __ bind(&check_prototypes);
-  __ ld(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ bind(&check_next_prototype);
-  __ ld(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
-  // scratch2: current prototype
-  __ LoadRoot(at, Heap::kNullValueRootIndex);
-  __ Branch(&absent, eq, scratch2, Operand(at));
-  __ ld(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
-  __ ld(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
-  // elements: elements of current prototype
-  // scratch2: map of current prototype
-  __ lbu(scratch1, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
-  __ Branch(slow, lo, scratch1, Operand(JS_OBJECT_TYPE));
-  __ lbu(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
-  __ And(at, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
-                               (1 << Map::kHasIndexedInterceptor)));
-  __ Branch(slow, ne, at, Operand(zero_reg));
-  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
-  __ Branch(slow, ne, elements, Operand(at));
-  __ Branch(&check_next_prototype);
-
-  __ bind(&absent);
-  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
-  __ Branch(&done);
-
-  __ bind(&in_bounds);
-  // Fast case: Do the load.
-  __ Daddu(scratch1, elements,
-           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  // The key is a smi.
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-  __ SmiScale(at, key, kPointerSizeLog2);
-  __ daddu(at, at, scratch1);
-  __ ld(scratch2, MemOperand(at));
-
-  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-  // In case the loaded value is the_hole we have to check the prototype chain.
-  __ Branch(&check_prototypes, eq, scratch2, Operand(at));
-  __ Move(result, scratch2);
-  __ bind(&done);
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if a key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
-                                 Register map, Register hash,
-                                 Label* index_string, Label* not_unique) {
-  // The key is not a smi.
-  Label unique;
-  // Is it a name?
-  __ GetObjectType(key, map, hash);
-  __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
-  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
-  __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));
-
-  // Is the string an array index, with cached numeric value?
-  __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
-  __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
-  __ Branch(index_string, eq, at, Operand(zero_reg));
-
-  // Is the string internalized? We know it's a string, so a single
-  // bit test is enough.
-  // map: key map
-  __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
-  STATIC_ASSERT(kInternalizedTag == 0);
-  __ And(at, hash, Operand(kIsNotInternalizedMask));
-  __ Branch(not_unique, ne, at, Operand(zero_reg));
-
-  __ bind(&unique);
-}
-
 void LoadIC::GenerateNormal(MacroAssembler* masm) {
   Register dictionary = a0;
   DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
@@ -344,105 +198,6 @@
   __ TailCallRuntime(Runtime::kKeyedGetProperty);
 }
 
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
-  // The return address is in ra.
-  Label slow, check_name, index_smi, index_name, property_array_property;
-  Label probe_dictionary, check_number_dictionary;
-
-  Register key = LoadDescriptor::NameRegister();
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  DCHECK(key.is(a2));
-  DCHECK(receiver.is(a1));
-
-  Isolate* isolate = masm->isolate();
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &check_name);
-  __ bind(&index_smi);
-  // Now the key is known to be a smi. This place is also jumped to from below
-  // where a numeric string is converted to a smi.
-
-  GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
-                                 Map::kHasIndexedInterceptor, &slow);
-
-  // Check the receiver's map to see if it has fast elements.
-  __ CheckFastElements(a0, a3, &check_number_dictionary);
-
-  GenerateFastArrayLoad(masm, receiver, key, a0, a3, a4, v0, &slow);
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, a4,
-                      a3);
-  __ Ret();
-
-  __ bind(&check_number_dictionary);
-  __ ld(a4, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ ld(a3, FieldMemOperand(a4, JSObject::kMapOffset));
-
-  // Check whether the elements object is a number dictionary.
-  // a3: elements map
-  // a4: elements
-  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
-  __ Branch(&slow, ne, a3, Operand(at));
-  __ dsra32(a0, key, 0);
-  __ LoadFromNumberDictionary(&slow, a4, key, v0, a0, a3, a5);
-  __ Ret();
-
-  // Slow case, key and receiver still in a2 and a1.
-  __ bind(&slow);
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, a4,
-                      a3);
-  GenerateRuntimeGetProperty(masm);
-
-  __ bind(&check_name);
-  GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);
-
-  GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
-                                 Map::kHasNamedInterceptor, &slow);
-
-
-  // If the receiver is a fast-case object, check the stub cache. Otherwise
-  // probe the dictionary.
-  __ ld(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-  __ ld(a4, FieldMemOperand(a3, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
-  __ Branch(&probe_dictionary, eq, a4, Operand(at));
-
-  // The handlers in the stub cache expect a vector and slot. Since we won't
-  // change the IC from any downstream misses, a dummy vector can be used.
-  Register vector = LoadWithVectorDescriptor::VectorRegister();
-  Register slot = LoadWithVectorDescriptor::SlotRegister();
-  DCHECK(!AreAliased(vector, slot, a4, a5, a6, t1));
-  Handle<TypeFeedbackVector> dummy_vector =
-      TypeFeedbackVector::DummyVector(masm->isolate());
-  int slot_index = dummy_vector->GetIndex(
-      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
-  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
-  __ li(slot, Operand(Smi::FromInt(slot_index)));
-
-  masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, a4, a5,
-                                                    a6, t1);
-  // Cache miss.
-  GenerateMiss(masm);
-
-  // Do a quick inline probe of the receiver's dictionary, if it
-  // exists.
-  __ bind(&probe_dictionary);
-  // a3: elements
-  __ ld(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
-  GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
-  // Load the property to v0.
-  GenerateDictionaryLoad(masm, &slow, a3, key, v0, a5, a4);
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
-                      a4, a3);
-  __ Ret();
-
-  __ bind(&index_name);
-  __ IndexFromHash(a3, key);
-  // Now jump to the place where smi keys are handled.
-  __ Branch(&index_smi);
-}
-
-
 static void KeyedStoreGenerateMegamorphicHelper(
     MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
     KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
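The deleted GenerateFastArrayLoad had one subtle path worth spelling out: an out-of-bounds index (or a hole in the backing store) may still return undefined without entering the runtime, but only if the whole prototype chain is harmless; negative keys always go slow. A compilable model of that walk, again over stand-in types:

#include <vector>

struct Prototype {
  bool is_js_object;
  bool needs_access_check;
  bool has_indexed_interceptor;
  bool has_empty_elements;
};

enum class OobResult { kUndefined, kGoSlow };

// One iteration per __ Branch(&check_next_prototype) in the stub; the
// chain ends at null, at which point undefined is safe to return.
OobResult FastOutOfBoundsLoad(const std::vector<Prototype>& chain) {
  for (const Prototype& p : chain) {
    if (!p.is_js_object || p.needs_access_check ||
        p.has_indexed_interceptor || !p.has_empty_elements) {
      return OobResult::kGoSlow;
    }
  }
  return OobResult::kUndefined;
}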
diff --git a/src/ic/ppc/access-compiler-ppc.cc b/src/ic/ppc/access-compiler-ppc.cc
index 6143b4c..f78ef57 100644
--- a/src/ic/ppc/access-compiler-ppc.cc
+++ b/src/ic/ppc/access-compiler-ppc.cc
@@ -17,24 +17,22 @@
   __ Jump(code, RelocInfo::CODE_TARGET);
 }
 
-
-Register* PropertyAccessCompiler::load_calling_convention() {
-  // receiver, name, scratch1, scratch2, scratch3.
+void PropertyAccessCompiler::InitializePlatformSpecific(
+    AccessCompilerData* data) {
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, r6, r3, r7};
-  return registers;
-}
 
+  // Load calling convention.
+  // receiver, name, scratch1, scratch2, scratch3.
+  Register load_registers[] = {receiver, name, r6, r3, r7};
 
-Register* PropertyAccessCompiler::store_calling_convention() {
+  // Store calling convention.
   // receiver, name, scratch1, scratch2.
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, r6, r7};
-  return registers;
-}
+  Register store_registers[] = {receiver, name, r6, r7};
 
+  data->Initialize(arraysize(load_registers), load_registers,
+                   arraysize(store_registers), store_registers);
+}
 
 #undef __
 }  // namespace internal
diff --git a/src/ic/ppc/handler-compiler-ppc.cc b/src/ic/ppc/handler-compiler-ppc.cc
index aafdc77..e0caaa6 100644
--- a/src/ic/ppc/handler-compiler-ppc.cc
+++ b/src/ic/ppc/handler-compiler-ppc.cc
@@ -402,10 +402,34 @@
   }
 }
 
+void PropertyHandlerCompiler::GenerateAccessCheck(
+    Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
+    Label* miss, bool compare_native_contexts_only) {
+  Label done;
+  // Load current native context.
+  __ LoadP(scratch1, NativeContextMemOperand());
+  // Load expected native context.
+  __ LoadWeakValue(scratch2, native_context_cell, miss);
+  __ cmp(scratch1, scratch2);
+
+  if (!compare_native_contexts_only) {
+    __ beq(&done);
+
+    // Compare security tokens of current and expected native contexts.
+    __ LoadP(scratch1,
+             ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+    __ LoadP(scratch2,
+             ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+    __ cmp(scratch1, scratch2);
+  }
+  __ bne(miss);
+
+  __ bind(&done);
+}
 
 Register PropertyHandlerCompiler::CheckPrototypes(
     Register object_reg, Register holder_reg, Register scratch1,
-    Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+    Register scratch2, Handle<Name> name, Label* miss,
     ReturnHolder return_what) {
   Handle<Map> receiver_map = map();
 
@@ -424,17 +448,6 @@
     __ bne(miss);
   }
 
-  // The prototype chain of primitives (and their JSValue wrappers) depends
-  // on the native context, which can't be guarded by validity cells.
-  // |object_reg| holds the native context specific prototype in this case;
-  // we need to check its map.
-  if (check == CHECK_ALL_MAPS) {
-    __ LoadP(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
-    Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
-    __ CmpWeakValue(scratch1, cell, scratch2);
-    __ b(ne, miss);
-  }
-
   // Keep track of the current object in register reg.
   Register reg = object_reg;
   int depth = 0;
@@ -443,18 +456,9 @@
   if (receiver_map->IsJSGlobalObjectMap()) {
     current = isolate()->global_object();
   }
-  // Check access rights to the global object.  This has to happen after
-  // the map check so that we know that the object is actually a global
-  // object.
-  // This allows us to install generated handlers for accesses to the
-  // global proxy (as opposed to using slow ICs). See corresponding code
-  // in LookupForRead().
-  if (receiver_map->IsJSGlobalProxyMap()) {
-    __ CheckAccessGlobalProxy(reg, scratch2, miss);
-  }
 
-  Handle<JSObject> prototype = Handle<JSObject>::null();
-  Handle<Map> current_map = receiver_map;
+  Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
+                          isolate());
   Handle<Map> holder_map(holder()->map());
   // Traverse the prototype chain and check the maps in the prototype chain for
   // fast and global objects or do negative lookup for normal objects.
@@ -466,23 +470,20 @@
     DCHECK(current_map->IsJSGlobalProxyMap() ||
            !current_map->is_access_check_needed());
 
-    prototype = handle(JSObject::cast(current_map->prototype()));
     if (current_map->IsJSGlobalObjectMap()) {
       GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
                                 name, scratch2, miss);
     } else if (current_map->is_dictionary_map()) {
       DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
-      if (!name->IsUniqueName()) {
-        DCHECK(name->IsString());
-        name = factory()->InternalizeString(Handle<String>::cast(name));
-      }
+      DCHECK(name->IsUniqueName());
       DCHECK(current.is_null() ||
              current->property_dictionary()->FindEntry(name) ==
                  NameDictionary::kNotFound);
 
       if (depth > 1) {
-        // TODO(jkummerow): Cache and re-use weak cell.
-        __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+        Handle<WeakCell> weak_cell =
+            Map::GetOrCreatePrototypeWeakCell(current, isolate());
+        __ LoadWeakValue(reg, weak_cell, miss);
       }
       GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
                                        scratch2);
@@ -490,7 +491,7 @@
 
     reg = holder_reg;  // From now on the object will be in holder_reg.
     // Go to the next object in the prototype chain.
-    current = prototype;
+    current = handle(JSObject::cast(current_map->prototype()));
     current_map = handle(current->map());
   }
 
@@ -501,7 +502,9 @@
 
   bool return_holder = return_what == RETURN_HOLDER;
   if (return_holder && depth != 0) {
-    __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+    Handle<WeakCell> weak_cell =
+        Map::GetOrCreatePrototypeWeakCell(current, isolate());
+    __ LoadWeakValue(reg, weak_cell, miss);
   }
 
   // Return the register containing the holder.
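All four handler-compiler ports swap isolate()->factory()->NewWeakCell(current), a fresh cell per compiled handler and the subject of the old TODO(jkummerow), for Map::GetOrCreatePrototypeWeakCell(current, isolate()). The idea, modelled here with an ordinary map rather than the real per-prototype storage:

#include <memory>
#include <unordered_map>

struct JSObject;                       // opaque in this sketch
struct WeakCell { JSObject* value; };  // the real cell is GC-cleared

using PrototypeCellCache =
    std::unordered_map<JSObject*, std::unique_ptr<WeakCell>>;

// Every handler compiled against the same prototype now embeds the
// same cell, instead of allocating a new one per compilation.
WeakCell* GetOrCreatePrototypeWeakCell(JSObject* prototype,
                                       PrototypeCellCache& cache) {
  std::unique_ptr<WeakCell>& slot = cache[prototype];
  if (!slot) slot.reset(new WeakCell{prototype});
  return slot.get();
}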
diff --git a/src/ic/ppc/ic-ppc.cc b/src/ic/ppc/ic-ppc.cc
index 6dd7881..359a6a4 100644
--- a/src/ic/ppc/ic-ppc.cc
+++ b/src/ic/ppc/ic-ppc.cc
@@ -19,18 +19,6 @@
 
 #define __ ACCESS_MASM(masm)
 
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
-                                            Label* global_object) {
-  // Register usage:
-  //   type: holds the receiver instance type on entry.
-  __ cmpi(type, Operand(JS_GLOBAL_OBJECT_TYPE));
-  __ beq(global_object);
-  __ cmpi(type, Operand(JS_GLOBAL_PROXY_TYPE));
-  __ beq(global_object);
-}
-
-
 // Helper function used from LoadIC GenerateNormal.
 //
 // elements: Property dictionary. It is not clobbered if a jump to the miss
@@ -131,143 +119,6 @@
                  kDontSaveFPRegs);
 }
 
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
-                                           Register receiver, Register map,
-                                           Register scratch,
-                                           int interceptor_bit, Label* slow) {
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, slow);
-  // Get the map of the receiver.
-  __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  // Check bit field.
-  __ lbz(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
-  DCHECK(((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)) < 0x8000);
-  __ andi(r0, scratch,
-          Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
-  __ bne(slow, cr0);
-  // Check that the object is some kind of JS object EXCEPT JS Value type.
-  // In the case that the object is a value-wrapper object,
-  // we enter the runtime system to make sure that indexing into string
-  // objects works as intended.
-  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-  __ lbz(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
-  __ cmpi(scratch, Operand(JS_OBJECT_TYPE));
-  __ blt(slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
-                                  Register key, Register elements,
-                                  Register scratch1, Register scratch2,
-                                  Register result, Label* slow) {
-  // Register use:
-  //
-  // receiver - holds the receiver on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // key      - holds the smi key on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // result   - holds the result on exit if the load succeeded.
-  //            Allowed to be the same as 'receiver' or 'key'.
-  //            Unchanged on bailout so 'receiver' and 'key' can be safely
-  //            used by further computation.
-  //
-  // Scratch registers:
-  //
-  // elements - holds the elements of the receiver and its prototypes.
-  //
-  // scratch1 - used to hold elements length, bit fields, base addresses.
-  //
-  // scratch2 - used to hold maps, prototypes, and the loaded value.
-  Label check_prototypes, check_next_prototype;
-  Label done, in_bounds, absent;
-
-  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ AssertFastElements(elements);
-
-  // Check that the key (index) is within bounds.
-  __ LoadP(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ cmpl(key, scratch1);
-  __ blt(&in_bounds);
-  // Out-of-bounds. Check the prototype chain to see if we can just return
-  // 'undefined'.
-  __ cmpi(key, Operand::Zero());
-  __ blt(slow);  // Negative keys can't take the fast OOB path.
-  __ bind(&check_prototypes);
-  __ LoadP(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ bind(&check_next_prototype);
-  __ LoadP(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
-  // scratch2: current prototype
-  __ CompareRoot(scratch2, Heap::kNullValueRootIndex);
-  __ beq(&absent);
-  __ LoadP(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
-  __ LoadP(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
-  // elements: elements of current prototype
-  // scratch2: map of current prototype
-  __ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE);
-  __ blt(slow);
-  __ lbz(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
-  __ andi(r0, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
-                                (1 << Map::kHasIndexedInterceptor)));
-  __ bne(slow, cr0);
-  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
-  __ bne(slow);
-  __ jmp(&check_next_prototype);
-
-  __ bind(&absent);
-  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
-  __ jmp(&done);
-
-  __ bind(&in_bounds);
-  // Fast case: Do the load.
-  __ addi(scratch1, elements,
-          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  // The key is a smi.
-  __ SmiToPtrArrayOffset(scratch2, key);
-  __ LoadPX(scratch2, MemOperand(scratch2, scratch1));
-  __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
-  // In case the loaded value is the_hole we have to check the prototype chain.
-  __ beq(&check_prototypes);
-  __ mr(result, scratch2);
-  __ bind(&done);
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if a key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
-                                 Register map, Register hash,
-                                 Label* index_string, Label* not_unique) {
-  // The key is not a smi.
-  Label unique;
-  // Is it a name?
-  __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
-  __ bgt(not_unique);
-  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
-  __ beq(&unique);
-
-  // Is the string an array index, with cached numeric value?
-  __ lwz(hash, FieldMemOperand(key, Name::kHashFieldOffset));
-  __ mov(r8, Operand(Name::kContainsCachedArrayIndexMask));
-  __ and_(r0, hash, r8, SetRC);
-  __ beq(index_string, cr0);
-
-  // Is the string internalized? We know it's a string, so a single
-  // bit test is enough.
-  // map: key map
-  __ lbz(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
-  STATIC_ASSERT(kInternalizedTag == 0);
-  __ andi(r0, hash, Operand(kIsNotInternalizedMask));
-  __ bne(not_unique, cr0);
-
-  __ bind(&unique);
-}
-
 void LoadIC::GenerateNormal(MacroAssembler* masm) {
   Register dictionary = r3;
   DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
@@ -349,107 +200,6 @@
   __ TailCallRuntime(Runtime::kKeyedGetProperty);
 }
 
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
-  // The return address is in lr.
-  Label slow, check_name, index_smi, index_name, property_array_property;
-  Label probe_dictionary, check_number_dictionary;
-
-  Register key = LoadDescriptor::NameRegister();
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  DCHECK(key.is(r5));
-  DCHECK(receiver.is(r4));
-
-  Isolate* isolate = masm->isolate();
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &check_name);
-  __ bind(&index_smi);
-  // Now the key is known to be a smi. This place is also jumped to from below
-  // where a numeric string is converted to a smi.
-
-  GenerateKeyedLoadReceiverCheck(masm, receiver, r3, r6,
-                                 Map::kHasIndexedInterceptor, &slow);
-
-  // Check the receiver's map to see if it has fast elements.
-  __ CheckFastElements(r3, r6, &check_number_dictionary);
-
-  GenerateFastArrayLoad(masm, receiver, key, r3, r6, r7, r3, &slow);
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, r7,
-                      r6);
-  __ Ret();
-
-  __ bind(&check_number_dictionary);
-  __ LoadP(r7, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ LoadP(r6, FieldMemOperand(r7, JSObject::kMapOffset));
-
-  // Check whether the elements object is a number dictionary.
-  // r6: elements map
-  // r7: elements
-  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
-  __ cmp(r6, ip);
-  __ bne(&slow);
-  __ SmiUntag(r3, key);
-  __ LoadFromNumberDictionary(&slow, r7, key, r3, r3, r6, r8);
-  __ Ret();
-
-  // Slow case, key and receiver still in r3 and r4.
-  __ bind(&slow);
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, r7,
-                      r6);
-  GenerateRuntimeGetProperty(masm);
-
-  __ bind(&check_name);
-  GenerateKeyNameCheck(masm, key, r3, r6, &index_name, &slow);
-
-  GenerateKeyedLoadReceiverCheck(masm, receiver, r3, r6,
-                                 Map::kHasNamedInterceptor, &slow);
-
-  // If the receiver is a fast-case object, check the stub cache. Otherwise
-  // probe the dictionary.
-  __ LoadP(r6, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-  __ LoadP(r7, FieldMemOperand(r6, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
-  __ cmp(r7, ip);
-  __ beq(&probe_dictionary);
-
-
-  // The handlers in the stub cache expect a vector and slot. Since we won't
-  // change the IC from any downstream misses, a dummy vector can be used.
-  Register vector = LoadWithVectorDescriptor::VectorRegister();
-  Register slot = LoadWithVectorDescriptor::SlotRegister();
-  DCHECK(!AreAliased(vector, slot, r7, r8, r9, r10));
-  Handle<TypeFeedbackVector> dummy_vector =
-      TypeFeedbackVector::DummyVector(masm->isolate());
-  int slot_index = dummy_vector->GetIndex(
-      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
-  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
-  __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
-
-  masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, r7, r8,
-                                                    r9, r10);
-  // Cache miss.
-  GenerateMiss(masm);
-
-  // Do a quick inline probe of the receiver's dictionary, if it
-  // exists.
-  __ bind(&probe_dictionary);
-  // r6: elements
-  __ LoadP(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
-  GenerateGlobalInstanceTypeCheck(masm, r3, &slow);
-  // Load the property to r3.
-  GenerateDictionaryLoad(masm, &slow, r6, key, r3, r8, r7);
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
-                      r7, r6);
-  __ Ret();
-
-  __ bind(&index_name);
-  __ IndexFromHash(r6, key);
-  // Now jump to the place where smi keys are handled.
-  __ b(&index_smi);
-}
-
-
 static void StoreIC_PushArgs(MacroAssembler* masm) {
   __ Push(StoreWithVectorDescriptor::ValueRegister(),
           StoreWithVectorDescriptor::SlotRegister(),
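GenerateKeyNameCheck, deleted in every port here, triaged non-smi keys three ways: fall through for unique names, jump to index_string for strings whose hash field caches an array index, jump to not_unique otherwise. As a plain function; the mask semantics follow the assembly, where the "contains cached array index" bits are zero when an index is cached, and the constants are illustrative:

#include <cstdint>

enum class KeyClass { kUniqueName, kArrayIndexString, kNotUnique };

KeyClass ClassifyNameKey(bool is_name, bool is_symbol,
                         bool is_internalized_string,
                         uint32_t hash_field,
                         uint32_t contains_cached_index_mask) {
  if (!is_name) return KeyClass::kNotUnique;    // > LAST_UNIQUE_NAME_TYPE
  if (is_symbol) return KeyClass::kUniqueName;  // == LAST_UNIQUE_NAME_TYPE
  // A string caching an array index is rerouted to the smi-index path
  // (IndexFromHash in the stub).
  if ((hash_field & contains_cached_index_mask) == 0)
    return KeyClass::kArrayIndexString;
  // Only internalized strings may be treated as unique names.
  return is_internalized_string ? KeyClass::kUniqueName
                                : KeyClass::kNotUnique;
}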
diff --git a/src/ic/s390/access-compiler-s390.cc b/src/ic/s390/access-compiler-s390.cc
index 0a3285d..ed8c089 100644
--- a/src/ic/s390/access-compiler-s390.cc
+++ b/src/ic/s390/access-compiler-s390.cc
@@ -18,20 +18,21 @@
   __ Jump(code, RelocInfo::CODE_TARGET);
 }
 
-Register* PropertyAccessCompiler::load_calling_convention() {
-  // receiver, name, scratch1, scratch2, scratch3.
+void PropertyAccessCompiler::InitializePlatformSpecific(
+    AccessCompilerData* data) {
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, r5, r2, r6};
-  return registers;
-}
 
-Register* PropertyAccessCompiler::store_calling_convention() {
+  // Load calling convention.
+  // receiver, name, scratch1, scratch2, scratch3.
+  Register load_registers[] = {receiver, name, r5, r2, r6};
+
+  // Store calling convention.
   // receiver, name, scratch1, scratch2.
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, r5, r6};
-  return registers;
+  Register store_registers[] = {receiver, name, r5, r6};
+
+  data->Initialize(arraysize(load_registers), load_registers,
+                   arraysize(store_registers), store_registers);
 }
 
 #undef __
diff --git a/src/ic/s390/handler-compiler-s390.cc b/src/ic/s390/handler-compiler-s390.cc
index 504bace..72658ec 100644
--- a/src/ic/s390/handler-compiler-s390.cc
+++ b/src/ic/s390/handler-compiler-s390.cc
@@ -383,9 +383,34 @@
   }
 }
 
+void PropertyHandlerCompiler::GenerateAccessCheck(
+    Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
+    Label* miss, bool compare_native_contexts_only) {
+  Label done;
+  // Load current native context.
+  __ LoadP(scratch1, NativeContextMemOperand());
+  // Load expected native context.
+  __ LoadWeakValue(scratch2, native_context_cell, miss);
+  __ CmpP(scratch1, scratch2);
+
+  if (!compare_native_contexts_only) {
+    __ beq(&done);
+
+    // Compare security tokens of current and expected native contexts.
+    __ LoadP(scratch1,
+             ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+    __ LoadP(scratch2,
+             ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+    __ CmpP(scratch1, scratch2);
+  }
+  __ bne(miss);
+
+  __ bind(&done);
+}
+
 Register PropertyHandlerCompiler::CheckPrototypes(
     Register object_reg, Register holder_reg, Register scratch1,
-    Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+    Register scratch2, Handle<Name> name, Label* miss,
     ReturnHolder return_what) {
   Handle<Map> receiver_map = map();
 
@@ -404,17 +429,6 @@
     __ bne(miss);
   }
 
-  // The prototype chain of primitives (and their JSValue wrappers) depends
-  // on the native context, which can't be guarded by validity cells.
-  // |object_reg| holds the native context specific prototype in this case;
-  // we need to check its map.
-  if (check == CHECK_ALL_MAPS) {
-    __ LoadP(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
-    Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
-    __ CmpWeakValue(scratch1, cell, scratch2);
-    __ b(ne, miss);
-  }
-
   // Keep track of the current object in register reg.
   Register reg = object_reg;
   int depth = 0;
@@ -423,46 +437,29 @@
   if (receiver_map->IsJSGlobalObjectMap()) {
     current = isolate()->global_object();
   }
-  // Check access rights to the global object.  This has to happen after
-  // the map check so that we know that the object is actually a global
-  // object.
-  // This allows us to install generated handlers for accesses to the
-  // global proxy (as opposed to using slow ICs). See corresponding code
-  // in LookupForRead().
-  if (receiver_map->IsJSGlobalProxyMap()) {
-    __ CheckAccessGlobalProxy(reg, scratch2, miss);
-  }
 
-  Handle<JSObject> prototype = Handle<JSObject>::null();
-  Handle<Map> current_map = receiver_map;
+  Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
+                          isolate());
   Handle<Map> holder_map(holder()->map());
   // Traverse the prototype chain and check the maps in the prototype chain for
   // fast and global objects or do negative lookup for normal objects.
   while (!current_map.is_identical_to(holder_map)) {
     ++depth;
 
-    // Only global objects and objects that do not require access
-    // checks are allowed in stubs.
-    DCHECK(current_map->IsJSGlobalProxyMap() ||
-           !current_map->is_access_check_needed());
-
-    prototype = handle(JSObject::cast(current_map->prototype()));
     if (current_map->IsJSGlobalObjectMap()) {
       GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
                                 name, scratch2, miss);
     } else if (current_map->is_dictionary_map()) {
       DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
-      if (!name->IsUniqueName()) {
-        DCHECK(name->IsString());
-        name = factory()->InternalizeString(Handle<String>::cast(name));
-      }
+      DCHECK(name->IsUniqueName());
       DCHECK(current.is_null() ||
              current->property_dictionary()->FindEntry(name) ==
                  NameDictionary::kNotFound);
 
       if (depth > 1) {
-        // TODO(jkummerow): Cache and re-use weak cell.
-        __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+        Handle<WeakCell> weak_cell =
+            Map::GetOrCreatePrototypeWeakCell(current, isolate());
+        __ LoadWeakValue(reg, weak_cell, miss);
       }
       GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
                                        scratch2);
@@ -470,7 +467,7 @@
 
     reg = holder_reg;  // From now on the object will be in holder_reg.
     // Go to the next object in the prototype chain.
-    current = prototype;
+    current = handle(JSObject::cast(current_map->prototype()));
     current_map = handle(current->map());
   }
 
@@ -481,7 +478,9 @@
 
   bool return_holder = return_what == RETURN_HOLDER;
   if (return_holder && depth != 0) {
-    __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+    Handle<WeakCell> weak_cell =
+        Map::GetOrCreatePrototypeWeakCell(current, isolate());
+    __ LoadWeakValue(reg, weak_cell, miss);
   }
 
   // Return the register containing the holder.
diff --git a/src/ic/s390/ic-s390.cc b/src/ic/s390/ic-s390.cc
index 08eb3e4..bd83af1 100644
--- a/src/ic/s390/ic-s390.cc
+++ b/src/ic/s390/ic-s390.cc
@@ -18,16 +18,6 @@
 
 #define __ ACCESS_MASM(masm)
 
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
-                                            Label* global_object) {
-  // Register usage:
-  //   type: holds the receiver instance type on entry.
-  __ CmpP(type, Operand(JS_GLOBAL_OBJECT_TYPE));
-  __ beq(global_object);
-  __ CmpP(type, Operand(JS_GLOBAL_PROXY_TYPE));
-  __ beq(global_object);
-}
-
 // Helper function used from LoadIC GenerateNormal.
 //
 // elements: Property dictionary. It is not clobbered if a jump to the miss
@@ -127,141 +117,6 @@
                  kDontSaveFPRegs);
 }
 
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
-                                           Register receiver, Register map,
-                                           Register scratch,
-                                           int interceptor_bit, Label* slow) {
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, slow);
-  // Get the map of the receiver.
-  __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  // Check bit field.
-  __ LoadlB(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
-  DCHECK(((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)) < 0x8000);
-  __ mov(r0,
-         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
-  __ AndP(r0, scratch);
-  __ bne(slow /*, cr0*/);
-  // Check that the object is some kind of JS object EXCEPT JS Value type.
-  // In the case that the object is a value-wrapper object,
-  // we enter the runtime system to make sure that indexing into string
-  // objects works as intended.
-  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-  __ LoadlB(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
-  __ CmpP(scratch, Operand(JS_OBJECT_TYPE));
-  __ blt(slow);
-}
-
-// Loads an indexed element from a fast case array.
-static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
-                                  Register key, Register elements,
-                                  Register scratch1, Register scratch2,
-                                  Register result, Label* slow) {
-  // Register use:
-  //
-  // receiver - holds the receiver on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // key      - holds the smi key on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // result   - holds the result on exit if the load succeeded.
-  //            Allowed to be the same as 'receiver' or 'key'.
-  //            Unchanged on bailout so 'receiver' and 'key' can be safely
-  //            used by further computation.
-  //
-  // Scratch registers:
-  //
-  // elements - holds the elements of the receiver and its prototypes.
-  //
-  // scratch1 - used to hold elements length, bit fields, base addresses.
-  //
-  // scratch2 - used to hold maps, prototypes, and the loaded value.
-  Label check_prototypes, check_next_prototype;
-  Label done, in_bounds, absent;
-
-  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ AssertFastElements(elements);
-
-  // Check that the key (index) is within bounds.
-  __ LoadP(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ CmpLogicalP(key, scratch1);
-  __ blt(&in_bounds, Label::kNear);
-  // Out-of-bounds. Check the prototype chain to see if we can just return
-  // 'undefined'.
-  __ CmpP(key, Operand::Zero());
-  __ blt(slow);  // Negative keys can't take the fast OOB path.
-  __ bind(&check_prototypes);
-  __ LoadP(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ bind(&check_next_prototype);
-  __ LoadP(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
-  // scratch2: current prototype
-  __ CompareRoot(scratch2, Heap::kNullValueRootIndex);
-  __ beq(&absent, Label::kNear);
-  __ LoadP(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
-  __ LoadP(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
-  // elements: elements of current prototype
-  // scratch2: map of current prototype
-  __ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE);
-  __ blt(slow);
-  __ LoadlB(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
-  __ AndP(r0, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
-                                (1 << Map::kHasIndexedInterceptor)));
-  __ bne(slow);
-  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
-  __ bne(slow);
-  __ jmp(&check_next_prototype);
-
-  __ bind(&absent);
-  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
-  __ jmp(&done);
-
-  __ bind(&in_bounds);
-  // Fast case: Do the load.
-  __ AddP(scratch1, elements,
-          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  // The key is a smi.
-  __ SmiToPtrArrayOffset(scratch2, key);
-  __ LoadP(scratch2, MemOperand(scratch2, scratch1));
-  __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
-  // In case the loaded value is the_hole we have to check the prototype chain.
-  __ beq(&check_prototypes);
-  __ LoadRR(result, scratch2);
-  __ bind(&done);
-}
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if a key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
-                                 Register map, Register hash,
-                                 Label* index_string, Label* not_unique) {
-  // The key is not a smi.
-  Label unique;
-  // Is it a name?
-  __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
-  __ bgt(not_unique);
-  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
-  __ beq(&unique, Label::kNear);
-
-  // Is the string an array index, with cached numeric value?
-  __ LoadlW(hash, FieldMemOperand(key, Name::kHashFieldOffset));
-  __ mov(r7, Operand(Name::kContainsCachedArrayIndexMask));
-  __ AndP(r0, hash, r7);
-  __ beq(index_string);
-
-  // Is the string internalized? We know it's a string, so a single
-  // bit test is enough.
-  // map: key map
-  __ LoadlB(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
-  STATIC_ASSERT(kInternalizedTag == 0);
-  __ tmll(hash, Operand(kIsNotInternalizedMask));
-  __ bne(not_unique);
-
-  __ bind(&unique);
-}
-
 void LoadIC::GenerateNormal(MacroAssembler* masm) {
   Register dictionary = r2;
   DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
@@ -339,103 +194,6 @@
   __ TailCallRuntime(Runtime::kKeyedGetProperty);
 }
 
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
-  // The return address is in lr.
-  Label slow, check_name, index_smi, index_name, property_array_property;
-  Label probe_dictionary, check_number_dictionary;
-
-  Register key = LoadDescriptor::NameRegister();
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  DCHECK(key.is(r4));
-  DCHECK(receiver.is(r3));
-
-  Isolate* isolate = masm->isolate();
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &check_name);
-  __ bind(&index_smi);
-  // Now the key is known to be a smi. This place is also jumped to from below
-  // where a numeric string is converted to a smi.
-
-  GenerateKeyedLoadReceiverCheck(masm, receiver, r2, r5,
-                                 Map::kHasIndexedInterceptor, &slow);
-
-  // Check the receiver's map to see if it has fast elements.
-  __ CheckFastElements(r2, r5, &check_number_dictionary);
-
-  GenerateFastArrayLoad(masm, receiver, key, r2, r5, r6, r2, &slow);
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, r6,
-                      r5);
-  __ Ret();
-
-  __ bind(&check_number_dictionary);
-  __ LoadP(r6, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ LoadP(r5, FieldMemOperand(r6, JSObject::kMapOffset));
-
-  // Check whether the elements object is a number dictionary.
-  // r5: elements map
-  // r6: elements
-  __ CompareRoot(r5, Heap::kHashTableMapRootIndex);
-  __ bne(&slow, Label::kNear);
-  __ SmiUntag(r2, key);
-  __ LoadFromNumberDictionary(&slow, r6, key, r2, r2, r5, r7);
-  __ Ret();
-
-  // Slow case, key and receiver still in r2 and r3.
-  __ bind(&slow);
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, r6,
-                      r5);
-  GenerateRuntimeGetProperty(masm);
-
-  __ bind(&check_name);
-  GenerateKeyNameCheck(masm, key, r2, r5, &index_name, &slow);
-
-  GenerateKeyedLoadReceiverCheck(masm, receiver, r2, r5,
-                                 Map::kHasNamedInterceptor, &slow);
-
-  // If the receiver is a fast-case object, check the stub cache. Otherwise
-  // probe the dictionary.
-  __ LoadP(r5, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-  __ LoadP(r6, FieldMemOperand(r5, HeapObject::kMapOffset));
-  __ CompareRoot(r6, Heap::kHashTableMapRootIndex);
-  __ beq(&probe_dictionary);
-
-  // The handlers in the stub cache expect a vector and slot. Since we won't
-  // change the IC from any downstream misses, a dummy vector can be used.
-  Register vector = LoadWithVectorDescriptor::VectorRegister();
-  Register slot = LoadWithVectorDescriptor::SlotRegister();
-  DCHECK(!AreAliased(vector, slot, r6, r7, r8, r9));
-  Handle<TypeFeedbackVector> dummy_vector =
-      TypeFeedbackVector::DummyVector(masm->isolate());
-  int slot_index = dummy_vector->GetIndex(
-      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
-  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
-  __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
-
-  masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, r6, r7,
-                                                    r8, r9);
-  // Cache miss.
-  GenerateMiss(masm);
-
-  // Do a quick inline probe of the receiver's dictionary, if it
-  // exists.
-  __ bind(&probe_dictionary);
-  // r5: elements
-  __ LoadP(r2, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ LoadlB(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
-  GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
-  // Load the property to r2.
-  GenerateDictionaryLoad(masm, &slow, r5, key, r2, r7, r6);
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
-                      r6, r5);
-  __ Ret();
-
-  __ bind(&index_name);
-  __ IndexFromHash(r5, key);
-  // Now jump to the place where smi keys are handled.
-  __ b(&index_smi);
-}
-
 static void StoreIC_PushArgs(MacroAssembler* masm) {
   __ Push(StoreWithVectorDescriptor::ValueRegister(),
           StoreWithVectorDescriptor::SlotRegister(),
diff --git a/src/ic/stub-cache.cc b/src/ic/stub-cache.cc
index fe1adaa..84dbf48 100644
--- a/src/ic/stub-cache.cc
+++ b/src/ic/stub-cache.cc
@@ -6,13 +6,18 @@
 
 #include "src/ast/ast.h"
 #include "src/base/bits.h"
+#include "src/ic/ic-inl.h"
 #include "src/type-info.h"
 
 namespace v8 {
 namespace internal {
 
 StubCache::StubCache(Isolate* isolate, Code::Kind ic_kind)
-    : isolate_(isolate), ic_kind_(ic_kind) {}
+    : isolate_(isolate), ic_kind_(ic_kind) {
+  // Ensure the nullptr (aka Smi::kZero) which StubCache::Get() returns
+  // when the entry is not found is not considered as a handler.
+  DCHECK(!IC::IsHandler(nullptr));
+}
 
 void StubCache::Initialize() {
   DCHECK(base::bits::IsPowerOfTwo32(kPrimaryTableSize));
@@ -24,18 +29,23 @@
 namespace {
 
 bool CommonStubCacheChecks(StubCache* stub_cache, Name* name, Map* map,
-                           Code* code) {
-  // Validate that the name does not move on scavenge, and that we
+                           Object* handler) {
+  // Validate that the name and handler do not move on scavenge, and that we
   // can use identity checks instead of structural equality checks.
   DCHECK(!name->GetHeap()->InNewSpace(name));
+  DCHECK(!name->GetHeap()->InNewSpace(handler));
   DCHECK(name->IsUniqueName());
   DCHECK(name->HasHashCode());
-  if (code) {
-    Code::Flags expected_flags = Code::RemoveHolderFromFlags(
-        Code::ComputeHandlerFlags(stub_cache->ic_kind()));
-    Code::Flags flags = Code::RemoveHolderFromFlags(code->flags());
-    DCHECK_EQ(expected_flags, flags);
-    DCHECK_EQ(Code::HANDLER, Code::ExtractKindFromFlags(code->flags()));
+  if (handler) {
+    DCHECK(IC::IsHandler(handler));
+    if (handler->IsCode()) {
+      Code* code = Code::cast(handler);
+      Code::Flags expected_flags = Code::RemoveHolderFromFlags(
+          Code::ComputeHandlerFlags(stub_cache->ic_kind()));
+      Code::Flags flags = Code::RemoveHolderFromFlags(code->flags());
+      DCHECK_EQ(expected_flags, flags);
+      DCHECK_EQ(Code::HANDLER, Code::ExtractKindFromFlags(code->flags()));
+    }
   }
   return true;
 }
@@ -43,17 +53,17 @@
 }  // namespace
 #endif
 
-Code* StubCache::Set(Name* name, Map* map, Code* code) {
-  DCHECK(CommonStubCacheChecks(this, name, map, code));
+Object* StubCache::Set(Name* name, Map* map, Object* handler) {
+  DCHECK(CommonStubCacheChecks(this, name, map, handler));
 
   // Compute the primary entry.
   int primary_offset = PrimaryOffset(name, map);
   Entry* primary = entry(primary_, primary_offset);
-  Code* old_code = primary->value;
+  Object* old_handler = primary->value;
 
   // If the primary entry has useful data in it, we retire it to the
   // secondary cache before overwriting it.
-  if (old_code != isolate_->builtins()->builtin(Builtins::kIllegal)) {
+  if (old_handler != isolate_->builtins()->builtin(Builtins::kIllegal)) {
     Map* old_map = primary->map;
     int seed = PrimaryOffset(primary->key, old_map);
     int secondary_offset = SecondaryOffset(primary->key, seed);
@@ -63,13 +73,13 @@
 
   // Update primary cache.
   primary->key = name;
-  primary->value = code;
+  primary->value = handler;
   primary->map = map;
   isolate()->counters()->megamorphic_stub_cache_updates()->Increment();
-  return code;
+  return handler;
 }
 
-Code* StubCache::Get(Name* name, Map* map) {
+Object* StubCache::Get(Name* name, Map* map) {
   DCHECK(CommonStubCacheChecks(this, name, map, nullptr));
   int primary_offset = PrimaryOffset(name, map);
   Entry* primary = entry(primary_, primary_offset);
@@ -81,7 +91,7 @@
   if (secondary->key == name && secondary->map == map) {
     return secondary->value;
   }
-  return NULL;
+  return nullptr;
 }
 
 
diff --git a/src/ic/stub-cache.h b/src/ic/stub-cache.h
index ebcff44..bdd7f4a 100644
--- a/src/ic/stub-cache.h
+++ b/src/ic/stub-cache.h
@@ -35,14 +35,14 @@
  public:
   struct Entry {
     Name* key;
-    Code* value;
+    Object* value;
     Map* map;
   };
 
   void Initialize();
   // Access cache for entry hash(name, map).
-  Code* Set(Name* name, Map* map, Code* code);
-  Code* Get(Name* name, Map* map);
+  Object* Set(Name* name, Map* map, Object* handler);
+  Object* Get(Name* name, Map* map);
   // Clear the lookup table (@ mark compact collection).
   void Clear();
   // Collect all maps that match the name.
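
The stub cache being generalized here is a two-way, set-associative megamorphic cache: Set() hashes (name, map) into the primary table and, when the slot already holds useful data, retires that entry into the secondary table before overwriting it, while Get() probes both tables and now hands back an arbitrary Object* handler (or nullptr) rather than only a Code*. A minimal standalone sketch of that probe/retire scheme, assuming hypothetical hash mixing and types in place of V8's real PrimaryOffset/SecondaryOffset and heap pointers:

#include <array>
#include <cstddef>
#include <cstdint>

// Illustrative stand-ins for V8's Name*, Map* and Object* handler.
struct Entry {
  const void* key = nullptr;
  const void* map = nullptr;
  const void* value = nullptr;  // Any handler object, no longer just code.
};

class StubCacheSketch {
 public:
  const void* Set(const void* key, const void* map, const void* handler) {
    Entry& primary = primary_[PrimaryOffset(key, map)];
    if (primary.value != nullptr) {
      // Retire a useful primary entry into the secondary table, seeded by
      // the evicted entry's own primary offset, before overwriting it.
      size_t seed = PrimaryOffset(primary.key, primary.map);
      secondary_[SecondaryOffset(primary.key, seed)] = primary;
    }
    primary.key = key;
    primary.map = map;
    primary.value = handler;
    return handler;
  }

  const void* Get(const void* key, const void* map) const {
    const Entry& primary = primary_[PrimaryOffset(key, map)];
    if (primary.key == key && primary.map == map) return primary.value;
    const Entry& secondary =
        secondary_[SecondaryOffset(key, PrimaryOffset(key, map))];
    if (secondary.key == key && secondary.map == map) return secondary.value;
    return nullptr;  // Must never itself be a valid handler; see the DCHECK.
  }

 private:
  static constexpr size_t kTableSize = 512;  // A power of two, like V8's tables.
  static size_t PrimaryOffset(const void* key, const void* map) {
    uintptr_t k = reinterpret_cast<uintptr_t>(key);
    uintptr_t m = reinterpret_cast<uintptr_t>(map);
    return static_cast<size_t>((k ^ m) >> 3) & (kTableSize - 1);
  }
  static size_t SecondaryOffset(const void* key, size_t seed) {
    return (static_cast<size_t>(reinterpret_cast<uintptr_t>(key) >> 3) - seed) &
           (kTableSize - 1);
  }
  std::array<Entry, kTableSize> primary_{};
  std::array<Entry, kTableSize> secondary_{};
};
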
diff --git a/src/ic/x64/access-compiler-x64.cc b/src/ic/x64/access-compiler-x64.cc
index 2b29252..9e95b95 100644
--- a/src/ic/x64/access-compiler-x64.cc
+++ b/src/ic/x64/access-compiler-x64.cc
@@ -11,30 +11,27 @@
 
 #define __ ACCESS_MASM(masm)
 
-
 void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
                                               Handle<Code> code) {
   __ jmp(code, RelocInfo::CODE_TARGET);
 }
 
-
-Register* PropertyAccessCompiler::load_calling_convention() {
-  // receiver, name, scratch1, scratch2, scratch3.
+void PropertyAccessCompiler::InitializePlatformSpecific(
+    AccessCompilerData* data) {
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, rax, rbx, rdi};
-  return registers;
-}
 
+  // Load calling convention.
+  // receiver, name, scratch1, scratch2, scratch3.
+  Register load_registers[] = {receiver, name, rax, rbx, rdi};
 
-Register* PropertyAccessCompiler::store_calling_convention() {
+  // Store calling convention.
   // receiver, name, scratch1, scratch2.
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, rbx, rdi};
-  return registers;
-}
+  Register store_registers[] = {receiver, name, rbx, rdi};
 
+  data->Initialize(arraysize(load_registers), load_registers,
+                   arraysize(store_registers), store_registers);
+}
 
 #undef __
 }  // namespace internal
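
This rewrite (mirrored below for x87) folds the two accessors that returned pointers to function-local static Register arrays into one InitializePlatformSpecific() that copies both calling conventions into an AccessCompilerData object, so the register tables can be owned per isolate instead of living in mutable process-wide statics. A hedged sketch of what the receiving side might look like; the member names here are illustrative, not V8's actual AccessCompilerData API:

#include <cassert>
#include <cstddef>

using Register = int;  // Stand-in for V8's Register type.

class AccessCompilerData {
 public:
  // Copies the platform's load/store conventions; receiver and name come
  // first, followed by the platform-specific scratch registers.
  void Initialize(size_t load_count, const Register* loads,
                  size_t store_count, const Register* stores) {
    assert(load_count <= kMaxRegisters && store_count <= kMaxRegisters);
    for (size_t i = 0; i < load_count; ++i) load_[i] = loads[i];
    for (size_t i = 0; i < store_count; ++i) store_[i] = stores[i];
    initialized_ = true;
  }
  bool IsInitialized() const { return initialized_; }
  Register load(size_t i) const { return load_[i]; }
  Register store(size_t i) const { return store_[i]; }

 private:
  static const size_t kMaxRegisters = 5;  // receiver, name, three scratch.
  Register load_[kMaxRegisters] = {};
  Register store_[kMaxRegisters] = {};
  bool initialized_ = false;
};
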
diff --git a/src/ic/x64/handler-compiler-x64.cc b/src/ic/x64/handler-compiler-x64.cc
index f386fc5..36acccc 100644
--- a/src/ic/x64/handler-compiler-x64.cc
+++ b/src/ic/x64/handler-compiler-x64.cc
@@ -401,10 +401,32 @@
   }
 }
 
+void PropertyHandlerCompiler::GenerateAccessCheck(
+    Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
+    Label* miss, bool compare_native_contexts_only) {
+  Label done;
+  // Load current native context.
+  __ movp(scratch1, NativeContextOperand());
+  // Load expected native context.
+  __ LoadWeakValue(scratch2, native_context_cell, miss);
+  __ cmpp(scratch1, scratch2);
+
+  if (!compare_native_contexts_only) {
+    __ j(equal, &done);
+
+    // Compare security tokens of current and expected native contexts.
+    __ movp(scratch1, ContextOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+    __ movp(scratch2, ContextOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+    __ cmpp(scratch1, scratch2);
+  }
+  __ j(not_equal, miss);
+
+  __ bind(&done);
+}
 
 Register PropertyHandlerCompiler::CheckPrototypes(
     Register object_reg, Register holder_reg, Register scratch1,
-    Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+    Register scratch2, Handle<Name> name, Label* miss,
     ReturnHolder return_what) {
   Handle<Map> receiver_map = map();
 
@@ -424,17 +446,6 @@
     __ j(not_equal, miss);
   }
 
-  // The prototype chain of primitives (and their JSValue wrappers) depends
-  // on the native context, which can't be guarded by validity cells.
-  // |object_reg| holds the native context specific prototype in this case;
-  // we need to check its map.
-  if (check == CHECK_ALL_MAPS) {
-    __ movp(scratch1, FieldOperand(object_reg, HeapObject::kMapOffset));
-    Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
-    __ CmpWeakValue(scratch1, cell, scratch2);
-    __ j(not_equal, miss);
-  }
-
   // Keep track of the current object in register reg.  On the first
   // iteration, reg is an alias for object_reg, on later iterations,
   // it is an alias for holder_reg.
@@ -446,46 +457,28 @@
     current = isolate()->global_object();
   }
 
-  // Check access rights to the global object.  This has to happen after
-  // the map check so that we know that the object is actually a global
-  // object.
-  // This allows us to install generated handlers for accesses to the
-  // global proxy (as opposed to using slow ICs). See corresponding code
-  // in LookupForRead().
-  if (receiver_map->IsJSGlobalProxyMap()) {
-    __ CheckAccessGlobalProxy(reg, scratch2, miss);
-  }
-
-  Handle<JSObject> prototype = Handle<JSObject>::null();
-  Handle<Map> current_map = receiver_map;
+  Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
+                          isolate());
   Handle<Map> holder_map(holder()->map());
   // Traverse the prototype chain and check the maps in the prototype chain for
   // fast and global objects or do negative lookup for normal objects.
   while (!current_map.is_identical_to(holder_map)) {
     ++depth;
 
-    // Only global objects and objects that do not require access
-    // checks are allowed in stubs.
-    DCHECK(current_map->IsJSGlobalProxyMap() ||
-           !current_map->is_access_check_needed());
-
-    prototype = handle(JSObject::cast(current_map->prototype()));
     if (current_map->IsJSGlobalObjectMap()) {
       GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
                                 name, scratch2, miss);
     } else if (current_map->is_dictionary_map()) {
       DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
-      if (!name->IsUniqueName()) {
-        DCHECK(name->IsString());
-        name = factory()->InternalizeString(Handle<String>::cast(name));
-      }
+      DCHECK(name->IsUniqueName());
       DCHECK(current.is_null() ||
              current->property_dictionary()->FindEntry(name) ==
                  NameDictionary::kNotFound);
 
       if (depth > 1) {
-        // TODO(jkummerow): Cache and re-use weak cell.
-        __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+        Handle<WeakCell> weak_cell =
+            Map::GetOrCreatePrototypeWeakCell(current, isolate());
+        __ LoadWeakValue(reg, weak_cell, miss);
       }
       GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
                                        scratch2);
@@ -493,7 +486,7 @@
 
     reg = holder_reg;  // From now on the object will be in holder_reg.
     // Go to the next object in the prototype chain.
-    current = prototype;
+    current = handle(JSObject::cast(current_map->prototype()));
     current_map = handle(current->map());
   }
 
@@ -504,7 +497,9 @@
 
   bool return_holder = return_what == RETURN_HOLDER;
   if (return_holder && depth != 0) {
-    __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+    Handle<WeakCell> weak_cell =
+        Map::GetOrCreatePrototypeWeakCell(current, isolate());
+    __ LoadWeakValue(reg, weak_cell, miss);
   }
 
   // Return the register containing the holder.
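
The new GenerateAccessCheck replaces the old CheckAccessGlobalProxy call with an explicit two-step test: compare the current native context against the expected one held in a weak cell (a cleared cell is a miss), and, unless compare_native_contexts_only is set, also accept distinct contexts that share a security token. A rough C++ rendering of the control flow the assembly emits, with simplified stand-in types:

struct NativeContext {
  const void* security_token;
};

// Returns true when access is allowed; returning false corresponds to
// jumping to the miss label in the generated stub.
bool AccessAllowed(const NativeContext* current, const NativeContext* expected,
                   bool compare_native_contexts_only) {
  if (expected == nullptr) return false;  // Weak cell was cleared: miss.
  if (current == expected) return true;
  if (compare_native_contexts_only) return false;
  // Different native contexts may still grant access to each other if
  // they carry the same security token.
  return current->security_token == expected->security_token;
}
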
diff --git a/src/ic/x64/ic-x64.cc b/src/ic/x64/ic-x64.cc
index d0445a2..a916e22 100644
--- a/src/ic/x64/ic-x64.cc
+++ b/src/ic/x64/ic-x64.cc
@@ -18,18 +18,6 @@
 
 #define __ ACCESS_MASM(masm)
 
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
-                                            Label* global_object) {
-  // Register usage:
-  //   type: holds the receiver instance type on entry.
-  __ cmpb(type, Immediate(JS_GLOBAL_OBJECT_TYPE));
-  __ j(equal, global_object);
-  __ cmpb(type, Immediate(JS_GLOBAL_PROXY_TYPE));
-  __ j(equal, global_object);
-}
-
-
 // Helper function used to load a property from a dictionary backing storage.
 // This function may return false negatives, so miss_label
 // must always call a backup property load that is complete.
@@ -133,237 +121,6 @@
   __ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
 }
 
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
-                                           Register receiver, Register map,
-                                           int interceptor_bit, Label* slow) {
-  // Register use:
-  //   receiver - holds the receiver and is unchanged.
-  // Scratch registers:
-  //   map - used to hold the map of the receiver.
-
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, slow);
-
-  // Check that the object is some kind of JS object EXCEPT JS Value type.
-  // In the case that the object is a value-wrapper object,
-  // we enter the runtime system to make sure that indexing
-  // into string objects works as intended.
-  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-  __ CmpObjectType(receiver, JS_OBJECT_TYPE, map);
-  __ j(below, slow);
-
-  // Check bit field.
-  __ testb(
-      FieldOperand(map, Map::kBitFieldOffset),
-      Immediate((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
-  __ j(not_zero, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
-                                  Register key, Register elements,
-                                  Register scratch, Register result,
-                                  Label* slow) {
-  // Register use:
-  //
-  // receiver - holds the receiver on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // key      - holds the smi key on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // result   - holds the result on exit if the load succeeded.
-  //            Allowed to be the same as 'receiver' or 'key'.
-  //            Unchanged on bailout so 'receiver' and 'key' can be safely
-  //            used by further computation.
-  //
-  // Scratch registers:
-  //
-  // elements - holds the elements of the receiver and its prototypes.
-  //
-  // scratch  - used to hold maps, prototypes, and the loaded value.
-  Label check_prototypes, check_next_prototype;
-  Label done, in_bounds, absent;
-
-  __ movp(elements, FieldOperand(receiver, JSObject::kElementsOffset));
-  __ AssertFastElements(elements);
-  // Check that the key (index) is within bounds.
-  __ SmiCompare(key, FieldOperand(elements, FixedArray::kLengthOffset));
-  // Unsigned comparison rejects negative indices.
-  __ j(below, &in_bounds);
-
-  // Out-of-bounds. Check the prototype chain to see if we can just return
-  // 'undefined'.
-  __ SmiCompare(key, Smi::FromInt(0));
-  __ j(less, slow);  // Negative keys can't take the fast OOB path.
-  __ bind(&check_prototypes);
-  __ movp(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ bind(&check_next_prototype);
-  __ movp(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
-  // scratch: current prototype
-  __ CompareRoot(scratch, Heap::kNullValueRootIndex);
-  __ j(equal, &absent);
-  __ movp(elements, FieldOperand(scratch, JSObject::kElementsOffset));
-  __ movp(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
-  // elements: elements of current prototype
-  // scratch: map of current prototype
-  __ CmpInstanceType(scratch, JS_OBJECT_TYPE);
-  __ j(below, slow);
-  __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
-           Immediate((1 << Map::kIsAccessCheckNeeded) |
-                     (1 << Map::kHasIndexedInterceptor)));
-  __ j(not_zero, slow);
-  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
-  __ j(not_equal, slow);
-  __ jmp(&check_next_prototype);
-
-  __ bind(&absent);
-  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
-  __ jmp(&done);
-
-  __ bind(&in_bounds);
-  // Fast case: Do the load.
-  SmiIndex index = masm->SmiToIndex(scratch, key, kPointerSizeLog2);
-  __ movp(scratch, FieldOperand(elements, index.reg, index.scale,
-                                FixedArray::kHeaderSize));
-  __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
-  // In case the loaded value is the_hole we have to check the prototype chain.
-  __ j(equal, &check_prototypes);
-  __ Move(result, scratch);
-  __ bind(&done);
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if the key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
-                                 Register map, Register hash,
-                                 Label* index_string, Label* not_unique) {
-  // Register use:
-  //   key - holds the key and is unchanged. Assumed to be non-smi.
-  // Scratch registers:
-  //   map - used to hold the map of the key.
-  //   hash - used to hold the hash of the key.
-  Label unique;
-  __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
-  __ j(above, not_unique);
-  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
-  __ j(equal, &unique);
-
-  // Is the string an array index, with cached numeric value?
-  __ movl(hash, FieldOperand(key, Name::kHashFieldOffset));
-  __ testl(hash, Immediate(Name::kContainsCachedArrayIndexMask));
-  __ j(zero, index_string);  // The value in hash is used at jump target.
-
-  // Is the string internalized? We already know it's a string so a single
-  // bit test is enough.
-  STATIC_ASSERT(kNotInternalizedTag != 0);
-  __ testb(FieldOperand(map, Map::kInstanceTypeOffset),
-           Immediate(kIsNotInternalizedMask));
-  __ j(not_zero, not_unique);
-
-  __ bind(&unique);
-}
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
-  // The return address is on the stack.
-  Label slow, check_name, index_smi, index_name, property_array_property;
-  Label probe_dictionary, check_number_dictionary;
-
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register key = LoadDescriptor::NameRegister();
-  DCHECK(receiver.is(rdx));
-  DCHECK(key.is(rcx));
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &check_name);
-  __ bind(&index_smi);
-  // Now the key is known to be a smi. This place is also jumped to from below
-  // where a numeric string is converted to a smi.
-
-  GenerateKeyedLoadReceiverCheck(masm, receiver, rax,
-                                 Map::kHasIndexedInterceptor, &slow);
-
-  // Check the receiver's map to see if it has fast elements.
-  __ CheckFastElements(rax, &check_number_dictionary);
-
-  GenerateFastArrayLoad(masm, receiver, key, rax, rbx, rax, &slow);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->ic_keyed_load_generic_smi(), 1);
-  __ ret(0);
-
-  __ bind(&check_number_dictionary);
-  __ SmiToInteger32(rbx, key);
-  __ movp(rax, FieldOperand(receiver, JSObject::kElementsOffset));
-
-  // Check whether the elements array is a number dictionary.
-  // rbx: key as untagged int32
-  // rax: elements
-  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
-                 Heap::kHashTableMapRootIndex);
-  __ j(not_equal, &slow);
-  __ LoadFromNumberDictionary(&slow, rax, key, rbx, r9, rdi, rax);
-  __ ret(0);
-
-  __ bind(&slow);
-  // Slow case: Jump to runtime.
-  __ IncrementCounter(counters->ic_keyed_load_generic_slow(), 1);
-  KeyedLoadIC::GenerateRuntimeGetProperty(masm);
-
-  __ bind(&check_name);
-  GenerateKeyNameCheck(masm, key, rax, rbx, &index_name, &slow);
-
-  GenerateKeyedLoadReceiverCheck(masm, receiver, rax, Map::kHasNamedInterceptor,
-                                 &slow);
-
-  // If the receiver is a fast-case object, check the stub cache. Otherwise
-  // probe the dictionary.
-  __ movp(rbx, FieldOperand(receiver, JSObject::kPropertiesOffset));
-  __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
-                 Heap::kHashTableMapRootIndex);
-  __ j(equal, &probe_dictionary);
-
-  Register megamorphic_scratch = rdi;
-  // The handlers in the stub cache expect a vector and slot. Since we won't
-  // change the IC from any downstream misses, a dummy vector can be used.
-  Register vector = LoadWithVectorDescriptor::VectorRegister();
-  Register slot = LoadDescriptor::SlotRegister();
-  DCHECK(!AreAliased(megamorphic_scratch, vector, slot));
-  Handle<TypeFeedbackVector> dummy_vector =
-      TypeFeedbackVector::DummyVector(masm->isolate());
-  int slot_index = dummy_vector->GetIndex(
-      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
-  __ Move(vector, dummy_vector);
-  __ Move(slot, Smi::FromInt(slot_index));
-
-  masm->isolate()->load_stub_cache()->GenerateProbe(
-      masm, receiver, key, megamorphic_scratch, no_reg);
-  // Cache miss.
-  GenerateMiss(masm);
-
-  // Do a quick inline probe of the receiver's dictionary, if it
-  // exists.
-  __ bind(&probe_dictionary);
-  // rbx: elements
-
-  __ movp(rax, FieldOperand(receiver, JSObject::kMapOffset));
-  __ movb(rax, FieldOperand(rax, Map::kInstanceTypeOffset));
-  GenerateGlobalInstanceTypeCheck(masm, rax, &slow);
-
-  GenerateDictionaryLoad(masm, &slow, rbx, key, rax, rdi, rax);
-  __ IncrementCounter(counters->ic_keyed_load_generic_symbol(), 1);
-  __ ret(0);
-
-  __ bind(&index_name);
-  __ IndexFromHash(rbx, key);
-  __ jmp(&index_smi);
-}
-
-
 static void KeyedStoreGenerateMegamorphicHelper(
     MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
     KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
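
The megamorphic keyed-load stub deleted above (and again from the x87 file below) contained a subtle out-of-bounds fast path: a negative smi key always fell back to the runtime, while a non-negative out-of-bounds key (or a load that hit the_hole) could return undefined only after walking the entire prototype chain and proving that every prototype is a plain JS object with no access checks, no indexed interceptor, and empty elements. An approximate C++ rendering of that removed check, using simplified stand-in types:

// Illustrative stand-in for the map/bitfield state the stub inspected.
struct ProtoObject {
  bool is_js_object_or_above;    // CmpInstanceType(map, JS_OBJECT_TYPE)
  bool needs_access_check;       // Map::kIsAccessCheckNeeded bit
  bool has_indexed_interceptor;  // Map::kHasIndexedInterceptor bit
  bool elements_empty;           // elements == empty_fixed_array
  const ProtoObject* prototype;  // nullptr plays the role of the null value.
};

enum class LoadResult { kUndefined, kSlow };

LoadResult HandleOutOfBounds(const ProtoObject* receiver, long key) {
  if (key < 0) return LoadResult::kSlow;  // Negative keys never go fast.
  for (const ProtoObject* p = receiver->prototype; p != nullptr;
       p = p->prototype) {
    if (!p->is_js_object_or_above || p->needs_access_check ||
        p->has_indexed_interceptor || !p->elements_empty) {
      return LoadResult::kSlow;
    }
  }
  // No prototype can observably intercept or shadow the index.
  return LoadResult::kUndefined;
}
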
diff --git a/src/ic/x87/access-compiler-x87.cc b/src/ic/x87/access-compiler-x87.cc
index e528de6..d186755 100644
--- a/src/ic/x87/access-compiler-x87.cc
+++ b/src/ic/x87/access-compiler-x87.cc
@@ -16,22 +16,21 @@
   __ jmp(code, RelocInfo::CODE_TARGET);
 }
 
-
-Register* PropertyAccessCompiler::load_calling_convention() {
-  // receiver, name, scratch1, scratch2, scratch3.
+void PropertyAccessCompiler::InitializePlatformSpecific(
+    AccessCompilerData* data) {
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, ebx, eax, edi};
-  return registers;
-}
 
+  // Load calling convention.
+  // receiver, name, scratch1, scratch2, scratch3.
+  Register load_registers[] = {receiver, name, ebx, eax, edi};
 
-Register* PropertyAccessCompiler::store_calling_convention() {
+  // Store calling convention.
   // receiver, name, scratch1, scratch2.
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, ebx, edi};
-  return registers;
+  Register store_registers[] = {receiver, name, ebx, edi};
+
+  data->Initialize(arraysize(load_registers), load_registers,
+                   arraysize(store_registers), store_registers);
 }
 
 #undef __
diff --git a/src/ic/x87/handler-compiler-x87.cc b/src/ic/x87/handler-compiler-x87.cc
index 5eca3dc..a5c32d3 100644
--- a/src/ic/x87/handler-compiler-x87.cc
+++ b/src/ic/x87/handler-compiler-x87.cc
@@ -411,10 +411,32 @@
   }
 }
 
+void PropertyHandlerCompiler::GenerateAccessCheck(
+    Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
+    Label* miss, bool compare_native_contexts_only) {
+  Label done;
+  // Load current native context.
+  __ mov(scratch1, NativeContextOperand());
+  // Load expected native context.
+  __ LoadWeakValue(scratch2, native_context_cell, miss);
+  __ cmp(scratch1, scratch2);
+
+  if (!compare_native_contexts_only) {
+    __ j(equal, &done);
+
+    // Compare security tokens of current and expected native contexts.
+    __ mov(scratch1, ContextOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+    __ mov(scratch2, ContextOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+    __ cmp(scratch1, scratch2);
+  }
+  __ j(not_equal, miss);
+
+  __ bind(&done);
+}
 
 Register PropertyHandlerCompiler::CheckPrototypes(
     Register object_reg, Register holder_reg, Register scratch1,
-    Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+    Register scratch2, Handle<Name> name, Label* miss,
     ReturnHolder return_what) {
   Handle<Map> receiver_map = map();
 
@@ -433,17 +455,6 @@
     __ j(not_equal, miss);
   }
 
-  // The prototype chain of primitives (and their JSValue wrappers) depends
-  // on the native context, which can't be guarded by validity cells.
-  // |object_reg| holds the native context specific prototype in this case;
-  // we need to check its map.
-  if (check == CHECK_ALL_MAPS) {
-    __ mov(scratch1, FieldOperand(object_reg, HeapObject::kMapOffset));
-    Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
-    __ CmpWeakValue(scratch1, cell, scratch2);
-    __ j(not_equal, miss);
-  }
-
   // Keep track of the current object in register reg.
   Register reg = object_reg;
   int depth = 0;
@@ -453,46 +464,28 @@
     current = isolate()->global_object();
   }
 
-  // Check access rights to the global object.  This has to happen after
-  // the map check so that we know that the object is actually a global
-  // object.
-  // This allows us to install generated handlers for accesses to the
-  // global proxy (as opposed to using slow ICs). See corresponding code
-  // in LookupForRead().
-  if (receiver_map->IsJSGlobalProxyMap()) {
-    __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
-  }
-
-  Handle<JSObject> prototype = Handle<JSObject>::null();
-  Handle<Map> current_map = receiver_map;
+  Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
+                          isolate());
   Handle<Map> holder_map(holder()->map());
   // Traverse the prototype chain and check the maps in the prototype chain for
   // fast and global objects or do negative lookup for normal objects.
   while (!current_map.is_identical_to(holder_map)) {
     ++depth;
 
-    // Only global objects and objects that do not require access
-    // checks are allowed in stubs.
-    DCHECK(current_map->IsJSGlobalProxyMap() ||
-           !current_map->is_access_check_needed());
-
-    prototype = handle(JSObject::cast(current_map->prototype()));
     if (current_map->IsJSGlobalObjectMap()) {
       GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
                                 name, scratch2, miss);
     } else if (current_map->is_dictionary_map()) {
       DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
-      if (!name->IsUniqueName()) {
-        DCHECK(name->IsString());
-        name = factory()->InternalizeString(Handle<String>::cast(name));
-      }
+      DCHECK(name->IsUniqueName());
       DCHECK(current.is_null() ||
              current->property_dictionary()->FindEntry(name) ==
                  NameDictionary::kNotFound);
 
       if (depth > 1) {
-        // TODO(jkummerow): Cache and re-use weak cell.
-        __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+        Handle<WeakCell> weak_cell =
+            Map::GetOrCreatePrototypeWeakCell(current, isolate());
+        __ LoadWeakValue(reg, weak_cell, miss);
       }
       GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
                                        scratch2);
@@ -500,7 +493,7 @@
 
     reg = holder_reg;  // From now on the object will be in holder_reg.
     // Go to the next object in the prototype chain.
-    current = prototype;
+    current = handle(JSObject::cast(current_map->prototype()));
     current_map = handle(current->map());
   }
 
@@ -511,7 +504,9 @@
 
   bool return_holder = return_what == RETURN_HOLDER;
   if (return_holder && depth != 0) {
-    __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+    Handle<WeakCell> weak_cell =
+        Map::GetOrCreatePrototypeWeakCell(current, isolate());
+    __ LoadWeakValue(reg, weak_cell, miss);
   }
 
   // Return the register containing the holder.
diff --git a/src/ic/x87/ic-x87.cc b/src/ic/x87/ic-x87.cc
index baf435e..f96e509 100644
--- a/src/ic/x87/ic-x87.cc
+++ b/src/ic/x87/ic-x87.cc
@@ -18,18 +18,6 @@
 
 #define __ ACCESS_MASM(masm)
 
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
-                                            Label* global_object) {
-  // Register usage:
-  //   type: holds the receiver instance type on entry.
-  __ cmp(type, JS_GLOBAL_OBJECT_TYPE);
-  __ j(equal, global_object);
-  __ cmp(type, JS_GLOBAL_PROXY_TYPE);
-  __ j(equal, global_object);
-}
-
-
 // Helper function used to load a property from a dictionary backing
 // storage. This function may fail to load a property even though it is
 // in the dictionary, so code at miss_label must always call a backup
@@ -132,238 +120,6 @@
   __ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
 }
 
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
-                                           Register receiver, Register map,
-                                           int interceptor_bit, Label* slow) {
-  // Register use:
-  //   receiver - holds the receiver and is unchanged.
-  // Scratch registers:
-  //   map - used to hold the map of the receiver.
-
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, slow);
-
-  // Get the map of the receiver.
-  __ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
-
-  // Check bit field.
-  __ test_b(
-      FieldOperand(map, Map::kBitFieldOffset),
-      Immediate((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
-  __ j(not_zero, slow);
-  // Check that the object is some kind of JS object EXCEPT JS Value type. In
-  // the case that the object is a value-wrapper object, we enter the runtime
-  // system to make sure that indexing into string objects works as intended.
-  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-
-  __ CmpInstanceType(map, JS_OBJECT_TYPE);
-  __ j(below, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
-                                  Register key, Register scratch,
-                                  Register scratch2, Register result,
-                                  Label* slow) {
-  // Register use:
-  //   receiver - holds the receiver and is unchanged.
-  //   key - holds the key and is unchanged (must be a smi).
-  // Scratch registers:
-  //   scratch - used to hold elements of the receiver and the loaded value.
-  //   scratch2 - holds maps and prototypes during prototype chain check.
-  //   result - holds the result on exit if the load succeeds and
-  //            we fall through.
-  Label check_prototypes, check_next_prototype;
-  Label done, in_bounds, absent;
-
-  __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
-  __ AssertFastElements(scratch);
-
-  // Check that the key (index) is within bounds.
-  __ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
-  __ j(below, &in_bounds);
-  // Out-of-bounds. Check the prototype chain to see if we can just return
-  // 'undefined'.
-  __ cmp(key, 0);
-  __ j(less, slow);  // Negative keys can't take the fast OOB path.
-  __ bind(&check_prototypes);
-  __ mov(scratch2, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ bind(&check_next_prototype);
-  __ mov(scratch2, FieldOperand(scratch2, Map::kPrototypeOffset));
-  // scratch2: current prototype
-  __ cmp(scratch2, masm->isolate()->factory()->null_value());
-  __ j(equal, &absent);
-  __ mov(scratch, FieldOperand(scratch2, JSObject::kElementsOffset));
-  __ mov(scratch2, FieldOperand(scratch2, HeapObject::kMapOffset));
-  // scratch: elements of current prototype
-  // scratch2: map of current prototype
-  __ CmpInstanceType(scratch2, JS_OBJECT_TYPE);
-  __ j(below, slow);
-  __ test_b(FieldOperand(scratch2, Map::kBitFieldOffset),
-            Immediate((1 << Map::kIsAccessCheckNeeded) |
-                      (1 << Map::kHasIndexedInterceptor)));
-  __ j(not_zero, slow);
-  __ cmp(scratch, masm->isolate()->factory()->empty_fixed_array());
-  __ j(not_equal, slow);
-  __ jmp(&check_next_prototype);
-
-  __ bind(&absent);
-  __ mov(result, masm->isolate()->factory()->undefined_value());
-  __ jmp(&done);
-
-  __ bind(&in_bounds);
-  // Fast case: Do the load.
-  STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
-  __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
-  __ cmp(scratch, Immediate(masm->isolate()->factory()->the_hole_value()));
-  // In case the loaded value is the_hole we have to check the prototype chain.
-  __ j(equal, &check_prototypes);
-  __ Move(result, scratch);
-  __ bind(&done);
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if the key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
-                                 Register map, Register hash,
-                                 Label* index_string, Label* not_unique) {
-  // Register use:
-  //   key - holds the key and is unchanged. Assumed to be non-smi.
-  // Scratch registers:
-  //   map - used to hold the map of the key.
-  //   hash - used to hold the hash of the key.
-  Label unique;
-  __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
-  __ j(above, not_unique);
-  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
-  __ j(equal, &unique);
-
-  // Is the string an array index, with cached numeric value?
-  __ mov(hash, FieldOperand(key, Name::kHashFieldOffset));
-  __ test(hash, Immediate(Name::kContainsCachedArrayIndexMask));
-  __ j(zero, index_string);
-
-  // Is the string internalized? We already know it's a string so a single
-  // bit test is enough.
-  STATIC_ASSERT(kNotInternalizedTag != 0);
-  __ test_b(FieldOperand(map, Map::kInstanceTypeOffset),
-            Immediate(kIsNotInternalizedMask));
-  __ j(not_zero, not_unique);
-
-  __ bind(&unique);
-}
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
-  // The return address is on the stack.
-  Label slow, check_name, index_smi, index_name, property_array_property;
-  Label probe_dictionary, check_number_dictionary;
-
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register key = LoadDescriptor::NameRegister();
-  DCHECK(receiver.is(edx));
-  DCHECK(key.is(ecx));
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &check_name);
-  __ bind(&index_smi);
-  // Now the key is known to be a smi. This place is also jumped to from
-  // where a numeric string is converted to a smi.
-
-  GenerateKeyedLoadReceiverCheck(masm, receiver, eax,
-                                 Map::kHasIndexedInterceptor, &slow);
-
-  // Check the receiver's map to see if it has fast elements.
-  __ CheckFastElements(eax, &check_number_dictionary);
-
-  GenerateFastArrayLoad(masm, receiver, key, eax, ebx, eax, &slow);
-  Isolate* isolate = masm->isolate();
-  Counters* counters = isolate->counters();
-  __ IncrementCounter(counters->ic_keyed_load_generic_smi(), 1);
-  __ ret(0);
-
-  __ bind(&check_number_dictionary);
-  __ mov(ebx, key);
-  __ SmiUntag(ebx);
-  __ mov(eax, FieldOperand(receiver, JSObject::kElementsOffset));
-
-  // Check whether the elements array is a number dictionary.
-  // ebx: untagged index
-  // eax: elements
-  __ CheckMap(eax, isolate->factory()->hash_table_map(), &slow,
-              DONT_DO_SMI_CHECK);
-  Label slow_pop_receiver;
-  // Push receiver on the stack to free up a register for the dictionary
-  // probing.
-  __ push(receiver);
-  __ LoadFromNumberDictionary(&slow_pop_receiver, eax, key, ebx, edx, edi, eax);
-  // Pop receiver before returning.
-  __ pop(receiver);
-  __ ret(0);
-
-  __ bind(&slow_pop_receiver);
-  // Pop the receiver from the stack and jump to runtime.
-  __ pop(receiver);
-
-  __ bind(&slow);
-  // Slow case: jump to runtime.
-  __ IncrementCounter(counters->ic_keyed_load_generic_slow(), 1);
-  GenerateRuntimeGetProperty(masm);
-
-  __ bind(&check_name);
-  GenerateKeyNameCheck(masm, key, eax, ebx, &index_name, &slow);
-
-  GenerateKeyedLoadReceiverCheck(masm, receiver, eax, Map::kHasNamedInterceptor,
-                                 &slow);
-
-  // If the receiver is a fast-case object, check the stub cache. Otherwise
-  // probe the dictionary.
-  __ mov(ebx, FieldOperand(receiver, JSObject::kPropertiesOffset));
-  __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
-         Immediate(isolate->factory()->hash_table_map()));
-  __ j(equal, &probe_dictionary);
-
-  // The handlers in the stub cache expect a vector and slot. Since we won't
-  // change the IC from any downstream misses, a dummy vector can be used.
-  Handle<TypeFeedbackVector> dummy_vector =
-      TypeFeedbackVector::DummyVector(isolate);
-  int slot = dummy_vector->GetIndex(
-      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
-  __ push(Immediate(Smi::FromInt(slot)));
-  __ push(Immediate(dummy_vector));
-
-  masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, ebx,
-                                                    edi);
-
-  __ pop(LoadWithVectorDescriptor::VectorRegister());
-  __ pop(LoadDescriptor::SlotRegister());
-
-  // Cache miss.
-  GenerateMiss(masm);
-
-  // Do a quick inline probe of the receiver's dictionary, if it
-  // exists.
-  __ bind(&probe_dictionary);
-
-  __ mov(eax, FieldOperand(receiver, JSObject::kMapOffset));
-  __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
-  GenerateGlobalInstanceTypeCheck(masm, eax, &slow);
-
-  GenerateDictionaryLoad(masm, &slow, ebx, key, eax, edi, eax);
-  __ IncrementCounter(counters->ic_keyed_load_generic_symbol(), 1);
-  __ ret(0);
-
-  __ bind(&index_name);
-  __ IndexFromHash(ebx, key);
-  // Now jump to the place where smi keys are handled.
-  __ jmp(&index_smi);
-}
-
-
 static void KeyedStoreGenerateMegamorphicHelper(
     MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
     KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
diff --git a/src/icu_util.cc b/src/icu_util.cc
index bf59fd0..4b511d9 100644
--- a/src/icu_util.cc
+++ b/src/icu_util.cc
@@ -52,9 +52,9 @@
   }
   char* icu_data_file_default;
 #if defined(V8_TARGET_LITTLE_ENDIAN)
-  RelativePath(&icu_data_file_default, exec_path, "icudtl.dat");
+  base::RelativePath(&icu_data_file_default, exec_path, "icudtl.dat");
 #elif defined(V8_TARGET_BIG_ENDIAN)
-  RelativePath(&icu_data_file_default, exec_path, "icudtb.dat");
+  base::RelativePath(&icu_data_file_default, exec_path, "icudtb.dat");
 #else
 #error Unknown byte ordering
 #endif
diff --git a/src/inspector/BUILD.gn b/src/inspector/BUILD.gn
index 15c090f..6ebb91c 100644
--- a/src/inspector/BUILD.gn
+++ b/src/inspector/BUILD.gn
@@ -4,7 +4,7 @@
 
 import("../../gni/v8.gni")
 
-_inspector_protocol = "//third_party/WebKit/Source/platform/inspector_protocol"
+_inspector_protocol = v8_path_prefix + "/third_party/inspector_protocol"
 import("$_inspector_protocol/inspector_protocol.gni")
 
 _protocol_generated = [
@@ -51,6 +51,7 @@
     ":protocol_compatibility",
   ]
 
+  inspector_protocol_dir = _inspector_protocol
   out_dir = target_gen_dir
   config_file = "inspector_protocol_config.json"
   inputs = [
diff --git a/src/inspector/DEPS b/src/inspector/DEPS
index 4486204..d49c6a6 100644
--- a/src/inspector/DEPS
+++ b/src/inspector/DEPS
@@ -1,8 +1,11 @@
 include_rules = [
   "-src",
-  "+src/inspector",
   "+src/base/atomicops.h",
   "+src/base/macros.h",
   "+src/base/logging.h",
   "+src/base/platform/platform.h",
+  "+src/inspector",
+  "+src/tracing",
+  "-include/v8-debug.h",
+  "+src/debug/debug-interface.h",
 ]
diff --git a/src/inspector/debugger-script.js b/src/inspector/debugger-script.js
index 98910d6..1614566 100644
--- a/src/inspector/debugger-script.js
+++ b/src/inspector/debugger-script.js
@@ -33,18 +33,6 @@
 
 var DebuggerScript = {};
 
-/** @enum */
-const PauseOnExceptionsState = {
-    DontPauseOnExceptions: 0,
-    PauseOnAllExceptions: 1,
-    PauseOnUncaughtExceptions: 2
-};
-DebuggerScript.PauseOnExceptionsState = PauseOnExceptionsState;
-
-DebuggerScript._pauseOnExceptionsState = DebuggerScript.PauseOnExceptionsState.DontPauseOnExceptions;
-Debug.clearBreakOnException();
-Debug.clearBreakOnUncaughtException();
-
 /**
  * @param {?CompileEvent} eventData
  */
@@ -52,7 +40,7 @@
 {
     var script = eventData.script().value();
     if (!script.is_debugger_script)
-        return DebuggerScript._formatScript(eventData.script().value());
+        return script;
     return null;
 }
 
@@ -152,82 +140,6 @@
 }
 
 /**
- * @param {string|undefined} contextData
- * @return {string}
- */
-DebuggerScript._executionContextAuxData = function(contextData)
-{
-    if (!contextData)
-        return "";
-    var match = contextData.match(/^[^,]*,[^,]*,(.*)$/);
-    return match ? match[1] : "";
-}
-
-/**
- * @param {string} contextGroupId
- * @return {!Array<!FormattedScript>}
- */
-DebuggerScript.getScripts = function(contextGroupId)
-{
-    var result = [];
-    var scripts = Debug.scripts();
-    var contextDataPrefix = null;
-    if (contextGroupId)
-        contextDataPrefix = contextGroupId + ",";
-    for (var i = 0; i < scripts.length; ++i) {
-        var script = scripts[i];
-        if (contextDataPrefix) {
-            if (!script.context_data)
-                continue;
-            // Context data is a string in the following format:
-            // <contextGroupId>,<contextId>,<auxData>
-            if (script.context_data.indexOf(contextDataPrefix) !== 0)
-                continue;
-        }
-        if (script.is_debugger_script)
-            continue;
-        result.push(DebuggerScript._formatScript(script));
-    }
-    return result;
-}
-
-/**
- * @param {!Script} script
- * @return {!FormattedScript}
- */
-DebuggerScript._formatScript = function(script)
-{
-    var lineEnds = script.line_ends;
-    var lineCount = lineEnds.length;
-    var endLine = script.line_offset + lineCount - 1;
-    var endColumn;
-    // V8 will not count the last line if the script source ends with \n.
-    if (script.source[script.source.length - 1] === '\n') {
-        endLine += 1;
-        endColumn = 0;
-    } else {
-        if (lineCount === 1)
-            endColumn = script.source.length + script.column_offset;
-        else
-            endColumn = script.source.length - (lineEnds[lineCount - 2] + 1);
-    }
-    return {
-        id: script.id,
-        name: script.nameOrSourceURL(),
-        sourceURL: script.source_url,
-        sourceMappingURL: script.source_mapping_url,
-        source: script.source,
-        startLine: script.line_offset,
-        startColumn: script.column_offset,
-        endLine: endLine,
-        endColumn: endColumn,
-        executionContextId: DebuggerScript._executionContextId(script.context_data),
-        // Note that we cannot derive aux data from the context id because of the compilation cache.
-        executionContextAuxData: DebuggerScript._executionContextAuxData(script.context_data)
-    };
-}
-
-/**
  * @param {!ExecutionState} execState
  * @param {!BreakpointInfo} info
  * @return {string|undefined}
@@ -253,32 +165,6 @@
 }
 
 /**
- * @return {number}
- */
-DebuggerScript.pauseOnExceptionsState = function()
-{
-    return DebuggerScript._pauseOnExceptionsState;
-}
-
-/**
- * @param {number} newState
- */
-DebuggerScript.setPauseOnExceptionsState = function(newState)
-{
-    DebuggerScript._pauseOnExceptionsState = newState;
-
-    if (DebuggerScript.PauseOnExceptionsState.PauseOnAllExceptions === newState)
-        Debug.setBreakOnException();
-    else
-        Debug.clearBreakOnException();
-
-    if (DebuggerScript.PauseOnExceptionsState.PauseOnUncaughtExceptions === newState)
-        Debug.setBreakOnUncaughtException();
-    else
-        Debug.clearBreakOnUncaughtException();
-}
-
-/**
  * @param {!ExecutionState} execState
  * @param {number} limit
  * @return {!Array<!JavaScriptCallFrame>}
@@ -291,43 +177,6 @@
     return frames;
 }
 
-/**
- * @param {!ExecutionState} execState
- */
-DebuggerScript.stepIntoStatement = function(execState)
-{
-    execState.prepareStep(Debug.StepAction.StepIn);
-}
-
-/**
- * @param {!ExecutionState} execState
- */
-DebuggerScript.stepFrameStatement = function(execState)
-{
-    execState.prepareStep(Debug.StepAction.StepFrame);
-}
-
-/**
- * @param {!ExecutionState} execState
- */
-DebuggerScript.stepOverStatement = function(execState)
-{
-    execState.prepareStep(Debug.StepAction.StepNext);
-}
-
-/**
- * @param {!ExecutionState} execState
- */
-DebuggerScript.stepOutOfFunction = function(execState)
-{
-    execState.prepareStep(Debug.StepAction.StepOut);
-}
-
-DebuggerScript.clearStepping = function()
-{
-    Debug.clearStepping();
-}
-
 // Returns array in form:
 //      [ 0, <v8_result_report> ] in case of success
 //   or [ 1, <general_error_message>, <compiler_message>, <line_number>, <column_number> ] in case of compile error, numbers are 1-based.
@@ -416,6 +265,7 @@
     var frameDetails = frameMirror.details();
 
     var funcObject = frameDetails.func();
+    var scriptObject = frameDetails.script();
     var sourcePosition = frameDetails.sourcePosition();
     var thisObject = frameDetails.receiver();
 
@@ -448,6 +298,7 @@
     // Calculated lazily.
     var scopeChain;
     var funcMirror;
+    var scriptMirror;
     var location;
     /** @type {!Array<?RawLocation>} */
     var scopeStartLocations;
@@ -516,7 +367,7 @@
     {
         if (!details) {
             var scopeObjects = ensureScopeChain();
-            var script = ensureFuncMirror().script();
+            var script = ensureScriptMirror();
             /** @type {!Array<Scope>} */
             var scopes = [];
             for (var i = 0; i < scopeObjects.length; ++i) {
@@ -570,14 +421,24 @@
     }
 
     /**
+     * @return {!ScriptMirror}
+     */
+    function ensureScriptMirror()
+    {
+        if (!scriptMirror) {
+            scriptMirror = MakeMirror(scriptObject);
+        }
+        return /** @type {!ScriptMirror} */(scriptMirror);
+    }
+
+    /**
      * @return {!{line: number, column: number}}
      */
     function ensureLocation()
     {
         if (!location) {
-            var script = ensureFuncMirror().script();
-            if (script)
-                location = script.locationFromPosition(sourcePosition, true);
+            var script = ensureScriptMirror();
+            location = script.locationFromPosition(sourcePosition, true);
             if (!location)
                 location = { line: 0, column: 0 };
         }
@@ -616,12 +477,12 @@
     }
 
     /**
-     * @return {number|undefined}
+     * @return {number}
      */
     function sourceID()
     {
-        var script = ensureFuncMirror().script();
-        return script && script.id();
+        var script = ensureScriptMirror();
+        return script.id();
     }
 
     /**
diff --git a/src/inspector/debugger_script_externs.js b/src/inspector/debugger_script_externs.js
index c7df61f..cc152d5 100644
--- a/src/inspector/debugger_script_externs.js
+++ b/src/inspector/debugger_script_externs.js
@@ -44,7 +44,7 @@
 var JavaScriptCallFrameDetails;
 
 /** @typedef {{
-        sourceID: function():(number|undefined),
+        sourceID: function():(number),
         line: function():number,
         column: function():number,
         thisObject: !Object,
@@ -61,19 +61,6 @@
  */
 var Debug = {};
 
-Debug.setBreakOnException = function() {}
-
-Debug.clearBreakOnException = function() {}
-
-Debug.setBreakOnUncaughtException = function() {}
-
-/**
- * @return {undefined}
- */
-Debug.clearBreakOnUncaughtException = function() {}
-
-Debug.clearStepping = function() {}
-
 Debug.clearAllBreakPoints = function() {}
 
 /** @return {!Array<!Script>} */
@@ -203,9 +190,6 @@
 /** @interface */
 function ExecutionState() {}
 
-/** @param {!Debug.StepAction} action */
-ExecutionState.prototype.prepareStep = function(action) {}
-
 /**
  * @param {string} source
  * @param {boolean} disableBreak
@@ -257,7 +241,6 @@
  *    source_mapping_url: (string|undefined),
  *    is_debugger_script: boolean,
  *    source: string,
- *    line_ends: !Array<number>,
  *    line_offset: number,
  *    column_offset: number,
  *    nameOrSourceURL: function():string,
@@ -288,6 +271,9 @@
 /** @return {function()} */
 FrameDetails.prototype.func = function() {}
 
+/** @return {!Object} */
+FrameDetails.prototype.script = function() {}
+
 /** @return {boolean} */
 FrameDetails.prototype.isAtReturn = function() {}
 
@@ -466,6 +452,9 @@
 /** @return {!FrameDetails} */
 FrameMirror.prototype.details = function() {}
 
+/** @return {!ScriptMirror} */
+FrameMirror.prototype.script = function() {}
+
 /**
  * @param {string} source
  * @param {boolean} disableBreak
diff --git a/src/inspector/injected-script-source.js b/src/inspector/injected-script-source.js
index 39c6c9c..f3c8d6b 100644
--- a/src/inspector/injected-script-source.js
+++ b/src/inspector/injected-script-source.js
@@ -260,18 +260,6 @@
     },
 
     /**
-     * @param {!Array<*>} array
-     * @param {string} groupName
-     * @param {boolean} forceValueType
-     * @param {boolean} generatePreview
-     */
-    wrapObjectsInArray: function(array, groupName, forceValueType, generatePreview)
-    {
-        for (var i = 0; i < array.length; ++i)
-            array[i] = this.wrapObject(array[i], groupName, forceValueType, generatePreview);
-    },
-
-    /**
      * @param {!Object} table
      * @param {!Array.<string>|string|boolean} columns
      * @return {!RuntimeAgent.RemoteObject}
diff --git a/src/inspector/injected-script.cc b/src/inspector/injected-script.cc
index a100dea..d605227 100644
--- a/src/inspector/injected-script.cc
+++ b/src/inspector/injected-script.cc
@@ -54,11 +54,6 @@
 using protocol::Runtime::RemoteObject;
 using protocol::Maybe;
 
-static bool hasInternalError(ErrorString* errorString, bool hasError) {
-  if (hasError) *errorString = "Internal error";
-  return hasError;
-}
-
 std::unique_ptr<InjectedScript> InjectedScript::create(
     InspectedContext* inspectedContext) {
   v8::Isolate* isolate = inspectedContext->isolate();
@@ -124,10 +119,9 @@
 
 InjectedScript::~InjectedScript() {}
 
-void InjectedScript::getProperties(
-    ErrorString* errorString, v8::Local<v8::Object> object,
-    const String16& groupName, bool ownProperties, bool accessorPropertiesOnly,
-    bool generatePreview,
+Response InjectedScript::getProperties(
+    v8::Local<v8::Object> object, const String16& groupName, bool ownProperties,
+    bool accessorPropertiesOnly, bool generatePreview,
     std::unique_ptr<Array<PropertyDescriptor>>* properties,
     Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
   v8::HandleScope handles(m_context->isolate());
@@ -143,21 +137,23 @@
   v8::TryCatch tryCatch(m_context->isolate());
   v8::Local<v8::Value> resultValue = function.callWithoutExceptionHandling();
   if (tryCatch.HasCaught()) {
-    *exceptionDetails = createExceptionDetails(errorString, tryCatch, groupName,
-                                               generatePreview);
+    Response response = createExceptionDetails(
+        tryCatch, groupName, generatePreview, exceptionDetails);
+    if (!response.isSuccess()) return response;
     // FIXME: make properties optional
     *properties = Array<PropertyDescriptor>::create();
-    return;
+    return Response::OK();
   }
-  if (hasInternalError(errorString, resultValue.IsEmpty())) return;
-  std::unique_ptr<protocol::Value> protocolValue =
-      toProtocolValue(errorString, context, resultValue);
-  if (!protocolValue) return;
-  protocol::ErrorSupport errors(errorString);
+  if (resultValue.IsEmpty()) return Response::InternalError();
+  std::unique_ptr<protocol::Value> protocolValue;
+  Response response = toProtocolValue(context, resultValue, &protocolValue);
+  if (!response.isSuccess()) return response;
+  protocol::ErrorSupport errors;
   std::unique_ptr<Array<PropertyDescriptor>> result =
       Array<PropertyDescriptor>::parse(protocolValue.get(), &errors);
-  if (!hasInternalError(errorString, errors.hasErrors()))
-    *properties = std::move(result);
+  if (errors.hasErrors()) return Response::Error(errors.errors());
+  *properties = std::move(result);
+  return Response::OK();
 }
 
 void InjectedScript::releaseObject(const String16& objectId) {
@@ -172,55 +168,52 @@
   m_native->unbind(boundId);
 }
 
-std::unique_ptr<protocol::Runtime::RemoteObject> InjectedScript::wrapObject(
-    ErrorString* errorString, v8::Local<v8::Value> value,
-    const String16& groupName, bool forceValueType,
-    bool generatePreview) const {
+Response InjectedScript::wrapObject(
+    v8::Local<v8::Value> value, const String16& groupName, bool forceValueType,
+    bool generatePreview,
+    std::unique_ptr<protocol::Runtime::RemoteObject>* result) const {
   v8::HandleScope handles(m_context->isolate());
   v8::Local<v8::Value> wrappedObject;
   v8::Local<v8::Context> context = m_context->context();
-  if (!wrapValue(errorString, value, groupName, forceValueType, generatePreview)
-           .ToLocal(&wrappedObject))
-    return nullptr;
+  Response response = wrapValue(value, groupName, forceValueType,
+                                generatePreview, &wrappedObject);
+  if (!response.isSuccess()) return response;
   protocol::ErrorSupport errors;
-  std::unique_ptr<protocol::Value> protocolValue =
-      toProtocolValue(errorString, context, wrappedObject);
-  if (!protocolValue) return nullptr;
-  std::unique_ptr<protocol::Runtime::RemoteObject> remoteObject =
+  std::unique_ptr<protocol::Value> protocolValue;
+  response = toProtocolValue(context, wrappedObject, &protocolValue);
+  if (!response.isSuccess()) return response;
+
+  *result =
       protocol::Runtime::RemoteObject::parse(protocolValue.get(), &errors);
-  if (!remoteObject) *errorString = errors.errors();
-  return remoteObject;
+  if (!result->get()) return Response::Error(errors.errors());
+  return Response::OK();
 }
 
-bool InjectedScript::wrapObjectProperty(ErrorString* errorString,
-                                        v8::Local<v8::Object> object,
-                                        v8::Local<v8::Name> key,
-                                        const String16& groupName,
-                                        bool forceValueType,
-                                        bool generatePreview) const {
+Response InjectedScript::wrapObjectProperty(v8::Local<v8::Object> object,
+                                            v8::Local<v8::Name> key,
+                                            const String16& groupName,
+                                            bool forceValueType,
+                                            bool generatePreview) const {
   v8::Local<v8::Value> property;
   v8::Local<v8::Context> context = m_context->context();
-  if (hasInternalError(errorString,
-                       !object->Get(context, key).ToLocal(&property)))
-    return false;
+  if (!object->Get(context, key).ToLocal(&property))
+    return Response::InternalError();
   v8::Local<v8::Value> wrappedProperty;
-  if (!wrapValue(errorString, property, groupName, forceValueType,
-                 generatePreview)
-           .ToLocal(&wrappedProperty))
-    return false;
+  Response response = wrapValue(property, groupName, forceValueType,
+                                generatePreview, &wrappedProperty);
+  if (!response.isSuccess()) return response;
   v8::Maybe<bool> success =
       createDataProperty(context, object, key, wrappedProperty);
-  if (hasInternalError(errorString, success.IsNothing() || !success.FromJust()))
-    return false;
-  return true;
+  if (success.IsNothing() || !success.FromJust())
+    return Response::InternalError();
+  return Response::OK();
 }
 
-bool InjectedScript::wrapPropertyInArray(ErrorString* errorString,
-                                         v8::Local<v8::Array> array,
-                                         v8::Local<v8::String> property,
-                                         const String16& groupName,
-                                         bool forceValueType,
-                                         bool generatePreview) const {
+Response InjectedScript::wrapPropertyInArray(v8::Local<v8::Array> array,
+                                             v8::Local<v8::String> property,
+                                             const String16& groupName,
+                                             bool forceValueType,
+                                             bool generatePreview) const {
   V8FunctionCall function(m_context->inspector(), m_context->context(),
                           v8Value(), "wrapPropertyInArray");
   function.appendArgument(array);
@@ -230,29 +223,13 @@
   function.appendArgument(generatePreview);
   bool hadException = false;
   function.call(hadException);
-  return !hasInternalError(errorString, hadException);
+  return hadException ? Response::InternalError() : Response::OK();
 }
 
-bool InjectedScript::wrapObjectsInArray(ErrorString* errorString,
-                                        v8::Local<v8::Array> array,
-                                        const String16& groupName,
-                                        bool forceValueType,
-                                        bool generatePreview) const {
-  V8FunctionCall function(m_context->inspector(), m_context->context(),
-                          v8Value(), "wrapObjectsInArray");
-  function.appendArgument(array);
-  function.appendArgument(groupName);
-  function.appendArgument(forceValueType);
-  function.appendArgument(generatePreview);
-  bool hadException = false;
-  function.call(hadException);
-  return !hasInternalError(errorString, hadException);
-}
-
-v8::MaybeLocal<v8::Value> InjectedScript::wrapValue(
-    ErrorString* errorString, v8::Local<v8::Value> value,
-    const String16& groupName, bool forceValueType,
-    bool generatePreview) const {
+Response InjectedScript::wrapValue(v8::Local<v8::Value> value,
+                                   const String16& groupName,
+                                   bool forceValueType, bool generatePreview,
+                                   v8::Local<v8::Value>* result) const {
   V8FunctionCall function(m_context->inspector(), m_context->context(),
                           v8Value(), "wrapObject");
   function.appendArgument(value);
@@ -260,10 +237,9 @@
   function.appendArgument(forceValueType);
   function.appendArgument(generatePreview);
   bool hadException = false;
-  v8::Local<v8::Value> r = function.call(hadException);
-  if (hasInternalError(errorString, hadException || r.IsEmpty()))
-    return v8::MaybeLocal<v8::Value>();
-  return r;
+  *result = function.call(hadException);
+  if (hadException || result->IsEmpty()) return Response::InternalError();
+  return Response::OK();
 }
 
 std::unique_ptr<protocol::Runtime::RemoteObject> InjectedScript::wrapTable(
@@ -280,21 +256,19 @@
   bool hadException = false;
   v8::Local<v8::Value> r = function.call(hadException);
   if (hadException || r.IsEmpty()) return nullptr;
-  protocol::ErrorString errorString;
-  std::unique_ptr<protocol::Value> protocolValue =
-      toProtocolValue(&errorString, context, r);
-  if (!protocolValue) return nullptr;
+  std::unique_ptr<protocol::Value> protocolValue;
+  Response response = toProtocolValue(context, r, &protocolValue);
+  if (!response.isSuccess()) return nullptr;
   protocol::ErrorSupport errors;
   return protocol::Runtime::RemoteObject::parse(protocolValue.get(), &errors);
 }
 
-bool InjectedScript::findObject(ErrorString* errorString,
-                                const RemoteObjectId& objectId,
-                                v8::Local<v8::Value>* outObject) const {
+Response InjectedScript::findObject(const RemoteObjectId& objectId,
+                                    v8::Local<v8::Value>* outObject) const {
   *outObject = m_native->objectForId(objectId.id());
   if (outObject->IsEmpty())
-    *errorString = "Could not find object with given id";
-  return !outObject->IsEmpty();
+    return Response::Error("Could not find object with given id");
+  return Response::OK();
 }
 
 String16 InjectedScript::objectGroupName(const RemoteObjectId& objectId) const {
@@ -326,47 +300,41 @@
   return m_lastEvaluationResult.Get(m_context->isolate());
 }
 
-v8::MaybeLocal<v8::Value> InjectedScript::resolveCallArgument(
-    ErrorString* errorString, protocol::Runtime::CallArgument* callArgument) {
+Response InjectedScript::resolveCallArgument(
+    protocol::Runtime::CallArgument* callArgument,
+    v8::Local<v8::Value>* result) {
   if (callArgument->hasObjectId()) {
-    std::unique_ptr<RemoteObjectId> remoteObjectId =
-        RemoteObjectId::parse(errorString, callArgument->getObjectId(""));
-    if (!remoteObjectId) return v8::MaybeLocal<v8::Value>();
-    if (remoteObjectId->contextId() != m_context->contextId()) {
-      *errorString =
+    std::unique_ptr<RemoteObjectId> remoteObjectId;
+    Response response =
+        RemoteObjectId::parse(callArgument->getObjectId(""), &remoteObjectId);
+    if (!response.isSuccess()) return response;
+    if (remoteObjectId->contextId() != m_context->contextId())
+      return Response::Error(
           "Argument should belong to the same JavaScript world as target "
-          "object";
-      return v8::MaybeLocal<v8::Value>();
-    }
-    v8::Local<v8::Value> object;
-    if (!findObject(errorString, *remoteObjectId, &object))
-      return v8::MaybeLocal<v8::Value>();
-    return object;
+          "object");
+    return findObject(*remoteObjectId, result);
   }
   if (callArgument->hasValue() || callArgument->hasUnserializableValue()) {
     String16 value =
         callArgument->hasValue()
             ? callArgument->getValue(nullptr)->toJSONString()
             : "Number(\"" + callArgument->getUnserializableValue("") + "\")";
-    v8::Local<v8::Value> object;
     if (!m_context->inspector()
              ->compileAndRunInternalScript(
                  m_context->context(), toV8String(m_context->isolate(), value))
-             .ToLocal(&object)) {
-      *errorString = "Couldn't parse value object in call argument";
-      return v8::MaybeLocal<v8::Value>();
+             .ToLocal(result)) {
+      return Response::Error("Couldn't parse value object in call argument");
     }
-    return object;
+    return Response::OK();
   }
-  return v8::Undefined(m_context->isolate());
+  *result = v8::Undefined(m_context->isolate());
+  return Response::OK();
 }
 
-std::unique_ptr<protocol::Runtime::ExceptionDetails>
-InjectedScript::createExceptionDetails(ErrorString* errorString,
-                                       const v8::TryCatch& tryCatch,
-                                       const String16& objectGroup,
-                                       bool generatePreview) {
-  if (!tryCatch.HasCaught()) return nullptr;
+Response InjectedScript::createExceptionDetails(
+    const v8::TryCatch& tryCatch, const String16& objectGroup,
+    bool generatePreview, Maybe<protocol::Runtime::ExceptionDetails>* result) {
+  if (!tryCatch.HasCaught()) return Response::InternalError();
   v8::Local<v8::Message> message = tryCatch.Message();
   v8::Local<v8::Value> exception = tryCatch.Exception();
   String16 messageText =
@@ -396,43 +364,44 @@
                                           ->buildInspectorObjectImpl());
   }
   if (!exception.IsEmpty()) {
-    std::unique_ptr<protocol::Runtime::RemoteObject> wrapped = wrapObject(
-        errorString, exception, objectGroup, false /* forceValueType */,
-        generatePreview && !exception->IsNativeError());
-    if (!wrapped) return nullptr;
+    std::unique_ptr<protocol::Runtime::RemoteObject> wrapped;
+    Response response =
+        wrapObject(exception, objectGroup, false /* forceValueType */,
+                   generatePreview && !exception->IsNativeError(), &wrapped);
+    if (!response.isSuccess()) return response;
     exceptionDetails->setException(std::move(wrapped));
   }
-  return exceptionDetails;
+  *result = std::move(exceptionDetails);
+  return Response::OK();
 }
 
-void InjectedScript::wrapEvaluateResult(
-    ErrorString* errorString, v8::MaybeLocal<v8::Value> maybeResultValue,
-    const v8::TryCatch& tryCatch, const String16& objectGroup,
-    bool returnByValue, bool generatePreview,
+Response InjectedScript::wrapEvaluateResult(
+    v8::MaybeLocal<v8::Value> maybeResultValue, const v8::TryCatch& tryCatch,
+    const String16& objectGroup, bool returnByValue, bool generatePreview,
     std::unique_ptr<protocol::Runtime::RemoteObject>* result,
     Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
   v8::Local<v8::Value> resultValue;
   if (!tryCatch.HasCaught()) {
-    if (hasInternalError(errorString, !maybeResultValue.ToLocal(&resultValue)))
-      return;
-    std::unique_ptr<RemoteObject> remoteObject = wrapObject(
-        errorString, resultValue, objectGroup, returnByValue, generatePreview);
-    if (!remoteObject) return;
+    if (!maybeResultValue.ToLocal(&resultValue))
+      return Response::InternalError();
+    Response response = wrapObject(resultValue, objectGroup, returnByValue,
+                                   generatePreview, result);
+    if (!response.isSuccess()) return response;
     if (objectGroup == "console")
       m_lastEvaluationResult.Reset(m_context->isolate(), resultValue);
-    *result = std::move(remoteObject);
   } else {
     v8::Local<v8::Value> exception = tryCatch.Exception();
-    std::unique_ptr<RemoteObject> remoteObject =
-        wrapObject(errorString, exception, objectGroup, false,
-                   generatePreview && !exception->IsNativeError());
-    if (!remoteObject) return;
+    Response response =
+        wrapObject(exception, objectGroup, false,
+                   generatePreview && !exception->IsNativeError(), result);
+    if (!response.isSuccess()) return response;
    // We send the exception in the result for compatibility reasons, even
    // though it is accessible through exceptionDetails.exception.
-    *result = std::move(remoteObject);
-    *exceptionDetails = createExceptionDetails(errorString, tryCatch,
-                                               objectGroup, generatePreview);
+    response = createExceptionDetails(tryCatch, objectGroup, generatePreview,
+                                      exceptionDetails);
+    if (!response.isSuccess()) return response;
   }
+  return Response::OK();
 }
 
 v8::Local<v8::Object> InjectedScript::commandLineAPI() {
@@ -442,41 +411,35 @@
   return m_commandLineAPI.Get(m_context->isolate());
 }
 
-InjectedScript::Scope::Scope(ErrorString* errorString,
-                             V8InspectorImpl* inspector, int contextGroupId)
-    : m_errorString(errorString),
-      m_inspector(inspector),
+InjectedScript::Scope::Scope(V8InspectorImpl* inspector, int contextGroupId)
+    : m_inspector(inspector),
       m_contextGroupId(contextGroupId),
       m_injectedScript(nullptr),
       m_handleScope(inspector->isolate()),
       m_tryCatch(inspector->isolate()),
       m_ignoreExceptionsAndMuteConsole(false),
-      m_previousPauseOnExceptionsState(V8Debugger::DontPauseOnExceptions),
+      m_previousPauseOnExceptionsState(v8::DebugInterface::NoBreakOnException),
       m_userGesture(false) {}
 
-bool InjectedScript::Scope::initialize() {
+Response InjectedScript::Scope::initialize() {
   cleanup();
   // TODO(dgozman): what if we reattach to the same context group during
   // evaluate? Introduce a session id?
   V8InspectorSessionImpl* session =
       m_inspector->sessionForContextGroup(m_contextGroupId);
-  if (!session) {
-    *m_errorString = "Internal error";
-    return false;
-  }
-  findInjectedScript(session);
-  if (!m_injectedScript) return false;
+  if (!session) return Response::InternalError();
+  Response response = findInjectedScript(session);
+  if (!response.isSuccess()) return response;
   m_context = m_injectedScript->context()->context();
   m_context->Enter();
-  return true;
+  return Response::OK();
 }
 
-bool InjectedScript::Scope::installCommandLineAPI() {
+void InjectedScript::Scope::installCommandLineAPI() {
   DCHECK(m_injectedScript && !m_context.IsEmpty() &&
          !m_commandLineAPIScope.get());
   m_commandLineAPIScope.reset(new V8Console::CommandLineAPIScope(
       m_context, m_injectedScript->commandLineAPI(), m_context->Global()));
-  return true;
 }
 
 void InjectedScript::Scope::ignoreExceptionsAndMuteConsole() {
@@ -485,14 +448,14 @@
   m_inspector->client()->muteMetrics(m_contextGroupId);
   m_inspector->muteExceptions(m_contextGroupId);
   m_previousPauseOnExceptionsState =
-      setPauseOnExceptionsState(V8Debugger::DontPauseOnExceptions);
+      setPauseOnExceptionsState(v8::DebugInterface::NoBreakOnException);
 }
 
-V8Debugger::PauseOnExceptionsState
+v8::DebugInterface::ExceptionBreakState
 InjectedScript::Scope::setPauseOnExceptionsState(
-    V8Debugger::PauseOnExceptionsState newState) {
+    v8::DebugInterface::ExceptionBreakState newState) {
   if (!m_inspector->debugger()->enabled()) return newState;
-  V8Debugger::PauseOnExceptionsState presentState =
+  v8::DebugInterface::ExceptionBreakState presentState =
       m_inspector->debugger()->getPauseOnExceptionsState();
   if (presentState != newState)
     m_inspector->debugger()->setPauseOnExceptionsState(newState);
@@ -523,59 +486,57 @@
   cleanup();
 }
 
-InjectedScript::ContextScope::ContextScope(ErrorString* errorString,
-                                           V8InspectorImpl* inspector,
+InjectedScript::ContextScope::ContextScope(V8InspectorImpl* inspector,
                                            int contextGroupId,
                                            int executionContextId)
-    : InjectedScript::Scope(errorString, inspector, contextGroupId),
+    : InjectedScript::Scope(inspector, contextGroupId),
       m_executionContextId(executionContextId) {}
 
 InjectedScript::ContextScope::~ContextScope() {}
 
-void InjectedScript::ContextScope::findInjectedScript(
+Response InjectedScript::ContextScope::findInjectedScript(
     V8InspectorSessionImpl* session) {
-  m_injectedScript =
-      session->findInjectedScript(m_errorString, m_executionContextId);
+  return session->findInjectedScript(m_executionContextId, m_injectedScript);
 }
 
-InjectedScript::ObjectScope::ObjectScope(ErrorString* errorString,
-                                         V8InspectorImpl* inspector,
+InjectedScript::ObjectScope::ObjectScope(V8InspectorImpl* inspector,
                                          int contextGroupId,
                                          const String16& remoteObjectId)
-    : InjectedScript::Scope(errorString, inspector, contextGroupId),
+    : InjectedScript::Scope(inspector, contextGroupId),
       m_remoteObjectId(remoteObjectId) {}
 
 InjectedScript::ObjectScope::~ObjectScope() {}
 
-void InjectedScript::ObjectScope::findInjectedScript(
+Response InjectedScript::ObjectScope::findInjectedScript(
     V8InspectorSessionImpl* session) {
-  std::unique_ptr<RemoteObjectId> remoteId =
-      RemoteObjectId::parse(m_errorString, m_remoteObjectId);
-  if (!remoteId) return;
-  InjectedScript* injectedScript =
-      session->findInjectedScript(m_errorString, remoteId.get());
-  if (!injectedScript) return;
+  std::unique_ptr<RemoteObjectId> remoteId;
+  Response response = RemoteObjectId::parse(m_remoteObjectId, &remoteId);
+  if (!response.isSuccess()) return response;
+  InjectedScript* injectedScript = nullptr;
+  response = session->findInjectedScript(remoteId.get(), injectedScript);
+  if (!response.isSuccess()) return response;
   m_objectGroupName = injectedScript->objectGroupName(*remoteId);
-  if (!injectedScript->findObject(m_errorString, *remoteId, &m_object)) return;
+  response = injectedScript->findObject(*remoteId, &m_object);
+  if (!response.isSuccess()) return response;
   m_injectedScript = injectedScript;
+  return Response::OK();
 }
 
-InjectedScript::CallFrameScope::CallFrameScope(ErrorString* errorString,
-                                               V8InspectorImpl* inspector,
+InjectedScript::CallFrameScope::CallFrameScope(V8InspectorImpl* inspector,
                                                int contextGroupId,
                                                const String16& remoteObjectId)
-    : InjectedScript::Scope(errorString, inspector, contextGroupId),
+    : InjectedScript::Scope(inspector, contextGroupId),
       m_remoteCallFrameId(remoteObjectId) {}
 
 InjectedScript::CallFrameScope::~CallFrameScope() {}
 
-void InjectedScript::CallFrameScope::findInjectedScript(
+Response InjectedScript::CallFrameScope::findInjectedScript(
     V8InspectorSessionImpl* session) {
-  std::unique_ptr<RemoteCallFrameId> remoteId =
-      RemoteCallFrameId::parse(m_errorString, m_remoteCallFrameId);
-  if (!remoteId) return;
+  std::unique_ptr<RemoteCallFrameId> remoteId;
+  Response response = RemoteCallFrameId::parse(m_remoteCallFrameId, &remoteId);
+  if (!response.isSuccess()) return response;
   m_frameOrdinal = static_cast<size_t>(remoteId->frameOrdinal());
-  m_injectedScript = session->findInjectedScript(m_errorString, remoteId.get());
+  return session->findInjectedScript(remoteId.get(), m_injectedScript);
 }
 
 }  // namespace v8_inspector
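
The hunks above apply one mechanical conversion throughout injected-script.cc:
methods that used to take a protocol::ErrorString* out-parameter and signal
failure with bool/nullptr/empty-MaybeLocal returns now return
protocol::Response and deliver their payload through a trailing out-pointer.
A minimal sketch of the resulting calling convention, using only names that
appear in this patch (the enclosing helper is hypothetical):

    Response wrapForProtocol(
        InjectedScript* injectedScript, v8::Local<v8::Value> value,
        std::unique_ptr<protocol::Runtime::RemoteObject>* result) {
      Response response = injectedScript->wrapObject(
          value, "group", /* forceValueType */ false,
          /* generatePreview */ false, result);
      if (!response.isSuccess()) return response;  // propagate first failure
      return Response::OK();
    }
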
diff --git a/src/inspector/injected-script.h b/src/inspector/injected-script.h
index 9b324c9..6500f4d 100644
--- a/src/inspector/injected-script.h
+++ b/src/inspector/injected-script.h
@@ -48,8 +48,8 @@
 class V8InspectorImpl;
 class V8InspectorSessionImpl;
 
-using protocol::ErrorString;
 using protocol::Maybe;
+using protocol::Response;
 
 class InjectedScript final {
  public:
@@ -58,56 +58,51 @@
 
   InspectedContext* context() const { return m_context; }
 
-  void getProperties(
-      ErrorString*, v8::Local<v8::Object>, const String16& groupName,
-      bool ownProperties, bool accessorPropertiesOnly, bool generatePreview,
+  Response getProperties(
+      v8::Local<v8::Object>, const String16& groupName, bool ownProperties,
+      bool accessorPropertiesOnly, bool generatePreview,
       std::unique_ptr<protocol::Array<protocol::Runtime::PropertyDescriptor>>*
           result,
       Maybe<protocol::Runtime::ExceptionDetails>*);
   void releaseObject(const String16& objectId);
 
-  std::unique_ptr<protocol::Runtime::RemoteObject> wrapObject(
-      ErrorString*, v8::Local<v8::Value>, const String16& groupName,
-      bool forceValueType = false, bool generatePreview = false) const;
-  bool wrapObjectProperty(ErrorString*, v8::Local<v8::Object>,
-                          v8::Local<v8::Name> key, const String16& groupName,
-                          bool forceValueType = false,
-                          bool generatePreview = false) const;
-  bool wrapPropertyInArray(ErrorString*, v8::Local<v8::Array>,
-                           v8::Local<v8::String> property,
-                           const String16& groupName,
-                           bool forceValueType = false,
-                           bool generatePreview = false) const;
-  bool wrapObjectsInArray(ErrorString*, v8::Local<v8::Array>,
-                          const String16& groupName,
-                          bool forceValueType = false,
-                          bool generatePreview = false) const;
+  Response wrapObject(
+      v8::Local<v8::Value>, const String16& groupName, bool forceValueType,
+      bool generatePreview,
+      std::unique_ptr<protocol::Runtime::RemoteObject>* result) const;
+  Response wrapObjectProperty(v8::Local<v8::Object>, v8::Local<v8::Name> key,
+                              const String16& groupName,
+                              bool forceValueType = false,
+                              bool generatePreview = false) const;
+  Response wrapPropertyInArray(v8::Local<v8::Array>,
+                               v8::Local<v8::String> property,
+                               const String16& groupName,
+                               bool forceValueType = false,
+                               bool generatePreview = false) const;
   std::unique_ptr<protocol::Runtime::RemoteObject> wrapTable(
       v8::Local<v8::Value> table, v8::Local<v8::Value> columns) const;
 
-  bool findObject(ErrorString*, const RemoteObjectId&,
-                  v8::Local<v8::Value>*) const;
+  Response findObject(const RemoteObjectId&, v8::Local<v8::Value>*) const;
   String16 objectGroupName(const RemoteObjectId&) const;
   void releaseObjectGroup(const String16&);
   void setCustomObjectFormatterEnabled(bool);
-  v8::MaybeLocal<v8::Value> resolveCallArgument(
-      ErrorString*, protocol::Runtime::CallArgument*);
+  Response resolveCallArgument(protocol::Runtime::CallArgument*,
+                               v8::Local<v8::Value>* result);
 
-  std::unique_ptr<protocol::Runtime::ExceptionDetails> createExceptionDetails(
-      ErrorString*, const v8::TryCatch&, const String16& groupName,
-      bool generatePreview);
-  void wrapEvaluateResult(
-      ErrorString*, v8::MaybeLocal<v8::Value> maybeResultValue,
-      const v8::TryCatch&, const String16& objectGroup, bool returnByValue,
-      bool generatePreview,
+  Response createExceptionDetails(
+      const v8::TryCatch&, const String16& groupName, bool generatePreview,
+      Maybe<protocol::Runtime::ExceptionDetails>* result);
+  Response wrapEvaluateResult(
+      v8::MaybeLocal<v8::Value> maybeResultValue, const v8::TryCatch&,
+      const String16& objectGroup, bool returnByValue, bool generatePreview,
       std::unique_ptr<protocol::Runtime::RemoteObject>* result,
       Maybe<protocol::Runtime::ExceptionDetails>*);
   v8::Local<v8::Value> lastEvaluationResult() const;
 
   class Scope {
    public:
-    bool initialize();
-    bool installCommandLineAPI();
+    Response initialize();
+    void installCommandLineAPI();
     void ignoreExceptionsAndMuteConsole();
     void pretendUserGesture();
     v8::Local<v8::Context> context() const { return m_context; }
@@ -115,37 +110,35 @@
     const v8::TryCatch& tryCatch() const { return m_tryCatch; }
 
    protected:
-    Scope(ErrorString*, V8InspectorImpl*, int contextGroupId);
+    Scope(V8InspectorImpl*, int contextGroupId);
     virtual ~Scope();
-    virtual void findInjectedScript(V8InspectorSessionImpl*) = 0;
+    virtual Response findInjectedScript(V8InspectorSessionImpl*) = 0;
 
-    ErrorString* m_errorString;
     V8InspectorImpl* m_inspector;
     int m_contextGroupId;
     InjectedScript* m_injectedScript;
 
    private:
     void cleanup();
-    V8Debugger::PauseOnExceptionsState setPauseOnExceptionsState(
-        V8Debugger::PauseOnExceptionsState);
+    v8::DebugInterface::ExceptionBreakState setPauseOnExceptionsState(
+        v8::DebugInterface::ExceptionBreakState);
 
     v8::HandleScope m_handleScope;
     v8::TryCatch m_tryCatch;
     v8::Local<v8::Context> m_context;
     std::unique_ptr<V8Console::CommandLineAPIScope> m_commandLineAPIScope;
     bool m_ignoreExceptionsAndMuteConsole;
-    V8Debugger::PauseOnExceptionsState m_previousPauseOnExceptionsState;
+    v8::DebugInterface::ExceptionBreakState m_previousPauseOnExceptionsState;
     bool m_userGesture;
   };
 
   class ContextScope : public Scope {
    public:
-    ContextScope(ErrorString*, V8InspectorImpl*, int contextGroupId,
-                 int executionContextId);
+    ContextScope(V8InspectorImpl*, int contextGroupId, int executionContextId);
     ~ContextScope();
 
    private:
-    void findInjectedScript(V8InspectorSessionImpl*) override;
+    Response findInjectedScript(V8InspectorSessionImpl*) override;
     int m_executionContextId;
 
     DISALLOW_COPY_AND_ASSIGN(ContextScope);
@@ -153,14 +146,14 @@
 
   class ObjectScope : public Scope {
    public:
-    ObjectScope(ErrorString*, V8InspectorImpl*, int contextGroupId,
+    ObjectScope(V8InspectorImpl*, int contextGroupId,
                 const String16& remoteObjectId);
     ~ObjectScope();
     const String16& objectGroupName() const { return m_objectGroupName; }
     v8::Local<v8::Value> object() const { return m_object; }
 
    private:
-    void findInjectedScript(V8InspectorSessionImpl*) override;
+    Response findInjectedScript(V8InspectorSessionImpl*) override;
     String16 m_remoteObjectId;
     String16 m_objectGroupName;
     v8::Local<v8::Value> m_object;
@@ -170,13 +163,13 @@
 
   class CallFrameScope : public Scope {
    public:
-    CallFrameScope(ErrorString*, V8InspectorImpl*, int contextGroupId,
+    CallFrameScope(V8InspectorImpl*, int contextGroupId,
                    const String16& remoteCallFrameId);
     ~CallFrameScope();
     size_t frameOrdinal() const { return m_frameOrdinal; }
 
    private:
-    void findInjectedScript(V8InspectorSessionImpl*) override;
+    Response findInjectedScript(V8InspectorSessionImpl*) override;
     String16 m_remoteCallFrameId;
     size_t m_frameOrdinal;
 
@@ -187,10 +180,9 @@
   InjectedScript(InspectedContext*, v8::Local<v8::Object>,
                  std::unique_ptr<InjectedScriptNative>);
   v8::Local<v8::Value> v8Value() const;
-  v8::MaybeLocal<v8::Value> wrapValue(ErrorString*, v8::Local<v8::Value>,
-                                      const String16& groupName,
-                                      bool forceValueType,
-                                      bool generatePreview) const;
+  Response wrapValue(v8::Local<v8::Value>, const String16& groupName,
+                     bool forceValueType, bool generatePreview,
+                     v8::Local<v8::Value>* result) const;
   v8::Local<v8::Object> commandLineAPI();
 
   InspectedContext* m_context;
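
Scope::initialize() now reports failure through Response rather than a bool
plus a side-channel ErrorString, so agent handlers can thread errors straight
through. A hedged sketch using only the ObjectScope accessors declared above
(the surrounding handler is illustrative):

    InjectedScript::ObjectScope scope(inspector, contextGroupId, objectId);
    Response response = scope.initialize();
    if (!response.isSuccess()) return response;
    v8::Local<v8::Value> object = scope.object();  // resolved by initialize()
    const String16& group = scope.objectGroupName();
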
diff --git a/src/inspector/inspected-context.cc b/src/inspector/inspected-context.cc
index 9100f64..dab3bba 100644
--- a/src/inspector/inspected-context.cc
+++ b/src/inspector/inspected-context.cc
@@ -14,22 +14,22 @@
 
 namespace v8_inspector {
 
-void InspectedContext::weakCallback(
-    const v8::WeakCallbackInfo<InspectedContext>& data) {
-  InspectedContext* context = data.GetParameter();
-  if (!context->m_context.IsEmpty()) {
-    context->m_context.Reset();
-    data.SetSecondPassCallback(&InspectedContext::weakCallback);
-  } else {
-    context->m_inspector->discardInspectedContext(context->m_contextGroupId,
-                                                  context->m_contextId);
-  }
+namespace {
+
+void clearContext(const v8::WeakCallbackInfo<v8::Global<v8::Context>>& data) {
+  // An InspectedContext is created in V8InspectorImpl::contextCreated and
+  // destroyed in V8InspectorImpl::contextDestroyed.
+  // Both methods take a valid v8::Local<v8::Context> handle to the same
+  // context, so the context is created before the InspectedContext
+  // constructor runs and is always destroyed after the InspectedContext
+  // destructor; therefore this callback should never be called.
+  // Reaching it is possible only if the inspector client fails to call
+  // contextDestroyed, which is considered an error.
+  CHECK(false);
+  data.GetParameter()->Reset();
 }
 
-void InspectedContext::consoleWeakCallback(
-    const v8::WeakCallbackInfo<InspectedContext>& data) {
-  data.GetParameter()->m_console.Reset();
-}
+}  // namespace
 
 InspectedContext::InspectedContext(V8InspectorImpl* inspector,
                                    const V8ContextInfo& info, int contextId)
@@ -41,7 +41,7 @@
       m_humanReadableName(toString16(info.humanReadableName)),
       m_auxData(toString16(info.auxData)),
       m_reported(false) {
-  m_context.SetWeak(this, &InspectedContext::weakCallback,
+  m_context.SetWeak(&m_context, &clearContext,
                     v8::WeakCallbackType::kParameter);
 
   v8::Isolate* isolate = m_inspector->isolate();
@@ -54,12 +54,11 @@
            .FromMaybe(false))
     return;
   m_console.Reset(isolate, console);
-  m_console.SetWeak(this, &InspectedContext::consoleWeakCallback,
-                    v8::WeakCallbackType::kParameter);
+  m_console.SetWeak();
 }
 
 InspectedContext::~InspectedContext() {
-  if (!m_context.IsEmpty() && !m_console.IsEmpty()) {
+  if (!m_console.IsEmpty()) {
     v8::HandleScope scope(isolate());
     V8Console::clearInspectedContextIfNeeded(context(),
                                              m_console.Get(isolate()));
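
Two different v8::Global weak modes are in play after this change; sketched in
isolation (the globals stand in for the members above):

    v8::Global<v8::Context> m_context;
    // Weak with a finalization callback: fires only if GC collects the
    // context, which the comment above treats as a client bug (a missing
    // contextDestroyed call), hence the CHECK(false) in clearContext.
    m_context.SetWeak(&m_context, &clearContext,
                      v8::WeakCallbackType::kParameter);

    v8::Global<v8::Object> m_console;
    // Phantom weak without a callback: the handle is cleared silently when
    // the console object dies, replacing the old consoleWeakCallback.
    m_console.SetWeak();
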
diff --git a/src/inspector/inspected-context.h b/src/inspector/inspected-context.h
index d8e72cc..f31eb76 100644
--- a/src/inspector/inspected-context.h
+++ b/src/inspector/inspected-context.h
@@ -41,9 +41,6 @@
  private:
   friend class V8InspectorImpl;
   InspectedContext(V8InspectorImpl*, const V8ContextInfo&, int contextId);
-  static void weakCallback(const v8::WeakCallbackInfo<InspectedContext>&);
-  static void consoleWeakCallback(
-      const v8::WeakCallbackInfo<InspectedContext>&);
 
   V8InspectorImpl* m_inspector;
   v8::Global<v8::Context> m_context;
diff --git a/src/inspector/inspector.gyp b/src/inspector/inspector.gyp
index 2d5c7a5..c70722f 100644
--- a/src/inspector/inspector.gyp
+++ b/src/inspector/inspector.gyp
@@ -4,11 +4,11 @@
 
 {
   'variables': {
-    'protocol_path': '<(PRODUCT_DIR)/../../third_party/WebKit/Source/platform/inspector_protocol',
+    'protocol_path': '../../third_party/inspector_protocol',
   },
   'includes': [
     'inspector.gypi',
-    '<(PRODUCT_DIR)/../../../third_party/WebKit/Source/platform/inspector_protocol/inspector_protocol.gypi',
+    '<(PRODUCT_DIR)/../../../third_party/inspector_protocol/inspector_protocol.gypi',
   ],
   'targets': [
     { 'target_name': 'inspector_injected_script',
@@ -97,7 +97,7 @@
           'action': [
             'python',
             '<(protocol_path)/CodeGenerator.py',
-            '--jinja_dir', '<(PRODUCT_DIR)/../../third_party',
+            '--jinja_dir', '../../third_party',
             '--output_base', '<(SHARED_INTERMEDIATE_DIR)/src/inspector',
             '--config', 'inspector_protocol_config.json',
           ],
diff --git a/src/inspector/java-script-call-frame.cc b/src/inspector/java-script-call-frame.cc
index b70af21..2da4f04 100644
--- a/src/inspector/java-script-call-frame.cc
+++ b/src/inspector/java-script-call-frame.cc
@@ -30,10 +30,9 @@
 
 #include "src/inspector/java-script-call-frame.h"
 
+#include "src/debug/debug-interface.h"
 #include "src/inspector/string-util.h"
 
-#include "include/v8-debug.h"
-
 namespace v8_inspector {
 
 JavaScriptCallFrame::JavaScriptCallFrame(v8::Local<v8::Context> debuggerContext,
@@ -130,10 +129,10 @@
   v8::Local<v8::Function> restartFunction = v8::Local<v8::Function>::Cast(
       callFrame->Get(context, toV8StringInternalized(m_isolate, "restart"))
           .ToLocalChecked());
-  v8::Debug::SetLiveEditEnabled(m_isolate, true);
+  v8::DebugInterface::SetLiveEditEnabled(m_isolate, true);
   v8::MaybeLocal<v8::Value> result = restartFunction->Call(
       m_debuggerContext.Get(m_isolate), callFrame, 0, nullptr);
-  v8::Debug::SetLiveEditEnabled(m_isolate, false);
+  v8::DebugInterface::SetLiveEditEnabled(m_isolate, false);
   return result;
 }
 
diff --git a/src/inspector/js_protocol.json b/src/inspector/js_protocol.json
index aff6806..c1ac585 100644
--- a/src/inspector/js_protocol.json
+++ b/src/inspector/js_protocol.json
@@ -538,6 +538,18 @@
                 "description": "Removes JavaScript breakpoint."
             },
             {
+                "name": "getPossibleBreakpoints",
+                "parameters": [
+                    { "name": "start", "$ref": "Location", "description": "Start of range to search possible breakpoint locations in." },
+                    { "name": "end", "$ref": "Location", "optional": true, "description": "End of range to search possible breakpoint locations in (exclusive). When not specified, end of script is used as end of range." }
+                ],
+                "returns": [
+                    { "name": "locations", "type": "array", "items": { "$ref": "Location" }, "description": "List of the possible breakpoint locations." }
+                ],
+                "description": "Returns possible locations for a breakpoint. The scriptId in the start and end range locations must be the same.",
+                "experimental": true
+            },
+            {
                 "name": "continueToLocation",
                 "parameters": [
                     { "name": "location", "$ref": "Location", "description": "Location to continue to." }
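
For orientation, a hypothetical protocol exchange for the new command could
look like this (the id, scriptId, and locations are invented for
illustration):

    // request
    {"id": 1, "method": "Debugger.getPossibleBreakpoints",
     "params": {"start": {"scriptId": "42", "lineNumber": 7, "columnNumber": 0},
                "end":   {"scriptId": "42", "lineNumber": 9, "columnNumber": 0}}}
    // response
    {"id": 1, "result": {"locations": [
        {"scriptId": "42", "lineNumber": 7, "columnNumber": 2},
        {"scriptId": "42", "lineNumber": 8, "columnNumber": 4}]}}
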
diff --git a/src/inspector/remote-object-id.cc b/src/inspector/remote-object-id.cc
index d83020c..aac6724 100644
--- a/src/inspector/remote-object-id.cc
+++ b/src/inspector/remote-object-id.cc
@@ -27,44 +27,34 @@
 
 RemoteObjectId::RemoteObjectId() : RemoteObjectIdBase(), m_id(0) {}
 
-std::unique_ptr<RemoteObjectId> RemoteObjectId::parse(
-    ErrorString* errorString, const String16& objectId) {
-  std::unique_ptr<RemoteObjectId> result(new RemoteObjectId());
+Response RemoteObjectId::parse(const String16& objectId,
+                               std::unique_ptr<RemoteObjectId>* result) {
+  std::unique_ptr<RemoteObjectId> remoteObjectId(new RemoteObjectId());
   std::unique_ptr<protocol::DictionaryValue> parsedObjectId =
-      result->parseInjectedScriptId(objectId);
-  if (!parsedObjectId) {
-    *errorString = "Invalid remote object id";
-    return nullptr;
-  }
+      remoteObjectId->parseInjectedScriptId(objectId);
+  if (!parsedObjectId) return Response::Error("Invalid remote object id");
 
-  bool success = parsedObjectId->getInteger("id", &result->m_id);
-  if (!success) {
-    *errorString = "Invalid remote object id";
-    return nullptr;
-  }
-  return result;
+  bool success = parsedObjectId->getInteger("id", &remoteObjectId->m_id);
+  if (!success) return Response::Error("Invalid remote object id");
+  *result = std::move(remoteObjectId);
+  return Response::OK();
 }
 
 RemoteCallFrameId::RemoteCallFrameId()
     : RemoteObjectIdBase(), m_frameOrdinal(0) {}
 
-std::unique_ptr<RemoteCallFrameId> RemoteCallFrameId::parse(
-    ErrorString* errorString, const String16& objectId) {
-  std::unique_ptr<RemoteCallFrameId> result(new RemoteCallFrameId());
+Response RemoteCallFrameId::parse(const String16& objectId,
+                                  std::unique_ptr<RemoteCallFrameId>* result) {
+  std::unique_ptr<RemoteCallFrameId> remoteCallFrameId(new RemoteCallFrameId());
   std::unique_ptr<protocol::DictionaryValue> parsedObjectId =
-      result->parseInjectedScriptId(objectId);
-  if (!parsedObjectId) {
-    *errorString = "Invalid call frame id";
-    return nullptr;
-  }
+      remoteCallFrameId->parseInjectedScriptId(objectId);
+  if (!parsedObjectId) return Response::Error("Invalid call frame id");
 
-  bool success = parsedObjectId->getInteger("ordinal", &result->m_frameOrdinal);
-  if (!success) {
-    *errorString = "Invalid call frame id";
-    return nullptr;
-  }
-
-  return result;
+  bool success =
+      parsedObjectId->getInteger("ordinal", &remoteCallFrameId->m_frameOrdinal);
+  if (!success) return Response::Error("Invalid call frame id");
+  *result = std::move(remoteCallFrameId);
+  return Response::OK();
 }
 
 String16 RemoteCallFrameId::serialize(int injectedScriptId, int frameOrdinal) {
diff --git a/src/inspector/remote-object-id.h b/src/inspector/remote-object-id.h
index a32f568..3e6928a 100644
--- a/src/inspector/remote-object-id.h
+++ b/src/inspector/remote-object-id.h
@@ -9,7 +9,7 @@
 
 namespace v8_inspector {
 
-using protocol::ErrorString;
+using protocol::Response;
 
 class RemoteObjectIdBase {
  public:
@@ -27,7 +27,7 @@
 
 class RemoteObjectId final : public RemoteObjectIdBase {
  public:
-  static std::unique_ptr<RemoteObjectId> parse(ErrorString*, const String16&);
+  static Response parse(const String16&, std::unique_ptr<RemoteObjectId>*);
   ~RemoteObjectId() {}
   int id() const { return m_id; }
 
@@ -39,8 +39,7 @@
 
 class RemoteCallFrameId final : public RemoteObjectIdBase {
  public:
-  static std::unique_ptr<RemoteCallFrameId> parse(ErrorString*,
-                                                  const String16&);
+  static Response parse(const String16&, std::unique_ptr<RemoteCallFrameId>*);
   ~RemoteCallFrameId() {}
 
   int frameOrdinal() const { return m_frameOrdinal; }
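
Call sites adapt accordingly; a representative caller, mirroring the pattern
already visible in injected-script.cc above:

    std::unique_ptr<RemoteCallFrameId> remoteId;
    Response response = RemoteCallFrameId::parse(callFrameId, &remoteId);
    if (!response.isSuccess()) return response;  // e.g. "Invalid call frame id"
    size_t frameOrdinal = static_cast<size_t>(remoteId->frameOrdinal());
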
diff --git a/src/inspector/string-16.cc b/src/inspector/string-16.cc
index f608460..09909a9 100644
--- a/src/inspector/string-16.cc
+++ b/src/inspector/string-16.cc
@@ -377,7 +377,11 @@
 String16 String16::fromInteger(size_t number) {
   const size_t kBufferSize = 50;
   char buffer[kBufferSize];
+#if !defined(_WIN32) && !defined(_WIN64)
   v8::base::OS::SNPrintF(buffer, kBufferSize, "%zu", number);
+#else
+  v8::base::OS::SNPrintF(buffer, kBufferSize, "%Iu", number);
+#endif
   return String16(buffer);
 }
 
@@ -443,6 +447,26 @@
   m_buffer.insert(m_buffer.end(), characters, characters + length);
 }
 
+void String16Builder::appendNumber(int number) {
+  const int kBufferSize = 12;  // "-2147483648" needs 11 chars plus the NUL.
+  char buffer[kBufferSize];
+  int chars = v8::base::OS::SNPrintF(buffer, kBufferSize, "%d", number);
+  DCHECK_GT(kBufferSize, chars);
+  m_buffer.insert(m_buffer.end(), buffer, buffer + chars);
+}
+
+void String16Builder::appendNumber(size_t number) {
+  const int kBufferSize = 21;  // 2^64-1 needs 20 chars plus the NUL.
+  char buffer[kBufferSize];
+#if !defined(_WIN32) && !defined(_WIN64)
+  int chars = v8::base::OS::SNPrintF(buffer, kBufferSize, "%zu", number);
+#else
+  int chars = v8::base::OS::SNPrintF(buffer, kBufferSize, "%Iu", number);
+#endif
+  DCHECK_GT(kBufferSize, chars);
+  m_buffer.insert(m_buffer.end(), buffer, buffer + chars);
+}
+
 String16 String16Builder::toString() {
   return String16(m_buffer.data(), m_buffer.size());
 }
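
The _WIN32/_WIN64 branches exist because MSVC historically lacked C99's %zu
and instead used %Iu as its size_t length modifier. The same guard works with
plain snprintf; a self-contained sketch outside the V8 tree:

    #include <cstddef>
    #include <cstdio>

    // Formats a size_t portably across MSVC and POSIX toolchains.
    int FormatSize(char* buffer, size_t bufferSize, size_t number) {
    #if !defined(_WIN32) && !defined(_WIN64)
      return std::snprintf(buffer, bufferSize, "%zu", number);  // C99
    #else
      return std::snprintf(buffer, bufferSize, "%Iu", number);  // MSVC extension
    #endif
    }
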
diff --git a/src/inspector/string-16.h b/src/inspector/string-16.h
index 6dc7759..360ec93 100644
--- a/src/inspector/string-16.h
+++ b/src/inspector/string-16.h
@@ -21,7 +21,10 @@
   static const size_t kNotFound = static_cast<size_t>(-1);
 
   String16() {}
-  String16(const String16& other) : m_impl(other.m_impl) {}
+  String16(const String16& other)
+      : m_impl(other.m_impl), hash_code(other.hash_code) {}
+  String16(String16&& other)
+      : m_impl(std::move(other.m_impl)), hash_code(other.hash_code) {}
   String16(const UChar* characters, size_t size) : m_impl(characters, size) {}
   String16(const UChar* characters)  // NOLINT(runtime/explicit)
       : m_impl(characters) {}
@@ -31,6 +34,18 @@
     m_impl.resize(size);
     for (size_t i = 0; i < size; ++i) m_impl[i] = characters[i];
   }
+  explicit String16(const std::basic_string<UChar>& impl) : m_impl(impl) {}
+
+  String16& operator=(const String16& other) {
+    m_impl = other.m_impl;
+    hash_code = other.hash_code;
+    return *this;
+  }
+  String16& operator=(String16&& other) {
+    m_impl = std::move(other.m_impl);
+    hash_code = other.hash_code;
+    return *this;
+  }
 
   static String16 fromInteger(int);
   static String16 fromInteger(size_t);
@@ -52,51 +67,53 @@
   size_t reverseFind(const String16& str, size_t start = UINT_MAX) const {
     return m_impl.rfind(str.m_impl, start);
   }
-  void swap(String16& other) { m_impl.swap(other.m_impl); }
+  size_t find(UChar c, size_t start = 0) const { return m_impl.find(c, start); }
+  size_t reverseFind(UChar c, size_t start = UINT_MAX) const {
+    return m_impl.rfind(c, start);
+  }
+  void swap(String16& other) {
+    m_impl.swap(other.m_impl);
+    std::swap(hash_code, other.hash_code);
+  }
 
   // Convenience methods.
   std::string utf8() const;
   static String16 fromUTF8(const char* stringStart, size_t length);
 
-  const std::basic_string<UChar>& impl() const { return m_impl; }
-  explicit String16(const std::basic_string<UChar>& impl) : m_impl(impl) {}
-
   std::size_t hash() const {
-    if (!has_hash) {
-      size_t hash = 0;
-      for (size_t i = 0; i < length(); ++i) hash = 31 * hash + m_impl[i];
-      hash_code = hash;
-      has_hash = true;
+    if (!hash_code) {
+      for (UChar c : m_impl) hash_code = 31 * hash_code + c;
+      // Map hash code 0 to 1. This doubles the number of hash collisions
+      // for 1, but avoids recomputing the hash code when queried again.
+      if (!hash_code) ++hash_code;
     }
     return hash_code;
   }
 
+  inline bool operator==(const String16& other) const {
+    return m_impl == other.m_impl;
+  }
+  inline bool operator<(const String16& other) const {
+    return m_impl < other.m_impl;
+  }
+  inline bool operator!=(const String16& other) const {
+    return m_impl != other.m_impl;
+  }
+  inline String16 operator+(const String16& other) const {
+    return String16(m_impl + other.m_impl);
+  }
+
+  // Defined later, since it uses the String16Builder.
+  template <typename... T>
+  static String16 concat(T... args);
+
  private:
   std::basic_string<UChar> m_impl;
-  mutable bool has_hash = false;
   mutable std::size_t hash_code = 0;
 };
 
-inline bool operator==(const String16& a, const String16& b) {
-  return a.impl() == b.impl();
-}
-inline bool operator<(const String16& a, const String16& b) {
-  return a.impl() < b.impl();
-}
-inline bool operator!=(const String16& a, const String16& b) {
-  return a.impl() != b.impl();
-}
-inline bool operator==(const String16& a, const char* b) {
-  return a.impl() == String16(b).impl();
-}
-inline String16 operator+(const String16& a, const char* b) {
-  return String16(a.impl() + String16(b).impl());
-}
 inline String16 operator+(const char* a, const String16& b) {
-  return String16(String16(a).impl() + b.impl());
-}
-inline String16 operator+(const String16& a, const String16& b) {
-  return String16(a.impl() + b.impl());
+  return String16(a) + b;
 }
 
 class String16Builder {
@@ -107,13 +124,29 @@
   void append(char);
   void append(const UChar*, size_t);
   void append(const char*, size_t);
+  void appendNumber(int);
+  void appendNumber(size_t);
   String16 toString();
   void reserveCapacity(size_t);
 
+  template <typename T, typename... R>
+  void appendAll(T first, R... rest) {
+    append(first);
+    appendAll(rest...);
+  }
+  void appendAll() {}
+
  private:
   std::vector<UChar> m_buffer;
 };
 
+template <typename... T>
+String16 String16::concat(T... args) {
+  String16Builder builder;
+  builder.appendAll(args...);
+  return builder.toString();
+}
+
 }  // namespace v8_inspector
 
 #if !defined(__APPLE__) || defined(_LIBCPP_VERSION)
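
The new variadic concat simply folds each argument through a matching
String16Builder::append overload, with the zero-argument appendAll()
terminating the parameter-pack recursion. A small usage sketch (the values
are invented):

    // Each argument must match an existing append overload
    // (const String16&, char, ...):
    String16 location =
        String16::concat(String16("script.js"), ':', String16::fromInteger(7));
    // location == "script.js:7"
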
diff --git a/src/inspector/string-util.cc b/src/inspector/string-util.cc
index e6b83a5..e6ad5d0 100644
--- a/src/inspector/string-util.cc
+++ b/src/inspector/string-util.cc
@@ -111,94 +111,6 @@
 
 }  // namespace protocol
 
-std::unique_ptr<protocol::Value> toProtocolValue(protocol::String* errorString,
-                                                 v8::Local<v8::Context> context,
-                                                 v8::Local<v8::Value> value,
-                                                 int maxDepth) {
-  if (value.IsEmpty()) {
-    UNREACHABLE();
-    return nullptr;
-  }
-
-  if (!maxDepth) {
-    *errorString = "Object reference chain is too long";
-    return nullptr;
-  }
-  maxDepth--;
-
-  if (value->IsNull() || value->IsUndefined()) return protocol::Value::null();
-  if (value->IsBoolean())
-    return protocol::FundamentalValue::create(value.As<v8::Boolean>()->Value());
-  if (value->IsNumber()) {
-    double doubleValue = value.As<v8::Number>()->Value();
-    int intValue = static_cast<int>(doubleValue);
-    if (intValue == doubleValue)
-      return protocol::FundamentalValue::create(intValue);
-    return protocol::FundamentalValue::create(doubleValue);
-  }
-  if (value->IsString())
-    return protocol::StringValue::create(
-        toProtocolString(value.As<v8::String>()));
-  if (value->IsArray()) {
-    v8::Local<v8::Array> array = value.As<v8::Array>();
-    std::unique_ptr<protocol::ListValue> inspectorArray =
-        protocol::ListValue::create();
-    uint32_t length = array->Length();
-    for (uint32_t i = 0; i < length; i++) {
-      v8::Local<v8::Value> value;
-      if (!array->Get(context, i).ToLocal(&value)) {
-        *errorString = "Internal error";
-        return nullptr;
-      }
-      std::unique_ptr<protocol::Value> element =
-          toProtocolValue(errorString, context, value, maxDepth);
-      if (!element) return nullptr;
-      inspectorArray->pushValue(std::move(element));
-    }
-    return std::move(inspectorArray);
-  }
-  if (value->IsObject()) {
-    std::unique_ptr<protocol::DictionaryValue> jsonObject =
-        protocol::DictionaryValue::create();
-    v8::Local<v8::Object> object = v8::Local<v8::Object>::Cast(value);
-    v8::Local<v8::Array> propertyNames;
-    if (!object->GetPropertyNames(context).ToLocal(&propertyNames)) {
-      *errorString = "Internal error";
-      return nullptr;
-    }
-    uint32_t length = propertyNames->Length();
-    for (uint32_t i = 0; i < length; i++) {
-      v8::Local<v8::Value> name;
-      if (!propertyNames->Get(context, i).ToLocal(&name)) {
-        *errorString = "Internal error";
-        return nullptr;
-      }
-      // FIXME(yurys): v8::Object should support GetOwnPropertyNames
-      if (name->IsString()) {
-        v8::Maybe<bool> hasRealNamedProperty = object->HasRealNamedProperty(
-            context, v8::Local<v8::String>::Cast(name));
-        if (!hasRealNamedProperty.IsJust() || !hasRealNamedProperty.FromJust())
-          continue;
-      }
-      v8::Local<v8::String> propertyName;
-      if (!name->ToString(context).ToLocal(&propertyName)) continue;
-      v8::Local<v8::Value> property;
-      if (!object->Get(context, name).ToLocal(&property)) {
-        *errorString = "Internal error";
-        return nullptr;
-      }
-      std::unique_ptr<protocol::Value> propertyValue =
-          toProtocolValue(errorString, context, property, maxDepth);
-      if (!propertyValue) return nullptr;
-      jsonObject->setValue(toProtocolString(propertyName),
-                           std::move(propertyValue));
-    }
-    return std::move(jsonObject);
-  }
-  *errorString = "Object couldn't be returned by value";
-  return nullptr;
-}
-
 // static
 std::unique_ptr<StringBuffer> StringBuffer::create(const StringView& string) {
   String16 owner = toString16(string);
diff --git a/src/inspector/string-util.h b/src/inspector/string-util.h
index 30137b8..e1a69e8 100644
--- a/src/inspector/string-util.h
+++ b/src/inspector/string-util.h
@@ -40,11 +40,6 @@
 
 }  // namespace protocol
 
-std::unique_ptr<protocol::Value> toProtocolValue(protocol::String* errorString,
-                                                 v8::Local<v8::Context>,
-                                                 v8::Local<v8::Value>,
-                                                 int maxDepth = 1000);
-
 v8::Local<v8::String> toV8String(v8::Isolate*, const String16&);
 v8::Local<v8::String> toV8StringInternalized(v8::Isolate*, const String16&);
 v8::Local<v8::String> toV8StringInternalized(v8::Isolate*, const char*);
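
toProtocolValue is removed from the string utilities outright rather than
converted in place; the wrapTable hunk in injected-script.cc above already
calls its Response-based replacement, whose new declaration site is outside
these hunks:

    std::unique_ptr<protocol::Value> protocolValue;
    Response response = toProtocolValue(context, value, &protocolValue);
    if (!response.isSuccess()) return response;
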
diff --git a/src/inspector/v8-console-agent-impl.cc b/src/inspector/v8-console-agent-impl.cc
index 8eb883c..6b0e12a 100644
--- a/src/inspector/v8-console-agent-impl.cc
+++ b/src/inspector/v8-console-agent-impl.cc
@@ -26,28 +26,29 @@
 
 V8ConsoleAgentImpl::~V8ConsoleAgentImpl() {}
 
-void V8ConsoleAgentImpl::enable(ErrorString* errorString) {
-  if (m_enabled) return;
+Response V8ConsoleAgentImpl::enable() {
+  if (m_enabled) return Response::OK();
   m_state->setBoolean(ConsoleAgentState::consoleEnabled, true);
   m_enabled = true;
   m_session->inspector()->enableStackCapturingIfNeeded();
   reportAllMessages();
+  return Response::OK();
 }
 
-void V8ConsoleAgentImpl::disable(ErrorString* errorString) {
-  if (!m_enabled) return;
+Response V8ConsoleAgentImpl::disable() {
+  if (!m_enabled) return Response::OK();
   m_session->inspector()->disableStackCapturingIfNeeded();
   m_state->setBoolean(ConsoleAgentState::consoleEnabled, false);
   m_enabled = false;
+  return Response::OK();
 }
 
-void V8ConsoleAgentImpl::clearMessages(ErrorString* errorString) {}
+Response V8ConsoleAgentImpl::clearMessages() { return Response::OK(); }
 
 void V8ConsoleAgentImpl::restore() {
   if (!m_state->booleanProperty(ConsoleAgentState::consoleEnabled, false))
     return;
-  ErrorString ignored;
-  enable(&ignored);
+  enable();
 }
 
 void V8ConsoleAgentImpl::messageAdded(V8ConsoleMessage* message) {
diff --git a/src/inspector/v8-console-agent-impl.h b/src/inspector/v8-console-agent-impl.h
index f3d598b..db17e54 100644
--- a/src/inspector/v8-console-agent-impl.h
+++ b/src/inspector/v8-console-agent-impl.h
@@ -14,7 +14,7 @@
 class V8ConsoleMessage;
 class V8InspectorSessionImpl;
 
-using protocol::ErrorString;
+using protocol::Response;
 
 class V8ConsoleAgentImpl : public protocol::Console::Backend {
  public:
@@ -22,9 +22,9 @@
                      protocol::DictionaryValue* state);
   ~V8ConsoleAgentImpl() override;
 
-  void enable(ErrorString*) override;
-  void disable(ErrorString*) override;
-  void clearMessages(ErrorString*) override;
+  Response enable() override;
+  Response disable() override;
+  Response clearMessages() override;
 
   void restore();
   void messageAdded(V8ConsoleMessage*);
diff --git a/src/inspector/v8-console.cc b/src/inspector/v8-console.cc
index ddd4bf6..fee6117 100644
--- a/src/inspector/v8-console.cc
+++ b/src/inspector/v8-console.cc
@@ -618,12 +618,11 @@
   if (!context) return;
   InjectedScript* injectedScript = context->getInjectedScript();
   if (!injectedScript) return;
-  ErrorString errorString;
-  std::unique_ptr<protocol::Runtime::RemoteObject> wrappedObject =
-      injectedScript->wrapObject(&errorString, info[0], "",
-                                 false /** forceValueType */,
-                                 false /** generatePreview */);
-  if (!wrappedObject || !errorString.isEmpty()) return;
+  std::unique_ptr<protocol::Runtime::RemoteObject> wrappedObject;
+  protocol::Response response =
+      injectedScript->wrapObject(info[0], "", false /** forceValueType */,
+                                 false /** generatePreview */, &wrappedObject);
+  if (!response.isSuccess()) return;
 
   std::unique_ptr<protocol::DictionaryValue> hints =
       protocol::DictionaryValue::create();
diff --git a/src/inspector/v8-debugger-agent-impl.cc b/src/inspector/v8-debugger-agent-impl.cc
index 80e2611..224ae28 100644
--- a/src/inspector/v8-debugger-agent-impl.cc
+++ b/src/inspector/v8-debugger-agent-impl.cc
@@ -6,6 +6,7 @@
 
 #include <algorithm>
 
+#include "src/debug/debug-interface.h"
 #include "src/inspector/injected-script.h"
 #include "src/inspector/inspected-context.h"
 #include "src/inspector/java-script-call-frame.h"
@@ -21,6 +22,7 @@
 #include "src/inspector/v8-regex.h"
 #include "src/inspector/v8-runtime-agent-impl.h"
 #include "src/inspector/v8-stack-trace-impl.h"
+#include "src/inspector/v8-value-copier.h"
 
 #include "include/v8-inspector.h"
 
@@ -52,8 +54,11 @@
 
 }  // namespace DebuggerAgentState
 
-static const int maxSkipStepFrameCount = 128;
-static const char backtraceObjectGroup[] = "backtrace";
+static const int kMaxSkipStepFrameCount = 128;
+static const char kBacktraceObjectGroup[] = "backtrace";
+static const char kDebuggerNotEnabled[] = "Debugger agent is not enabled";
+static const char kDebuggerNotPaused[] =
+    "Can only perform operation while paused.";
 
 static String16 breakpointIdSuffix(
     V8DebuggerAgentImpl::BreakpointSource source) {
@@ -71,8 +76,14 @@
 static String16 generateBreakpointId(
     const String16& scriptId, int lineNumber, int columnNumber,
     V8DebuggerAgentImpl::BreakpointSource source) {
-  return scriptId + ":" + String16::fromInteger(lineNumber) + ":" +
-         String16::fromInteger(columnNumber) + breakpointIdSuffix(source);
+  String16Builder builder;
+  builder.append(scriptId);
+  builder.append(':');
+  builder.appendNumber(lineNumber);
+  builder.append(':');
+  builder.appendNumber(columnNumber);
+  builder.append(breakpointIdSuffix(source));
+  return builder.toString();
 }
 
 static bool positionComparator(const std::pair<int, int>& a,
@@ -81,11 +92,6 @@
   return a.second < b.second;
 }
 
-static bool hasInternalError(ErrorString* errorString, bool hasError) {
-  if (hasError) *errorString = "Internal error";
-  return hasError;
-}
-
 static std::unique_ptr<protocol::Debugger::Location> buildProtocolLocation(
     const String16& scriptId, int lineNumber, int columnNumber) {
   return protocol::Debugger::Location::create()
@@ -120,13 +126,7 @@
 
 V8DebuggerAgentImpl::~V8DebuggerAgentImpl() {}
 
-bool V8DebuggerAgentImpl::checkEnabled(ErrorString* errorString) {
-  if (enabled()) return true;
-  *errorString = "Debugger agent is not enabled";
-  return false;
-}
-
-void V8DebuggerAgentImpl::enable() {
+void V8DebuggerAgentImpl::enableImpl() {
   // m_inspector->addListener may result in reporting all parsed scripts to
   // the agent so it should already be in enabled state by then.
   m_enabled = true;
@@ -145,24 +145,23 @@
 
 bool V8DebuggerAgentImpl::enabled() { return m_enabled; }
 
-void V8DebuggerAgentImpl::enable(ErrorString* errorString) {
-  if (enabled()) return;
+Response V8DebuggerAgentImpl::enable() {
+  if (enabled()) return Response::OK();
 
-  if (!m_inspector->client()->canExecuteScripts(m_session->contextGroupId())) {
-    *errorString = "Script execution is prohibited";
-    return;
-  }
+  if (!m_inspector->client()->canExecuteScripts(m_session->contextGroupId()))
+    return Response::Error("Script execution is prohibited");
 
-  enable();
+  enableImpl();
+  return Response::OK();
 }
 
-void V8DebuggerAgentImpl::disable(ErrorString*) {
-  if (!enabled()) return;
+Response V8DebuggerAgentImpl::disable() {
+  if (!enabled()) return Response::OK();
 
   m_state->setObject(DebuggerAgentState::javaScriptBreakpoints,
                      protocol::DictionaryValue::create());
   m_state->setInteger(DebuggerAgentState::pauseOnExceptionsState,
-                      V8Debugger::DontPauseOnExceptions);
+                      v8::DebugInterface::NoBreakOnException);
   m_state->setInteger(DebuggerAgentState::asyncCallStackDepth, 0);
 
   if (!m_pausedContext.IsEmpty()) m_debugger->continueProgram();
@@ -188,6 +187,7 @@
   m_state->remove(DebuggerAgentState::blackboxPattern);
   m_enabled = false;
   m_state->setBoolean(DebuggerAgentState::debuggerEnabled, false);
+  return Response::OK();
 }
 
 void V8DebuggerAgentImpl::restore() {
@@ -197,13 +197,11 @@
   if (!m_inspector->client()->canExecuteScripts(m_session->contextGroupId()))
     return;
 
-  enable();
-  ErrorString error;
+  enableImpl();
 
-  int pauseState = V8Debugger::DontPauseOnExceptions;
+  int pauseState = v8::DebugInterface::NoBreakOnException;
   m_state->getInteger(DebuggerAgentState::pauseOnExceptionsState, &pauseState);
-  setPauseOnExceptionsImpl(&error, pauseState);
-  DCHECK(error.isEmpty());
+  setPauseOnExceptionsImpl(pauseState);
 
   m_skipAllPauses =
       m_state->booleanProperty(DebuggerAgentState::skipAllPauses, false);
@@ -216,19 +214,20 @@
   String16 blackboxPattern;
   if (m_state->getString(DebuggerAgentState::blackboxPattern,
                          &blackboxPattern)) {
-    if (!setBlackboxPattern(&error, blackboxPattern)) UNREACHABLE();
+    setBlackboxPattern(blackboxPattern);
   }
 }
 
-void V8DebuggerAgentImpl::setBreakpointsActive(ErrorString* errorString,
-                                               bool active) {
-  if (!checkEnabled(errorString)) return;
+Response V8DebuggerAgentImpl::setBreakpointsActive(bool active) {
+  if (!enabled()) return Response::Error(kDebuggerNotEnabled);
   m_debugger->setBreakpointsActivated(active);
+  return Response::OK();
 }
 
-void V8DebuggerAgentImpl::setSkipAllPauses(ErrorString*, bool skip) {
+Response V8DebuggerAgentImpl::setSkipAllPauses(bool skip) {
   m_skipAllPauses = skip;
   m_state->setBoolean(DebuggerAgentState::skipAllPauses, m_skipAllPauses);
+  return Response::OK();
 }
 
 static std::unique_ptr<protocol::DictionaryValue>
@@ -254,27 +253,21 @@
   return url == pattern;
 }
 
-void V8DebuggerAgentImpl::setBreakpointByUrl(
-    ErrorString* errorString, int lineNumber,
-    const Maybe<String16>& optionalURL, const Maybe<String16>& optionalURLRegex,
-    const Maybe<int>& optionalColumnNumber,
-    const Maybe<String16>& optionalCondition, String16* outBreakpointId,
+Response V8DebuggerAgentImpl::setBreakpointByUrl(
+    int lineNumber, Maybe<String16> optionalURL,
+    Maybe<String16> optionalURLRegex, Maybe<int> optionalColumnNumber,
+    Maybe<String16> optionalCondition, String16* outBreakpointId,
     std::unique_ptr<protocol::Array<protocol::Debugger::Location>>* locations) {
   *locations = Array<protocol::Debugger::Location>::create();
-  if (optionalURL.isJust() == optionalURLRegex.isJust()) {
-    *errorString = "Either url or urlRegex must be specified.";
-    return;
-  }
+  if (optionalURL.isJust() == optionalURLRegex.isJust())
+    return Response::Error("Either url or urlRegex must be specified.");
 
   String16 url = optionalURL.isJust() ? optionalURL.fromJust()
                                       : optionalURLRegex.fromJust();
   int columnNumber = 0;
   if (optionalColumnNumber.isJust()) {
     columnNumber = optionalColumnNumber.fromJust();
-    if (columnNumber < 0) {
-      *errorString = "Incorrect column number";
-      return;
-    }
+    if (columnNumber < 0) return Response::Error("Incorrect column number");
   }
   String16 condition = optionalCondition.fromMaybe("");
   bool isRegex = optionalURLRegex.isJust();
@@ -291,10 +284,8 @@
     m_state->setObject(DebuggerAgentState::javaScriptBreakpoints,
                        std::move(newValue));
   }
-  if (breakpointsCookie->get(breakpointId)) {
-    *errorString = "Breakpoint at specified location already exists.";
-    return;
-  }
+  if (breakpointsCookie->get(breakpointId))
+    return Response::Error("Breakpoint at specified location already exists.");
 
   breakpointsCookie->setObject(
       breakpointId, buildObjectForBreakpointCookie(
@@ -310,30 +301,16 @@
   }
 
   *outBreakpointId = breakpointId;
+  return Response::OK();
 }
 
-static bool parseLocation(
-    ErrorString* errorString,
-    std::unique_ptr<protocol::Debugger::Location> location, String16* scriptId,
-    int* lineNumber, int* columnNumber) {
-  *scriptId = location->getScriptId();
-  *lineNumber = location->getLineNumber();
-  *columnNumber = location->getColumnNumber(0);
-  return true;
-}
-
-void V8DebuggerAgentImpl::setBreakpoint(
-    ErrorString* errorString,
+Response V8DebuggerAgentImpl::setBreakpoint(
     std::unique_ptr<protocol::Debugger::Location> location,
-    const Maybe<String16>& optionalCondition, String16* outBreakpointId,
+    Maybe<String16> optionalCondition, String16* outBreakpointId,
     std::unique_ptr<protocol::Debugger::Location>* actualLocation) {
-  String16 scriptId;
-  int lineNumber;
-  int columnNumber;
-
-  if (!parseLocation(errorString, std::move(location), &scriptId, &lineNumber,
-                     &columnNumber))
-    return;
+  String16 scriptId = location->getScriptId();
+  int lineNumber = location->getLineNumber();
+  int columnNumber = location->getColumnNumber(0);
 
   String16 condition = optionalCondition.fromMaybe("");
 
@@ -341,28 +318,26 @@
       scriptId, lineNumber, columnNumber, UserBreakpointSource);
   if (m_breakpointIdToDebuggerBreakpointIds.find(breakpointId) !=
       m_breakpointIdToDebuggerBreakpointIds.end()) {
-    *errorString = "Breakpoint at specified location already exists.";
-    return;
+    return Response::Error("Breakpoint at specified location already exists.");
   }
   ScriptBreakpoint breakpoint(lineNumber, columnNumber, condition);
   *actualLocation = resolveBreakpoint(breakpointId, scriptId, breakpoint,
                                       UserBreakpointSource);
-  if (*actualLocation)
-    *outBreakpointId = breakpointId;
-  else
-    *errorString = "Could not resolve breakpoint";
+  if (!*actualLocation) return Response::Error("Could not resolve breakpoint");
+  *outBreakpointId = breakpointId;
+  return Response::OK();
 }
 
-void V8DebuggerAgentImpl::removeBreakpoint(ErrorString* errorString,
-                                           const String16& breakpointId) {
-  if (!checkEnabled(errorString)) return;
+Response V8DebuggerAgentImpl::removeBreakpoint(const String16& breakpointId) {
+  if (!enabled()) return Response::Error(kDebuggerNotEnabled);
   protocol::DictionaryValue* breakpointsCookie =
       m_state->getObject(DebuggerAgentState::javaScriptBreakpoints);
   if (breakpointsCookie) breakpointsCookie->remove(breakpointId);
-  removeBreakpoint(breakpointId);
+  removeBreakpointImpl(breakpointId);
+  return Response::OK();
 }
 
-void V8DebuggerAgentImpl::removeBreakpoint(const String16& breakpointId) {
+void V8DebuggerAgentImpl::removeBreakpointImpl(const String16& breakpointId) {
   DCHECK(enabled());
   BreakpointIdToDebuggerBreakpointIdsMap::iterator
       debuggerBreakpointIdsIterator =
@@ -380,27 +355,64 @@
   m_breakpointIdToDebuggerBreakpointIds.erase(breakpointId);
 }
 
-void V8DebuggerAgentImpl::continueToLocation(
-    ErrorString* errorString,
+Response V8DebuggerAgentImpl::getPossibleBreakpoints(
+    std::unique_ptr<protocol::Debugger::Location> start,
+    Maybe<protocol::Debugger::Location> end,
+    std::unique_ptr<protocol::Array<protocol::Debugger::Location>>* locations) {
+  String16 scriptId = start->getScriptId();
+
+  if (start->getLineNumber() < 0 || start->getColumnNumber(0) < 0)
+    return Response::Error(
+        "start.lineNumber and start.columnNumber should be >= 0");
+
+  v8::DebugInterface::Location v8Start(start->getLineNumber(),
+                                       start->getColumnNumber(0));
+  v8::DebugInterface::Location v8End;
+  if (end.isJust()) {
+    if (end.fromJust()->getScriptId() != scriptId)
+      return Response::Error("Locations should contain the same scriptId");
+    int line = end.fromJust()->getLineNumber();
+    int column = end.fromJust()->getColumnNumber(0);
+    if (line < 0 || column < 0)
+      return Response::Error(
+          "end.lineNumber and end.columnNumber should be >= 0");
+    v8End = v8::DebugInterface::Location(line, column);
+  }
+  auto it = m_scripts.find(scriptId);
+  if (it == m_scripts.end()) return Response::Error("Script not found");
+
+  std::vector<v8::DebugInterface::Location> v8Locations;
+  if (!it->second->getPossibleBreakpoints(v8Start, v8End, &v8Locations))
+    return Response::InternalError();
+
+  *locations = protocol::Array<protocol::Debugger::Location>::create();
+  for (size_t i = 0; i < v8Locations.size(); ++i) {
+    (*locations)
+        ->addItem(protocol::Debugger::Location::create()
+                      .setScriptId(scriptId)
+                      .setLineNumber(v8Locations[i].GetLineNumber())
+                      .setColumnNumber(v8Locations[i].GetColumnNumber())
+                      .build());
+  }
+  return Response::OK();
+}
+
+Response V8DebuggerAgentImpl::continueToLocation(
     std::unique_ptr<protocol::Debugger::Location> location) {
-  if (!checkEnabled(errorString)) return;
+  if (!enabled()) return Response::Error(kDebuggerNotEnabled);
   if (!m_continueToLocationBreakpointId.isEmpty()) {
     m_debugger->removeBreakpoint(m_continueToLocationBreakpointId);
     m_continueToLocationBreakpointId = "";
   }
 
-  String16 scriptId;
-  int lineNumber;
-  int columnNumber;
-
-  if (!parseLocation(errorString, std::move(location), &scriptId, &lineNumber,
-                     &columnNumber))
-    return;
+  String16 scriptId = location->getScriptId();
+  int lineNumber = location->getLineNumber();
+  int columnNumber = location->getColumnNumber(0);
 
   ScriptBreakpoint breakpoint(lineNumber, columnNumber, "");
   m_continueToLocationBreakpointId = m_debugger->setBreakpoint(
       scriptId, breakpoint, &lineNumber, &columnNumber);
-  resume(errorString);
+  return resume();
 }
 
 bool V8DebuggerAgentImpl::isCurrentCallStackEmptyOrBlackboxed() {
@@ -471,7 +483,7 @@
   if (!isCallFrameWithUnknownScriptOrBlackboxed(topCallFrame))
     return RequestNoSkip;
 
-  if (m_skippedStepFrameCount >= maxSkipStepFrameCount) return RequestStepOut;
+  if (m_skippedStepFrameCount >= kMaxSkipStepFrameCount) return RequestStepOut;
 
   if (!m_skippedStepFrameCount) m_recursionLevelForStepFrame = 1;
 
@@ -509,17 +521,14 @@
   return buildProtocolLocation(scriptId, actualLineNumber, actualColumnNumber);
 }
 
-void V8DebuggerAgentImpl::searchInContent(
-    ErrorString* error, const String16& scriptId, const String16& query,
-    const Maybe<bool>& optionalCaseSensitive,
-    const Maybe<bool>& optionalIsRegex,
+Response V8DebuggerAgentImpl::searchInContent(
+    const String16& scriptId, const String16& query,
+    Maybe<bool> optionalCaseSensitive, Maybe<bool> optionalIsRegex,
     std::unique_ptr<Array<protocol::Debugger::SearchMatch>>* results) {
   v8::HandleScope handles(m_isolate);
   ScriptsMap::iterator it = m_scripts.find(scriptId);
-  if (it == m_scripts.end()) {
-    *error = String16("No script for id: " + scriptId);
-    return;
-  }
+  if (it == m_scripts.end())
+    return Response::Error("No script for id: " + scriptId);
 
   std::vector<std::unique_ptr<protocol::Debugger::SearchMatch>> matches =
       searchInTextByLinesImpl(m_session,
@@ -529,44 +538,46 @@
   *results = protocol::Array<protocol::Debugger::SearchMatch>::create();
   for (size_t i = 0; i < matches.size(); ++i)
     (*results)->addItem(std::move(matches[i]));
+  return Response::OK();
 }
 
-void V8DebuggerAgentImpl::setScriptSource(
-    ErrorString* errorString, const String16& scriptId,
-    const String16& newContent, const Maybe<bool>& dryRun,
+Response V8DebuggerAgentImpl::setScriptSource(
+    const String16& scriptId, const String16& newContent, Maybe<bool> dryRun,
     Maybe<protocol::Array<protocol::Debugger::CallFrame>>* newCallFrames,
     Maybe<bool>* stackChanged, Maybe<StackTrace>* asyncStackTrace,
     Maybe<protocol::Runtime::ExceptionDetails>* optOutCompileError) {
-  if (!checkEnabled(errorString)) return;
+  if (!enabled()) return Response::Error(kDebuggerNotEnabled);
 
   v8::HandleScope handles(m_isolate);
   v8::Local<v8::String> newSource = toV8String(m_isolate, newContent);
-  if (!m_debugger->setScriptSource(scriptId, newSource, dryRun.fromMaybe(false),
-                                   errorString, optOutCompileError,
-                                   &m_pausedCallFrames, stackChanged))
-    return;
+  bool compileError = false;
+  Response response = m_debugger->setScriptSource(
+      scriptId, newSource, dryRun.fromMaybe(false), optOutCompileError,
+      &m_pausedCallFrames, stackChanged, &compileError);
+  if (!response.isSuccess() || compileError) return response;
 
   ScriptsMap::iterator it = m_scripts.find(scriptId);
-  if (it != m_scripts.end()) it->second->setSource(m_isolate, newSource);
+  if (it != m_scripts.end()) it->second->setSource(newSource);
 
-  std::unique_ptr<Array<CallFrame>> callFrames = currentCallFrames(errorString);
-  if (!callFrames) return;
+  std::unique_ptr<Array<CallFrame>> callFrames;
+  response = currentCallFrames(&callFrames);
+  if (!response.isSuccess()) return response;
   *newCallFrames = std::move(callFrames);
   *asyncStackTrace = currentAsyncStackTrace();
+  return Response::OK();
 }
 
-void V8DebuggerAgentImpl::restartFrame(
-    ErrorString* errorString, const String16& callFrameId,
+Response V8DebuggerAgentImpl::restartFrame(
+    const String16& callFrameId,
     std::unique_ptr<Array<CallFrame>>* newCallFrames,
     Maybe<StackTrace>* asyncStackTrace) {
-  if (!assertPaused(errorString)) return;
-  InjectedScript::CallFrameScope scope(
-      errorString, m_inspector, m_session->contextGroupId(), callFrameId);
-  if (!scope.initialize()) return;
-  if (scope.frameOrdinal() >= m_pausedCallFrames.size()) {
-    *errorString = "Could not find call frame with given id";
-    return;
-  }
+  if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
+  InjectedScript::CallFrameScope scope(m_inspector, m_session->contextGroupId(),
+                                       callFrameId);
+  Response response = scope.initialize();
+  if (!response.isSuccess()) return response;
+  if (scope.frameOrdinal() >= m_pausedCallFrames.size())
+    return Response::Error("Could not find call frame with given id");
 
   v8::Local<v8::Value> resultValue;
   v8::Local<v8::Boolean> result;
@@ -575,28 +586,26 @@
       scope.tryCatch().HasCaught() ||
       !resultValue->ToBoolean(scope.context()).ToLocal(&result) ||
       !result->Value()) {
-    *errorString = "Internal error";
-    return;
+    return Response::InternalError();
   }
   JavaScriptCallFrames frames = m_debugger->currentCallFrames();
   m_pausedCallFrames.swap(frames);
 
-  *newCallFrames = currentCallFrames(errorString);
-  if (!*newCallFrames) return;
+  response = currentCallFrames(newCallFrames);
+  if (!response.isSuccess()) return response;
   *asyncStackTrace = currentAsyncStackTrace();
+  return Response::OK();
 }
 
-void V8DebuggerAgentImpl::getScriptSource(ErrorString* error,
-                                          const String16& scriptId,
-                                          String16* scriptSource) {
-  if (!checkEnabled(error)) return;
+Response V8DebuggerAgentImpl::getScriptSource(const String16& scriptId,
+                                              String16* scriptSource) {
+  if (!enabled()) return Response::Error(kDebuggerNotEnabled);
   ScriptsMap::iterator it = m_scripts.find(scriptId);
-  if (it == m_scripts.end()) {
-    *error = "No script for id: " + scriptId;
-    return;
-  }
+  if (it == m_scripts.end())
+    return Response::Error("No script for id: " + scriptId);
   v8::HandleScope handles(m_isolate);
   *scriptSource = toProtocolString(it->second->source(m_isolate));
+  return Response::OK();
 }
 
 void V8DebuggerAgentImpl::schedulePauseOnNextStatement(
@@ -632,103 +641,100 @@
   m_debugger->setPauseOnNextStatement(false);
 }
 
-void V8DebuggerAgentImpl::pause(ErrorString* errorString) {
-  if (!checkEnabled(errorString)) return;
-  if (m_javaScriptPauseScheduled || m_debugger->isPaused()) return;
+Response V8DebuggerAgentImpl::pause() {
+  if (!enabled()) return Response::Error(kDebuggerNotEnabled);
+  if (m_javaScriptPauseScheduled || m_debugger->isPaused())
+    return Response::OK();
   clearBreakDetails();
   m_javaScriptPauseScheduled = true;
   m_scheduledDebuggerStep = NoStep;
   m_skippedStepFrameCount = 0;
   m_steppingFromFramework = false;
   m_debugger->setPauseOnNextStatement(true);
+  return Response::OK();
 }
 
-void V8DebuggerAgentImpl::resume(ErrorString* errorString) {
-  if (!assertPaused(errorString)) return;
+Response V8DebuggerAgentImpl::resume() {
+  if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
   m_scheduledDebuggerStep = NoStep;
   m_steppingFromFramework = false;
-  m_session->releaseObjectGroup(backtraceObjectGroup);
+  m_session->releaseObjectGroup(kBacktraceObjectGroup);
   m_debugger->continueProgram();
+  return Response::OK();
 }
 
-void V8DebuggerAgentImpl::stepOver(ErrorString* errorString) {
-  if (!assertPaused(errorString)) return;
+Response V8DebuggerAgentImpl::stepOver() {
+  if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
   // StepOver at function return point should fallback to StepInto.
   JavaScriptCallFrame* frame =
       !m_pausedCallFrames.empty() ? m_pausedCallFrames[0].get() : nullptr;
-  if (frame && frame->isAtReturn()) {
-    stepInto(errorString);
-    return;
-  }
+  if (frame && frame->isAtReturn()) return stepInto();
   m_scheduledDebuggerStep = StepOver;
   m_steppingFromFramework = isTopPausedCallFrameBlackboxed();
-  m_session->releaseObjectGroup(backtraceObjectGroup);
+  m_session->releaseObjectGroup(kBacktraceObjectGroup);
   m_debugger->stepOverStatement();
+  return Response::OK();
 }
 
-void V8DebuggerAgentImpl::stepInto(ErrorString* errorString) {
-  if (!assertPaused(errorString)) return;
+Response V8DebuggerAgentImpl::stepInto() {
+  if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
   m_scheduledDebuggerStep = StepInto;
   m_steppingFromFramework = isTopPausedCallFrameBlackboxed();
-  m_session->releaseObjectGroup(backtraceObjectGroup);
+  m_session->releaseObjectGroup(kBacktraceObjectGroup);
   m_debugger->stepIntoStatement();
+  return Response::OK();
 }
 
-void V8DebuggerAgentImpl::stepOut(ErrorString* errorString) {
-  if (!assertPaused(errorString)) return;
+Response V8DebuggerAgentImpl::stepOut() {
+  if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
   m_scheduledDebuggerStep = StepOut;
   m_skipNextDebuggerStepOut = false;
   m_recursionLevelForStepOut = 1;
   m_steppingFromFramework = isTopPausedCallFrameBlackboxed();
-  m_session->releaseObjectGroup(backtraceObjectGroup);
+  m_session->releaseObjectGroup(kBacktraceObjectGroup);
   m_debugger->stepOutOfFunction();
+  return Response::OK();
 }
 
-void V8DebuggerAgentImpl::setPauseOnExceptions(
-    ErrorString* errorString, const String16& stringPauseState) {
-  if (!checkEnabled(errorString)) return;
-  V8Debugger::PauseOnExceptionsState pauseState;
+Response V8DebuggerAgentImpl::setPauseOnExceptions(
+    const String16& stringPauseState) {
+  if (!enabled()) return Response::Error(kDebuggerNotEnabled);
+  v8::DebugInterface::ExceptionBreakState pauseState;
   if (stringPauseState == "none") {
-    pauseState = V8Debugger::DontPauseOnExceptions;
+    pauseState = v8::DebugInterface::NoBreakOnException;
   } else if (stringPauseState == "all") {
-    pauseState = V8Debugger::PauseOnAllExceptions;
+    pauseState = v8::DebugInterface::BreakOnAnyException;
   } else if (stringPauseState == "uncaught") {
-    pauseState = V8Debugger::PauseOnUncaughtExceptions;
+    pauseState = v8::DebugInterface::BreakOnUncaughtException;
   } else {
-    *errorString = "Unknown pause on exceptions mode: " + stringPauseState;
-    return;
+    return Response::Error("Unknown pause on exceptions mode: " +
+                           stringPauseState);
   }
-  setPauseOnExceptionsImpl(errorString, pauseState);
+  setPauseOnExceptionsImpl(pauseState);
+  return Response::OK();
 }
 
-void V8DebuggerAgentImpl::setPauseOnExceptionsImpl(ErrorString* errorString,
-                                                   int pauseState) {
+void V8DebuggerAgentImpl::setPauseOnExceptionsImpl(int pauseState) {
   m_debugger->setPauseOnExceptionsState(
-      static_cast<V8Debugger::PauseOnExceptionsState>(pauseState));
-  if (m_debugger->getPauseOnExceptionsState() != pauseState)
-    *errorString = "Internal error. Could not change pause on exceptions state";
-  else
-    m_state->setInteger(DebuggerAgentState::pauseOnExceptionsState, pauseState);
+      static_cast<v8::DebugInterface::ExceptionBreakState>(pauseState));
+  m_state->setInteger(DebuggerAgentState::pauseOnExceptionsState, pauseState);
 }
 
-void V8DebuggerAgentImpl::evaluateOnCallFrame(
-    ErrorString* errorString, const String16& callFrameId,
-    const String16& expression, const Maybe<String16>& objectGroup,
-    const Maybe<bool>& includeCommandLineAPI, const Maybe<bool>& silent,
-    const Maybe<bool>& returnByValue, const Maybe<bool>& generatePreview,
+Response V8DebuggerAgentImpl::evaluateOnCallFrame(
+    const String16& callFrameId, const String16& expression,
+    Maybe<String16> objectGroup, Maybe<bool> includeCommandLineAPI,
+    Maybe<bool> silent, Maybe<bool> returnByValue, Maybe<bool> generatePreview,
     std::unique_ptr<RemoteObject>* result,
     Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
-  if (!assertPaused(errorString)) return;
-  InjectedScript::CallFrameScope scope(
-      errorString, m_inspector, m_session->contextGroupId(), callFrameId);
-  if (!scope.initialize()) return;
-  if (scope.frameOrdinal() >= m_pausedCallFrames.size()) {
-    *errorString = "Could not find call frame with given id";
-    return;
-  }
+  if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
+  InjectedScript::CallFrameScope scope(m_inspector, m_session->contextGroupId(),
+                                       callFrameId);
+  Response response = scope.initialize();
+  if (!response.isSuccess()) return response;
+  if (scope.frameOrdinal() >= m_pausedCallFrames.size())
+    return Response::Error("Could not find call frame with given id");
 
-  if (includeCommandLineAPI.fromMaybe(false) && !scope.installCommandLineAPI())
-    return;
+  if (includeCommandLineAPI.fromMaybe(false)) scope.installCommandLineAPI();
   if (silent.fromMaybe(false)) scope.ignoreExceptionsAndMuteConsole();
 
   v8::MaybeLocal<v8::Value> maybeResultValue =
@@ -737,56 +743,52 @@
 
   // Re-initialize after running client's code, as it could have destroyed
   // context or session.
-  if (!scope.initialize()) return;
-  scope.injectedScript()->wrapEvaluateResult(
-      errorString, maybeResultValue, scope.tryCatch(),
-      objectGroup.fromMaybe(""), returnByValue.fromMaybe(false),
-      generatePreview.fromMaybe(false), result, exceptionDetails);
+  response = scope.initialize();
+  if (!response.isSuccess()) return response;
+  return scope.injectedScript()->wrapEvaluateResult(
+      maybeResultValue, scope.tryCatch(), objectGroup.fromMaybe(""),
+      returnByValue.fromMaybe(false), generatePreview.fromMaybe(false), result,
+      exceptionDetails);
 }
 
-void V8DebuggerAgentImpl::setVariableValue(
-    ErrorString* errorString, int scopeNumber, const String16& variableName,
+Response V8DebuggerAgentImpl::setVariableValue(
+    int scopeNumber, const String16& variableName,
     std::unique_ptr<protocol::Runtime::CallArgument> newValueArgument,
     const String16& callFrameId) {
-  if (!checkEnabled(errorString)) return;
-  if (!assertPaused(errorString)) return;
-  InjectedScript::CallFrameScope scope(
-      errorString, m_inspector, m_session->contextGroupId(), callFrameId);
-  if (!scope.initialize()) return;
-
+  if (!enabled()) return Response::Error(kDebuggerNotEnabled);
+  if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
+  InjectedScript::CallFrameScope scope(m_inspector, m_session->contextGroupId(),
+                                       callFrameId);
+  Response response = scope.initialize();
+  if (!response.isSuccess()) return response;
   v8::Local<v8::Value> newValue;
-  if (!scope.injectedScript()
-           ->resolveCallArgument(errorString, newValueArgument.get())
-           .ToLocal(&newValue))
-    return;
+  response = scope.injectedScript()->resolveCallArgument(newValueArgument.get(),
+                                                         &newValue);
+  if (!response.isSuccess()) return response;
 
-  if (scope.frameOrdinal() >= m_pausedCallFrames.size()) {
-    *errorString = "Could not find call frame with given id";
-    return;
-  }
+  if (scope.frameOrdinal() >= m_pausedCallFrames.size())
+    return Response::Error("Could not find call frame with given id");
   v8::MaybeLocal<v8::Value> result =
       m_pausedCallFrames[scope.frameOrdinal()]->setVariableValue(
           scopeNumber, toV8String(m_isolate, variableName), newValue);
-  if (scope.tryCatch().HasCaught() || result.IsEmpty()) {
-    *errorString = "Internal error";
-    return;
-  }
+  if (scope.tryCatch().HasCaught() || result.IsEmpty())
+    return Response::InternalError();
+  return Response::OK();
 }
 
-void V8DebuggerAgentImpl::setAsyncCallStackDepth(ErrorString* errorString,
-                                                 int depth) {
-  if (!checkEnabled(errorString)) return;
+Response V8DebuggerAgentImpl::setAsyncCallStackDepth(int depth) {
+  if (!enabled()) return Response::Error(kDebuggerNotEnabled);
   m_state->setInteger(DebuggerAgentState::asyncCallStackDepth, depth);
   m_debugger->setAsyncCallStackDepth(this, depth);
+  return Response::OK();
 }
 
-void V8DebuggerAgentImpl::setBlackboxPatterns(
-    ErrorString* errorString,
+Response V8DebuggerAgentImpl::setBlackboxPatterns(
     std::unique_ptr<protocol::Array<String16>> patterns) {
   if (!patterns->length()) {
     m_blackboxPattern = nullptr;
     m_state->remove(DebuggerAgentState::blackboxPattern);
-    return;
+    return Response::OK();
   }
 
   String16Builder patternBuilder;
@@ -798,48 +800,41 @@
   patternBuilder.append(patterns->get(patterns->length() - 1));
   patternBuilder.append(')');
   String16 pattern = patternBuilder.toString();
-  if (!setBlackboxPattern(errorString, pattern)) return;
+  Response response = setBlackboxPattern(pattern);
+  if (!response.isSuccess()) return response;
   m_state->setString(DebuggerAgentState::blackboxPattern, pattern);
+  return Response::OK();
 }
 
-bool V8DebuggerAgentImpl::setBlackboxPattern(ErrorString* errorString,
-                                             const String16& pattern) {
+Response V8DebuggerAgentImpl::setBlackboxPattern(const String16& pattern) {
   std::unique_ptr<V8Regex> regex(new V8Regex(
       m_inspector, pattern, true /** caseSensitive */, false /** multiline */));
-  if (!regex->isValid()) {
-    *errorString = "Pattern parser error: " + regex->errorMessage();
-    return false;
-  }
+  if (!regex->isValid())
+    return Response::Error("Pattern parser error: " + regex->errorMessage());
   m_blackboxPattern = std::move(regex);
-  return true;
+  return Response::OK();
 }
 
-void V8DebuggerAgentImpl::setBlackboxedRanges(
-    ErrorString* error, const String16& scriptId,
+Response V8DebuggerAgentImpl::setBlackboxedRanges(
+    const String16& scriptId,
     std::unique_ptr<protocol::Array<protocol::Debugger::ScriptPosition>>
         inPositions) {
-  if (m_scripts.find(scriptId) == m_scripts.end()) {
-    *error = "No script with passed id.";
-    return;
-  }
+  if (m_scripts.find(scriptId) == m_scripts.end())
+    return Response::Error("No script with passed id.");
 
   if (!inPositions->length()) {
     m_blackboxedPositions.erase(scriptId);
-    return;
+    return Response::OK();
   }
 
   std::vector<std::pair<int, int>> positions;
   positions.reserve(inPositions->length());
   for (size_t i = 0; i < inPositions->length(); ++i) {
     protocol::Debugger::ScriptPosition* position = inPositions->get(i);
-    if (position->getLineNumber() < 0) {
-      *error = "Position missing 'line' or 'line' < 0.";
-      return;
-    }
-    if (position->getColumnNumber() < 0) {
-      *error = "Position missing 'column' or 'column' < 0.";
-      return;
-    }
+    if (position->getLineNumber() < 0)
+      return Response::Error("Position missing 'line' or 'line' < 0.");
+    if (position->getColumnNumber() < 0)
+      return Response::Error("Position missing 'column' or 'column' < 0.");
     positions.push_back(
         std::make_pair(position->getLineNumber(), position->getColumnNumber()));
   }
@@ -849,12 +844,12 @@
     if (positions[i - 1].first == positions[i].first &&
         positions[i - 1].second < positions[i].second)
       continue;
-    *error =
-        "Input positions array is not sorted or contains duplicate values.";
-    return;
+    return Response::Error(
+        "Input positions array is not sorted or contains duplicate values.");
   }
 
   m_blackboxedPositions[scriptId] = positions;
+  return Response::OK();
 }
 
 void V8DebuggerAgentImpl::willExecuteScript(int scriptId) {
@@ -907,14 +902,15 @@
   }
 }
 
-std::unique_ptr<Array<CallFrame>> V8DebuggerAgentImpl::currentCallFrames(
-    ErrorString* errorString) {
-  if (m_pausedContext.IsEmpty() || !m_pausedCallFrames.size())
-    return Array<CallFrame>::create();
-  ErrorString ignored;
+Response V8DebuggerAgentImpl::currentCallFrames(
+    std::unique_ptr<Array<CallFrame>>* result) {
+  if (m_pausedContext.IsEmpty() || !m_pausedCallFrames.size()) {
+    *result = Array<CallFrame>::create();
+    return Response::OK();
+  }
   v8::HandleScope handles(m_isolate);
   v8::Local<v8::Context> debuggerContext =
-      v8::Debug::GetDebugContext(m_isolate);
+      v8::DebugInterface::GetDebugContext(m_isolate);
   v8::Context::Scope contextScope(debuggerContext);
 
   v8::Local<v8::Array> objects = v8::Array::New(m_isolate);
@@ -925,104 +921,92 @@
         m_pausedCallFrames[frameOrdinal];
 
     v8::Local<v8::Object> details = currentCallFrame->details();
-    if (hasInternalError(errorString, details.IsEmpty()))
-      return Array<CallFrame>::create();
+    if (details.IsEmpty()) return Response::InternalError();
 
     int contextId = currentCallFrame->contextId();
-    InjectedScript* injectedScript =
-        contextId ? m_session->findInjectedScript(&ignored, contextId)
-                  : nullptr;
+
+    InjectedScript* injectedScript = nullptr;
+    if (contextId) m_session->findInjectedScript(contextId, injectedScript);
 
     String16 callFrameId =
         RemoteCallFrameId::serialize(contextId, static_cast<int>(frameOrdinal));
-    if (hasInternalError(
-            errorString,
-            !details
-                 ->Set(debuggerContext,
-                       toV8StringInternalized(m_isolate, "callFrameId"),
-                       toV8String(m_isolate, callFrameId))
-                 .FromMaybe(false)))
-      return Array<CallFrame>::create();
+    if (!details
+             ->Set(debuggerContext,
+                   toV8StringInternalized(m_isolate, "callFrameId"),
+                   toV8String(m_isolate, callFrameId))
+             .FromMaybe(false)) {
+      return Response::InternalError();
+    }
 
     if (injectedScript) {
       v8::Local<v8::Value> scopeChain;
-      if (hasInternalError(
-              errorString,
-              !details->Get(debuggerContext,
-                            toV8StringInternalized(m_isolate, "scopeChain"))
-                      .ToLocal(&scopeChain) ||
-                  !scopeChain->IsArray()))
-        return Array<CallFrame>::create();
+      if (!details
+               ->Get(debuggerContext,
+                     toV8StringInternalized(m_isolate, "scopeChain"))
+               .ToLocal(&scopeChain) ||
+          !scopeChain->IsArray()) {
+        return Response::InternalError();
+      }
       v8::Local<v8::Array> scopeChainArray = scopeChain.As<v8::Array>();
-      if (!injectedScript->wrapPropertyInArray(
-              errorString, scopeChainArray,
-              toV8StringInternalized(m_isolate, "object"),
-              backtraceObjectGroup))
-        return Array<CallFrame>::create();
-      if (!injectedScript->wrapObjectProperty(
-              errorString, details, toV8StringInternalized(m_isolate, "this"),
-              backtraceObjectGroup))
-        return Array<CallFrame>::create();
+      Response response = injectedScript->wrapPropertyInArray(
+          scopeChainArray, toV8StringInternalized(m_isolate, "object"),
+          kBacktraceObjectGroup);
+      if (!response.isSuccess()) return response;
+      response = injectedScript->wrapObjectProperty(
+          details, toV8StringInternalized(m_isolate, "this"),
+          kBacktraceObjectGroup);
+      if (!response.isSuccess()) return response;
       if (details
               ->Has(debuggerContext,
                     toV8StringInternalized(m_isolate, "returnValue"))
               .FromMaybe(false)) {
-        if (!injectedScript->wrapObjectProperty(
-                errorString, details,
-                toV8StringInternalized(m_isolate, "returnValue"),
-                backtraceObjectGroup))
-          return Array<CallFrame>::create();
+        response = injectedScript->wrapObjectProperty(
+            details, toV8StringInternalized(m_isolate, "returnValue"),
+            kBacktraceObjectGroup);
+        if (!response.isSuccess()) return response;
       }
     } else {
-      if (hasInternalError(errorString, !details
-                                             ->Set(debuggerContext,
-                                                   toV8StringInternalized(
-                                                       m_isolate, "scopeChain"),
-                                                   v8::Array::New(m_isolate, 0))
-                                             .FromMaybe(false)))
-        return Array<CallFrame>::create();
+      if (!details
+               ->Set(debuggerContext,
+                     toV8StringInternalized(m_isolate, "scopeChain"),
+                     v8::Array::New(m_isolate, 0))
+               .FromMaybe(false)) {
+        return Response::InternalError();
+      }
       v8::Local<v8::Object> remoteObject = v8::Object::New(m_isolate);
-      if (hasInternalError(
-              errorString,
-              !remoteObject
-                   ->Set(debuggerContext,
-                         toV8StringInternalized(m_isolate, "type"),
-                         toV8StringInternalized(m_isolate, "undefined"))
-                   .FromMaybe(false)))
-        return Array<CallFrame>::create();
-      if (hasInternalError(errorString,
-                           !details
-                                ->Set(debuggerContext,
-                                      toV8StringInternalized(m_isolate, "this"),
-                                      remoteObject)
-                                .FromMaybe(false)))
-        return Array<CallFrame>::create();
-      if (hasInternalError(
-              errorString,
-              !details
-                   ->Delete(debuggerContext,
-                            toV8StringInternalized(m_isolate, "returnValue"))
-                   .FromMaybe(false)))
-        return Array<CallFrame>::create();
+      if (!remoteObject
+               ->Set(debuggerContext, toV8StringInternalized(m_isolate, "type"),
+                     toV8StringInternalized(m_isolate, "undefined"))
+               .FromMaybe(false)) {
+        return Response::InternalError();
+      }
+      if (!details
+               ->Set(debuggerContext, toV8StringInternalized(m_isolate, "this"),
+                     remoteObject)
+               .FromMaybe(false)) {
+        return Response::InternalError();
+      }
+      if (!details
+               ->Delete(debuggerContext,
+                        toV8StringInternalized(m_isolate, "returnValue"))
+               .FromMaybe(false)) {
+        return Response::InternalError();
+      }
     }
 
-    if (hasInternalError(
-            errorString,
-            !objects
-                 ->Set(debuggerContext, static_cast<int>(frameOrdinal), details)
-                 .FromMaybe(false)))
-      return Array<CallFrame>::create();
+    if (!objects->Set(debuggerContext, static_cast<int>(frameOrdinal), details)
+             .FromMaybe(false)) {
+      return Response::InternalError();
+    }
   }
 
-  std::unique_ptr<protocol::Value> protocolValue =
-      toProtocolValue(errorString, debuggerContext, objects);
-  if (!protocolValue) return Array<CallFrame>::create();
+  std::unique_ptr<protocol::Value> protocolValue;
+  Response response = toProtocolValue(debuggerContext, objects, &protocolValue);
+  if (!response.isSuccess()) return response;
   protocol::ErrorSupport errorSupport;
-  std::unique_ptr<Array<CallFrame>> callFrames =
-      Array<CallFrame>::parse(protocolValue.get(), &errorSupport);
-  if (hasInternalError(errorString, !callFrames))
-    return Array<CallFrame>::create();
-  return callFrames;
+  *result = Array<CallFrame>::parse(protocolValue.get(), &errorSupport);
+  if (!*result) return Response::Error(errorSupport.errors());
+  return Response::OK();
 }
 
 std::unique_ptr<StackTrace> V8DebuggerAgentImpl::currentAsyncStackTrace() {
@@ -1049,8 +1033,8 @@
   String16 scriptId = script->scriptId();
   String16 scriptURL = script->sourceURL();
 
-  const Maybe<String16>& sourceMapURLParam = script->sourceMappingURL();
-  const Maybe<protocol::DictionaryValue>& executionContextAuxDataParam(
+  Maybe<String16> sourceMapURLParam = script->sourceMappingURL();
+  Maybe<protocol::DictionaryValue> executionContextAuxDataParam(
       std::move(executionContextAuxData));
   const bool* isLiveEditParam = isLiveEdit ? &isLiveEdit : nullptr;
   const bool* hasSourceURLParam = hasSourceURL ? &hasSourceURL : nullptr;
@@ -1058,14 +1042,14 @@
     m_frontend.scriptParsed(
         scriptId, scriptURL, script->startLine(), script->startColumn(),
         script->endLine(), script->endColumn(), script->executionContextId(),
-        script->hash(), executionContextAuxDataParam, isLiveEditParam,
-        sourceMapURLParam, hasSourceURLParam);
+        script->hash(), std::move(executionContextAuxDataParam),
+        isLiveEditParam, std::move(sourceMapURLParam), hasSourceURLParam);
   else
     m_frontend.scriptFailedToParse(
         scriptId, scriptURL, script->startLine(), script->startColumn(),
         script->endLine(), script->endColumn(), script->executionContextId(),
-        script->hash(), executionContextAuxDataParam, sourceMapURLParam,
-        hasSourceURLParam);
+        script->hash(), std::move(executionContextAuxDataParam),
+        std::move(sourceMapURLParam), hasSourceURLParam);
 
   m_scripts[scriptId] = std::move(script);
 
@@ -1100,7 +1084,8 @@
 
 V8DebuggerAgentImpl::SkipPauseRequest V8DebuggerAgentImpl::didPause(
     v8::Local<v8::Context> context, v8::Local<v8::Value> exception,
-    const std::vector<String16>& hitBreakpoints, bool isPromiseRejection) {
+    const std::vector<String16>& hitBreakpoints, bool isPromiseRejection,
+    bool isUncaught) {
   JavaScriptCallFrames callFrames = m_debugger->currentCallFrames(1);
   JavaScriptCallFrame* topCallFrame =
       !callFrames.empty() ? callFrames.begin()->get() : nullptr;
@@ -1131,18 +1116,23 @@
   v8::HandleScope handles(m_isolate);
 
   if (!exception.IsEmpty()) {
-    ErrorString ignored;
-    InjectedScript* injectedScript =
-        m_session->findInjectedScript(&ignored, V8Debugger::contextId(context));
+    InjectedScript* injectedScript = nullptr;
+    m_session->findInjectedScript(V8Debugger::contextId(context),
+                                  injectedScript);
     if (injectedScript) {
       m_breakReason =
           isPromiseRejection
               ? protocol::Debugger::Paused::ReasonEnum::PromiseRejection
               : protocol::Debugger::Paused::ReasonEnum::Exception;
-      ErrorString errorString;
-      auto obj = injectedScript->wrapObject(&errorString, exception,
-                                            backtraceObjectGroup);
-      m_breakAuxData = obj ? obj->serialize() : nullptr;
+      std::unique_ptr<protocol::Runtime::RemoteObject> obj;
+      injectedScript->wrapObject(exception, kBacktraceObjectGroup, false, false,
+                                 &obj);
+      if (obj) {
+        m_breakAuxData = obj->serialize();
+        m_breakAuxData->setBoolean("uncaught", isUncaught);
+      } else {
+        m_breakAuxData = nullptr;
+      }
       // m_breakAuxData might be null after this.
     }
   }
@@ -1163,8 +1153,10 @@
     }
   }
 
-  ErrorString errorString;
-  m_frontend.paused(currentCallFrames(&errorString), m_breakReason,
+  std::unique_ptr<Array<CallFrame>> protocolCallFrames;
+  Response response = currentCallFrames(&protocolCallFrames);
+  if (!response.isSuccess()) protocolCallFrames = Array<CallFrame>::create();
+  m_frontend.paused(std::move(protocolCallFrames), m_breakReason,
                     std::move(m_breakAuxData), std::move(hitBreakpointIds),
                     currentAsyncStackTrace());
   m_scheduledDebuggerStep = NoStep;
@@ -1209,19 +1201,11 @@
     std::unique_ptr<protocol::DictionaryValue> data) {
   if (!enabled() ||
       m_debugger->getPauseOnExceptionsState() ==
-          V8Debugger::DontPauseOnExceptions)
+          v8::DebugInterface::NoBreakOnException)
     return;
   breakProgram(breakReason, std::move(data));
 }
 
-bool V8DebuggerAgentImpl::assertPaused(ErrorString* errorString) {
-  if (m_pausedContext.IsEmpty()) {
-    *errorString = "Can only perform operation while paused.";
-    return false;
-  }
-  return true;
-}
-
 void V8DebuggerAgentImpl::clearBreakDetails() {
   m_breakReason = protocol::Debugger::Paused::ReasonEnum::Other;
   m_breakAuxData = nullptr;
@@ -1240,7 +1224,7 @@
 void V8DebuggerAgentImpl::removeBreakpointAt(const String16& scriptId,
                                              int lineNumber, int columnNumber,
                                              BreakpointSource source) {
-  removeBreakpoint(
+  removeBreakpointImpl(
       generateBreakpointId(scriptId, lineNumber, columnNumber, source));
 }
 
diff --git a/src/inspector/v8-debugger-agent-impl.h b/src/inspector/v8-debugger-agent-impl.h
index 62aa67b..e5285f4 100644
--- a/src/inspector/v8-debugger-agent-impl.h
+++ b/src/inspector/v8-debugger-agent-impl.h
@@ -24,8 +24,8 @@
 class V8Regex;
 class V8StackTraceImpl;
 
-using protocol::ErrorString;
 using protocol::Maybe;
+using protocol::Response;
 
 class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
  public:
@@ -49,67 +49,69 @@
   void restore();
 
   // Part of the protocol.
-  void enable(ErrorString*) override;
-  void disable(ErrorString*) override;
-  void setBreakpointsActive(ErrorString*, bool active) override;
-  void setSkipAllPauses(ErrorString*, bool skip) override;
-  void setBreakpointByUrl(
-      ErrorString*, int lineNumber, const Maybe<String16>& optionalURL,
-      const Maybe<String16>& optionalURLRegex,
-      const Maybe<int>& optionalColumnNumber,
-      const Maybe<String16>& optionalCondition, String16*,
+  Response enable() override;
+  Response disable() override;
+  Response setBreakpointsActive(bool active) override;
+  Response setSkipAllPauses(bool skip) override;
+  Response setBreakpointByUrl(
+      int lineNumber, Maybe<String16> optionalURL,
+      Maybe<String16> optionalURLRegex, Maybe<int> optionalColumnNumber,
+      Maybe<String16> optionalCondition, String16*,
       std::unique_ptr<protocol::Array<protocol::Debugger::Location>>* locations)
       override;
-  void setBreakpoint(
-      ErrorString*, std::unique_ptr<protocol::Debugger::Location>,
-      const Maybe<String16>& optionalCondition, String16*,
+  Response setBreakpoint(
+      std::unique_ptr<protocol::Debugger::Location>,
+      Maybe<String16> optionalCondition, String16*,
       std::unique_ptr<protocol::Debugger::Location>* actualLocation) override;
-  void removeBreakpoint(ErrorString*, const String16& breakpointId) override;
-  void continueToLocation(
-      ErrorString*, std::unique_ptr<protocol::Debugger::Location>) override;
-  void searchInContent(
-      ErrorString*, const String16& scriptId, const String16& query,
-      const Maybe<bool>& optionalCaseSensitive,
-      const Maybe<bool>& optionalIsRegex,
+  Response removeBreakpoint(const String16& breakpointId) override;
+  Response continueToLocation(
+      std::unique_ptr<protocol::Debugger::Location>) override;
+  Response searchInContent(
+      const String16& scriptId, const String16& query,
+      Maybe<bool> optionalCaseSensitive, Maybe<bool> optionalIsRegex,
       std::unique_ptr<protocol::Array<protocol::Debugger::SearchMatch>>*)
       override;
-  void setScriptSource(
-      ErrorString*, const String16& inScriptId, const String16& inScriptSource,
-      const Maybe<bool>& dryRun,
+  Response getPossibleBreakpoints(
+      std::unique_ptr<protocol::Debugger::Location> start,
+      Maybe<protocol::Debugger::Location> end,
+      std::unique_ptr<protocol::Array<protocol::Debugger::Location>>* locations)
+      override;
+  Response setScriptSource(
+      const String16& inScriptId, const String16& inScriptSource,
+      Maybe<bool> dryRun,
       Maybe<protocol::Array<protocol::Debugger::CallFrame>>* optOutCallFrames,
       Maybe<bool>* optOutStackChanged,
       Maybe<protocol::Runtime::StackTrace>* optOutAsyncStackTrace,
       Maybe<protocol::Runtime::ExceptionDetails>* optOutCompileError) override;
-  void restartFrame(
-      ErrorString*, const String16& callFrameId,
+  Response restartFrame(
+      const String16& callFrameId,
       std::unique_ptr<protocol::Array<protocol::Debugger::CallFrame>>*
           newCallFrames,
       Maybe<protocol::Runtime::StackTrace>* asyncStackTrace) override;
-  void getScriptSource(ErrorString*, const String16& scriptId,
-                       String16* scriptSource) override;
-  void pause(ErrorString*) override;
-  void resume(ErrorString*) override;
-  void stepOver(ErrorString*) override;
-  void stepInto(ErrorString*) override;
-  void stepOut(ErrorString*) override;
-  void setPauseOnExceptions(ErrorString*, const String16& pauseState) override;
-  void evaluateOnCallFrame(
-      ErrorString*, const String16& callFrameId, const String16& expression,
-      const Maybe<String16>& objectGroup,
-      const Maybe<bool>& includeCommandLineAPI, const Maybe<bool>& silent,
-      const Maybe<bool>& returnByValue, const Maybe<bool>& generatePreview,
+  Response getScriptSource(const String16& scriptId,
+                           String16* scriptSource) override;
+  Response pause() override;
+  Response resume() override;
+  Response stepOver() override;
+  Response stepInto() override;
+  Response stepOut() override;
+  Response setPauseOnExceptions(const String16& pauseState) override;
+  Response evaluateOnCallFrame(
+      const String16& callFrameId, const String16& expression,
+      Maybe<String16> objectGroup, Maybe<bool> includeCommandLineAPI,
+      Maybe<bool> silent, Maybe<bool> returnByValue,
+      Maybe<bool> generatePreview,
       std::unique_ptr<protocol::Runtime::RemoteObject>* result,
       Maybe<protocol::Runtime::ExceptionDetails>*) override;
-  void setVariableValue(
-      ErrorString*, int scopeNumber, const String16& variableName,
+  Response setVariableValue(
+      int scopeNumber, const String16& variableName,
       std::unique_ptr<protocol::Runtime::CallArgument> newValue,
       const String16& callFrame) override;
-  void setAsyncCallStackDepth(ErrorString*, int depth) override;
-  void setBlackboxPatterns(
-      ErrorString*,
+  Response setAsyncCallStackDepth(int depth) override;
+  Response setBlackboxPatterns(
       std::unique_ptr<protocol::Array<String16>> patterns) override;
-  void setBlackboxedRanges(
-      ErrorString*, const String16& scriptId,
+  Response setBlackboxedRanges(
+      const String16& scriptId,
       std::unique_ptr<protocol::Array<protocol::Debugger::ScriptPosition>>
           positions) override;
 
@@ -135,7 +137,7 @@
   SkipPauseRequest didPause(v8::Local<v8::Context>,
                             v8::Local<v8::Value> exception,
                             const std::vector<String16>& hitBreakpoints,
-                            bool isPromiseRejection);
+                            bool isPromiseRejection, bool isUncaught);
   void didContinue();
   void didParseSource(std::unique_ptr<V8DebuggerScript>, bool success);
   void willExecuteScript(int scriptId);
@@ -144,27 +146,25 @@
   v8::Isolate* isolate() { return m_isolate; }
 
  private:
-  bool checkEnabled(ErrorString*);
-  void enable();
+  void enableImpl();
 
   SkipPauseRequest shouldSkipExceptionPause(JavaScriptCallFrame* topCallFrame);
   SkipPauseRequest shouldSkipStepPause(JavaScriptCallFrame* topCallFrame);
 
   void schedulePauseOnNextStatementIfSteppingInto();
 
-  std::unique_ptr<protocol::Array<protocol::Debugger::CallFrame>>
-  currentCallFrames(ErrorString*);
+  Response currentCallFrames(
+      std::unique_ptr<protocol::Array<protocol::Debugger::CallFrame>>*);
   std::unique_ptr<protocol::Runtime::StackTrace> currentAsyncStackTrace();
 
   void changeJavaScriptRecursionLevel(int step);
 
-  void setPauseOnExceptionsImpl(ErrorString*, int);
+  void setPauseOnExceptionsImpl(int);
 
   std::unique_ptr<protocol::Debugger::Location> resolveBreakpoint(
       const String16& breakpointId, const String16& scriptId,
       const ScriptBreakpoint&, BreakpointSource);
-  void removeBreakpoint(const String16& breakpointId);
-  bool assertPaused(ErrorString*);
+  void removeBreakpointImpl(const String16& breakpointId);
   void clearBreakDetails();
 
   bool isCurrentCallStackEmptyOrBlackboxed();
@@ -174,7 +174,7 @@
   void internalSetAsyncCallStackDepth(int);
   void increaseCachedSkipStackGeneration();
 
-  bool setBlackboxPattern(ErrorString*, const String16& pattern);
+  Response setBlackboxPattern(const String16& pattern);
 
   using ScriptsMap =
       protocol::HashMap<String16, std::unique_ptr<V8DebuggerScript>>;
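
The header above also switches optional parameters from const Maybe<T>& to by-value Maybe<T>, which lets handlers move out of them (see the std::move calls on sourceMapURLParam in didParseSource). A rough stand-in for the Maybe semantics the handlers rely on; the real type is generated protocol code, so treat these details as assumptions.

// Sketch of the optional-parameter semantics used by the handlers above.
// Illustrative only; assumes T is default-constructible.
#include <cassert>
#include <utility>

template <typename T>
class Maybe {
 public:
  Maybe() : m_isJust(false), m_value() {}  // absent parameter
  explicit Maybe(T value) : m_isJust(true), m_value(std::move(value)) {}

  bool isJust() const { return m_isJust; }
  const T& fromJust() const {  // only valid when a value is present
    assert(m_isJust);
    return m_value;
  }
  T fromMaybe(T defaultValue) const {  // value if present, else fallback
    return m_isJust ? m_value : defaultValue;
  }

 private:
  bool m_isJust;
  T m_value;
};

// Usage mirroring setBreakpointByUrl above:
//   int columnNumber = 0;
//   if (optionalColumnNumber.isJust())
//     columnNumber = optionalColumnNumber.fromJust();
//   String16 condition = optionalCondition.fromMaybe("");
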
diff --git a/src/inspector/v8-debugger-script.cc b/src/inspector/v8-debugger-script.cc
index 485188a..ed0c0d6 100644
--- a/src/inspector/v8-debugger-script.cc
+++ b/src/inspector/v8-debugger-script.cc
@@ -67,50 +67,66 @@
   return hash.toString();
 }
 
-static v8::Local<v8::Value> GetChecked(v8::Local<v8::Context> context,
-                                       v8::Local<v8::Object> object,
-                                       const char* name) {
-  return object
-      ->Get(context, toV8StringInternalized(context->GetIsolate(), name))
-      .ToLocalChecked();
-}
-
-static int GetCheckedInt(v8::Local<v8::Context> context,
-                         v8::Local<v8::Object> object, const char* name) {
-  return static_cast<int>(GetChecked(context, object, name)
-                              ->ToInteger(context)
-                              .ToLocalChecked()
-                              ->Value());
-}
-
-V8DebuggerScript::V8DebuggerScript(v8::Local<v8::Context> context,
-                                   v8::Local<v8::Object> object,
+V8DebuggerScript::V8DebuggerScript(v8::Isolate* isolate,
+                                   v8::Local<v8::DebugInterface::Script> script,
                                    bool isLiveEdit) {
-  v8::Isolate* isolate = context->GetIsolate();
-  v8::Local<v8::Value> idValue = GetChecked(context, object, "id");
-  DCHECK(!idValue.IsEmpty() && idValue->IsInt32());
-  m_id = String16::fromInteger(idValue->Int32Value(context).FromJust());
+  m_isolate = script->GetIsolate();
+  m_id = String16::fromInteger(script->Id());
+  v8::Local<v8::String> tmp;
+  if (script->Name().ToLocal(&tmp)) m_url = toProtocolString(tmp);
+  if (script->SourceURL().ToLocal(&tmp)) {
+    m_sourceURL = toProtocolString(tmp);
+    if (m_url.isEmpty()) m_url = toProtocolString(tmp);
+  }
+  if (script->SourceMappingURL().ToLocal(&tmp))
+    m_sourceMappingURL = toProtocolString(tmp);
+  m_startLine = script->LineOffset();
+  m_startColumn = script->ColumnOffset();
+  std::vector<int> lineEnds = script->LineEnds();
+  CHECK(lineEnds.size());
+  int source_length = lineEnds[lineEnds.size() - 1];
+  if (lineEnds.size()) {
+    m_endLine = static_cast<int>(lineEnds.size()) + m_startLine - 1;
+    if (lineEnds.size() > 1) {
+      m_endColumn = source_length - lineEnds[lineEnds.size() - 2] - 1;
+    } else {
+      m_endColumn = source_length + m_startColumn;
+    }
+  } else {
+    m_endLine = m_startLine;
+    m_endColumn = m_startColumn;
+  }
 
-  m_url = toProtocolStringWithTypeCheck(GetChecked(context, object, "name"));
-  m_sourceURL =
-      toProtocolStringWithTypeCheck(GetChecked(context, object, "sourceURL"));
-  m_sourceMappingURL = toProtocolStringWithTypeCheck(
-      GetChecked(context, object, "sourceMappingURL"));
-  m_startLine = GetCheckedInt(context, object, "startLine");
-  m_startColumn = GetCheckedInt(context, object, "startColumn");
-  m_endLine = GetCheckedInt(context, object, "endLine");
-  m_endColumn = GetCheckedInt(context, object, "endColumn");
-  m_executionContextAuxData = toProtocolStringWithTypeCheck(
-      GetChecked(context, object, "executionContextAuxData"));
-  m_executionContextId = GetCheckedInt(context, object, "executionContextId");
+  if (script->ContextData().ToLocal(&tmp)) {
+    String16 contextData = toProtocolString(tmp);
+    size_t firstComma = contextData.find(",", 0);
+    size_t secondComma = firstComma != String16::kNotFound
+                             ? contextData.find(",", firstComma + 1)
+                             : String16::kNotFound;
+    if (secondComma != String16::kNotFound) {
+      String16 executionContextId =
+          contextData.substring(firstComma + 1, secondComma - firstComma - 1);
+      bool isOk = false;
+      m_executionContextId = executionContextId.toInteger(&isOk);
+      if (!isOk) m_executionContextId = 0;
+      m_executionContextAuxData = contextData.substring(secondComma + 1);
+    }
+  }
+
   m_isLiveEdit = isLiveEdit;
 
-  v8::Local<v8::Value> sourceValue;
-  if (!object->Get(context, toV8StringInternalized(isolate, "source"))
-           .ToLocal(&sourceValue) ||
-      !sourceValue->IsString())
-    return;
-  setSource(isolate, sourceValue.As<v8::String>());
+  if (script->Source().ToLocal(&tmp)) {
+    m_source.Reset(m_isolate, tmp);
+    String16 source = toProtocolString(tmp);
+    m_hash = calculateHash(source);
+    // V8 will not count last line if script source ends with \n.
+    if (source.length() > 1 && source[source.length() - 1] == '\n') {
+      m_endLine++;
+      m_endColumn = 0;
+    }
+  }
+
+  m_script.Reset(m_isolate, script);
 }
 
 V8DebuggerScript::~V8DebuggerScript() {}
@@ -131,10 +147,18 @@
   m_sourceMappingURL = sourceMappingURL;
 }
 
-void V8DebuggerScript::setSource(v8::Isolate* isolate,
-                                 v8::Local<v8::String> source) {
-  m_source.Reset(isolate, source);
+void V8DebuggerScript::setSource(v8::Local<v8::String> source) {
+  m_source.Reset(m_isolate, source);
   m_hash = calculateHash(toProtocolString(source));
 }
 
+bool V8DebuggerScript::getPossibleBreakpoints(
+    const v8::DebugInterface::Location& start,
+    const v8::DebugInterface::Location& end,
+    std::vector<v8::DebugInterface::Location>* locations) {
+  v8::HandleScope scope(m_isolate);
+  v8::Local<v8::DebugInterface::Script> script = m_script.Get(m_isolate);
+  return script->GetPossibleBreakpoints(start, end, locations);
+}
+
 }  // namespace v8_inspector
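
The rewritten constructor above derives the script's end position from LineEnds() instead of reading precomputed endLine/endColumn properties. A small self-contained sketch of that arithmetic, assuming (as the code above does) that each LineEnds() entry is the offset where a line ends and that the last entry equals the source length; the helper name is hypothetical.

// Sketch: recomputing endLine/endColumn the way the constructor above does.
#include <vector>

struct EndPosition {
  int endLine;
  int endColumn;
};

// Assumes lineEnds is non-empty, mirroring the CHECK in the constructor.
EndPosition computeEndPosition(const std::vector<int>& lineEnds, int startLine,
                               int startColumn, bool endsWithNewline) {
  int sourceLength = lineEnds.back();
  EndPosition pos;
  pos.endLine = static_cast<int>(lineEnds.size()) + startLine - 1;
  if (lineEnds.size() > 1) {
    // Column of the last character, counted from the previous line's end.
    pos.endColumn = sourceLength - lineEnds[lineEnds.size() - 2] - 1;
  } else {
    pos.endColumn = sourceLength + startColumn;  // single-line script
  }
  if (endsWithNewline) {
    // Matches the comment in the constructor: V8 does not count the final
    // (empty) line when the source ends with '\n'.
    pos.endLine++;
    pos.endColumn = 0;
  }
  return pos;
}
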
diff --git a/src/inspector/v8-debugger-script.h b/src/inspector/v8-debugger-script.h
index 78c44b5..97b5ba9 100644
--- a/src/inspector/v8-debugger-script.h
+++ b/src/inspector/v8-debugger-script.h
@@ -34,12 +34,14 @@
 #include "src/inspector/string-16.h"
 
 #include "include/v8.h"
+#include "src/debug/debug-interface.h"
 
 namespace v8_inspector {
 
 class V8DebuggerScript {
  public:
-  V8DebuggerScript(v8::Local<v8::Context>, v8::Local<v8::Object>,
+  V8DebuggerScript(v8::Isolate* isolate,
+                   v8::Local<v8::DebugInterface::Script> script,
                    bool isLiveEdit);
   ~V8DebuggerScript();
 
@@ -62,7 +64,12 @@
 
   void setSourceURL(const String16&);
   void setSourceMappingURL(const String16&);
-  void setSource(v8::Isolate*, v8::Local<v8::String>);
+  void setSource(v8::Local<v8::String>);
+
+  bool getPossibleBreakpoints(
+      const v8::DebugInterface::Location& start,
+      const v8::DebugInterface::Location& end,
+      std::vector<v8::DebugInterface::Location>* locations);
 
  private:
   String16 m_id;
@@ -79,6 +86,9 @@
   String16 m_executionContextAuxData;
   bool m_isLiveEdit;
 
+  v8::Isolate* m_isolate;
+  v8::Global<v8::DebugInterface::Script> m_script;
+
   DISALLOW_COPY_AND_ASSIGN(V8DebuggerScript);
 };
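
The new m_isolate and m_script members above follow the usual persistent-handle pattern: a v8::Global keeps the script alive across calls, and each accessor opens a v8::HandleScope and rehydrates a v8::Local before calling back into the engine, as getPossibleBreakpoints does. A generic sketch with illustrative names (ScriptHandle is not a V8 or inspector type):

// Sketch of the persistent-handle pattern used by V8DebuggerScript above.
#include "include/v8.h"

class ScriptHandle {
 public:
  ScriptHandle(v8::Isolate* isolate, v8::Local<v8::Object> script)
      : m_isolate(isolate) {
    m_script.Reset(isolate, script);  // keep the object alive across calls
  }

  int identityHash() {
    // Locals must live inside a HandleScope; rehydrate from the Global.
    v8::HandleScope scope(m_isolate);
    v8::Local<v8::Object> script = m_script.Get(m_isolate);
    return script->GetIdentityHash();
  }

 private:
  v8::Isolate* m_isolate;
  v8::Global<v8::Object> m_script;
};
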
 
diff --git a/src/inspector/v8-debugger.cc b/src/inspector/v8-debugger.cc
index d393f81..b3657e5 100644
--- a/src/inspector/v8-debugger.cc
+++ b/src/inspector/v8-debugger.cc
@@ -14,11 +14,11 @@
 #include "src/inspector/v8-stack-trace-impl.h"
 #include "src/inspector/v8-value-copier.h"
 
+#include "include/v8-util.h"
+
 namespace v8_inspector {
 
 namespace {
-const char stepIntoV8MethodName[] = "stepIntoStatement";
-const char stepOutV8MethodName[] = "stepOutOfFunction";
 static const char v8AsyncTaskEventEnqueue[] = "enqueue";
 static const char v8AsyncTaskEventEnqueueRecurring[] = "enqueueRecurring";
 static const char v8AsyncTaskEventWillHandle[] = "willHandle";
@@ -55,7 +55,8 @@
       m_breakpointsActivated(true),
       m_runningNestedMessageLoop(false),
       m_ignoreScriptParsedEventsCounter(0),
-      m_maxAsyncCallStackDepth(0) {}
+      m_maxAsyncCallStackDepth(0),
+      m_pauseOnExceptionsState(v8::DebugInterface::NoBreakOnException) {}
 
 V8Debugger::~V8Debugger() {}
 
@@ -63,9 +64,14 @@
   if (m_enableCount++) return;
   DCHECK(!enabled());
   v8::HandleScope scope(m_isolate);
-  v8::Debug::SetDebugEventListener(m_isolate, &V8Debugger::v8DebugEventCallback,
-                                   v8::External::New(m_isolate, this));
-  m_debuggerContext.Reset(m_isolate, v8::Debug::GetDebugContext(m_isolate));
+  v8::DebugInterface::SetDebugEventListener(m_isolate,
+                                            &V8Debugger::v8DebugEventCallback,
+                                            v8::External::New(m_isolate, this));
+  m_debuggerContext.Reset(m_isolate,
+                          v8::DebugInterface::GetDebugContext(m_isolate));
+  v8::DebugInterface::ChangeBreakOnException(
+      m_isolate, v8::DebugInterface::NoBreakOnException);
+  m_pauseOnExceptionsState = v8::DebugInterface::NoBreakOnException;
   compileDebuggerScript();
 }
 
@@ -76,7 +82,7 @@
   m_debuggerScript.Reset();
   m_debuggerContext.Reset();
   allAsyncTasksCanceled();
-  v8::Debug::SetDebugEventListener(m_isolate, nullptr);
+  v8::DebugInterface::SetDebugEventListener(m_isolate, nullptr);
 }
 
 bool V8Debugger::enabled() const { return !m_debuggerScript.IsEmpty(); }
@@ -112,29 +118,20 @@
     int contextGroupId,
     std::vector<std::unique_ptr<V8DebuggerScript>>& result) {
   v8::HandleScope scope(m_isolate);
-  v8::MicrotasksScope microtasks(m_isolate,
-                                 v8::MicrotasksScope::kDoNotRunMicrotasks);
-  v8::Local<v8::Context> context = debuggerContext();
-  v8::Local<v8::Object> debuggerScript = m_debuggerScript.Get(m_isolate);
-  DCHECK(!debuggerScript->IsUndefined());
-  v8::Local<v8::Function> getScriptsFunction = v8::Local<v8::Function>::Cast(
-      debuggerScript
-          ->Get(context, toV8StringInternalized(m_isolate, "getScripts"))
-          .ToLocalChecked());
-  v8::Local<v8::Value> argv[] = {v8::Integer::New(m_isolate, contextGroupId)};
-  v8::Local<v8::Value> value;
-  if (!getScriptsFunction->Call(context, debuggerScript, arraysize(argv), argv)
-           .ToLocal(&value))
-    return;
-  DCHECK(value->IsArray());
-  v8::Local<v8::Array> scriptsArray = v8::Local<v8::Array>::Cast(value);
-  result.reserve(scriptsArray->Length());
-  for (unsigned i = 0; i < scriptsArray->Length(); ++i) {
-    v8::Local<v8::Object> scriptObject = v8::Local<v8::Object>::Cast(
-        scriptsArray->Get(context, v8::Integer::New(m_isolate, i))
-            .ToLocalChecked());
-    result.push_back(wrapUnique(
-        new V8DebuggerScript(context, scriptObject, inLiveEditScope)));
+  v8::PersistentValueVector<v8::DebugInterface::Script> scripts(m_isolate);
+  v8::DebugInterface::GetLoadedScripts(m_isolate, scripts);
+  String16 contextPrefix = String16::fromInteger(contextGroupId) + ",";
+  for (size_t i = 0; i < scripts.Size(); ++i) {
+    v8::Local<v8::DebugInterface::Script> script = scripts.Get(i);
+    if (!script->WasCompiled()) continue;
+    v8::ScriptOriginOptions origin = script->OriginOptions();
+    if (origin.IsEmbedderDebugScript()) continue;
+    v8::Local<v8::String> v8ContextData;
+    if (!script->ContextData().ToLocal(&v8ContextData)) continue;
+    String16 contextData = toProtocolString(v8ContextData);
+    if (contextData.find(contextPrefix) != 0) continue;
+    result.push_back(
+        wrapUnique(new V8DebuggerScript(m_isolate, script, false)));
   }
 }
 
@@ -171,7 +168,7 @@
           ->Get(context, toV8StringInternalized(m_isolate, "setBreakpoint"))
           .ToLocalChecked());
   v8::Local<v8::Value> breakpointId =
-      v8::Debug::Call(debuggerContext(), setBreakpointFunction, info)
+      v8::DebugInterface::Call(debuggerContext(), setBreakpointFunction, info)
           .ToLocalChecked();
   if (!breakpointId->IsString()) return "";
   *actualLineNumber =
@@ -206,7 +203,7 @@
               ->Get(context,
                     toV8StringInternalized(m_isolate, "removeBreakpoint"))
               .ToLocalChecked());
-  v8::Debug::Call(debuggerContext(), removeBreakpointFunction, info)
+  v8::DebugInterface::Call(debuggerContext(), removeBreakpointFunction, info)
       .ToLocalChecked();
 }
 
@@ -219,7 +216,8 @@
       m_debuggerScript.Get(m_isolate)
           ->Get(context, toV8StringInternalized(m_isolate, "clearBreakpoints"))
           .ToLocalChecked());
-  v8::Debug::Call(debuggerContext(), clearBreakpoints).ToLocalChecked();
+  v8::DebugInterface::Call(debuggerContext(), clearBreakpoints)
+      .ToLocalChecked();
 }
 
 void V8Debugger::setBreakpointsActivated(bool activated) {
@@ -243,42 +241,32 @@
               ->Get(context, toV8StringInternalized(m_isolate,
                                                     "setBreakpointsActivated"))
               .ToLocalChecked());
-  v8::Debug::Call(debuggerContext(), setBreakpointsActivated, info)
+  v8::DebugInterface::Call(debuggerContext(), setBreakpointsActivated, info)
       .ToLocalChecked();
 
   m_breakpointsActivated = activated;
 }
 
-V8Debugger::PauseOnExceptionsState V8Debugger::getPauseOnExceptionsState() {
+v8::DebugInterface::ExceptionBreakState
+V8Debugger::getPauseOnExceptionsState() {
   DCHECK(enabled());
-  v8::HandleScope scope(m_isolate);
-  v8::Local<v8::Context> context = debuggerContext();
-  v8::Context::Scope contextScope(context);
-
-  v8::Local<v8::Value> argv[] = {v8::Undefined(m_isolate)};
-  v8::Local<v8::Value> result =
-      callDebuggerMethod("pauseOnExceptionsState", 0, argv).ToLocalChecked();
-  return static_cast<V8Debugger::PauseOnExceptionsState>(
-      result->Int32Value(context).FromJust());
+  return m_pauseOnExceptionsState;
 }
 
 void V8Debugger::setPauseOnExceptionsState(
-    PauseOnExceptionsState pauseOnExceptionsState) {
+    v8::DebugInterface::ExceptionBreakState pauseOnExceptionsState) {
   DCHECK(enabled());
-  v8::HandleScope scope(m_isolate);
-  v8::Context::Scope contextScope(debuggerContext());
-
-  v8::Local<v8::Value> argv[] = {
-      v8::Int32::New(m_isolate, pauseOnExceptionsState)};
-  callDebuggerMethod("setPauseOnExceptionsState", 1, argv);
+  if (m_pauseOnExceptionsState == pauseOnExceptionsState) return;
+  v8::DebugInterface::ChangeBreakOnException(m_isolate, pauseOnExceptionsState);
+  m_pauseOnExceptionsState = pauseOnExceptionsState;
 }
 
 void V8Debugger::setPauseOnNextStatement(bool pause) {
   if (m_runningNestedMessageLoop) return;
   if (pause)
-    v8::Debug::DebugBreak(m_isolate);
+    v8::DebugInterface::DebugBreak(m_isolate);
   else
-    v8::Debug::CancelDebugBreak(m_isolate);
+    v8::DebugInterface::CancelDebugBreak(m_isolate);
 }
 
 bool V8Debugger::canBreakProgram() {
@@ -306,7 +294,7 @@
                          v8::ConstructorBehavior::kThrow)
            .ToLocal(&breakFunction))
     return;
-  v8::Debug::Call(debuggerContext(), breakFunction).ToLocalChecked();
+  v8::DebugInterface::Call(debuggerContext(), breakFunction).ToLocalChecked();
 }
 
 void V8Debugger::continueProgram() {
@@ -318,52 +306,42 @@
 void V8Debugger::stepIntoStatement() {
   DCHECK(isPaused());
   DCHECK(!m_executionState.IsEmpty());
-  v8::HandleScope handleScope(m_isolate);
-  v8::Local<v8::Value> argv[] = {m_executionState};
-  callDebuggerMethod(stepIntoV8MethodName, 1, argv);
+  v8::DebugInterface::PrepareStep(m_isolate, v8::DebugInterface::StepIn);
   continueProgram();
 }
 
 void V8Debugger::stepOverStatement() {
   DCHECK(isPaused());
   DCHECK(!m_executionState.IsEmpty());
-  v8::HandleScope handleScope(m_isolate);
-  v8::Local<v8::Value> argv[] = {m_executionState};
-  callDebuggerMethod("stepOverStatement", 1, argv);
+  v8::DebugInterface::PrepareStep(m_isolate, v8::DebugInterface::StepNext);
   continueProgram();
 }
 
 void V8Debugger::stepOutOfFunction() {
   DCHECK(isPaused());
   DCHECK(!m_executionState.IsEmpty());
-  v8::HandleScope handleScope(m_isolate);
-  v8::Local<v8::Value> argv[] = {m_executionState};
-  callDebuggerMethod(stepOutV8MethodName, 1, argv);
+  v8::DebugInterface::PrepareStep(m_isolate, v8::DebugInterface::StepOut);
   continueProgram();
 }
 
 void V8Debugger::clearStepping() {
   DCHECK(enabled());
-  v8::HandleScope scope(m_isolate);
-  v8::Context::Scope contextScope(debuggerContext());
-
-  v8::Local<v8::Value> argv[] = {v8::Undefined(m_isolate)};
-  callDebuggerMethod("clearStepping", 0, argv);
+  v8::DebugInterface::ClearStepping(m_isolate);
 }
 
-bool V8Debugger::setScriptSource(
+Response V8Debugger::setScriptSource(
     const String16& sourceID, v8::Local<v8::String> newSource, bool dryRun,
-    ErrorString* error,
     Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails,
-    JavaScriptCallFrames* newCallFrames, Maybe<bool>* stackChanged) {
+    JavaScriptCallFrames* newCallFrames, Maybe<bool>* stackChanged,
+    bool* compileError) {
   class EnableLiveEditScope {
    public:
     explicit EnableLiveEditScope(v8::Isolate* isolate) : m_isolate(isolate) {
-      v8::Debug::SetLiveEditEnabled(m_isolate, true);
+      v8::DebugInterface::SetLiveEditEnabled(m_isolate, true);
       inLiveEditScope = true;
     }
     ~EnableLiveEditScope() {
-      v8::Debug::SetLiveEditEnabled(m_isolate, false);
+      v8::DebugInterface::SetLiveEditEnabled(m_isolate, false);
       inLiveEditScope = false;
     }
 
@@ -371,6 +349,7 @@
     v8::Isolate* m_isolate;
   };
 
+  *compileError = false;
   DCHECK(enabled());
   v8::HandleScope scope(m_isolate);
 
@@ -391,10 +370,9 @@
     if (tryCatch.HasCaught()) {
       v8::Local<v8::Message> message = tryCatch.Message();
       if (!message.IsEmpty())
-        *error = toProtocolStringWithTypeCheck(message->Get());
+        return Response::Error(toProtocolStringWithTypeCheck(message->Get()));
       else
-        *error = "Unknown error.";
-      return false;
+        return Response::InternalError();
     }
     v8result = maybeResult.ToLocalChecked();
   }
@@ -419,7 +397,7 @@
         JavaScriptCallFrames frames = currentCallFrames();
         newCallFrames->swap(frames);
       }
-      return true;
+      return Response::OK();
     }
     // Compile error.
     case 1: {
@@ -441,11 +419,11 @@
                                                     ->Value()) -
                                1)
               .build();
-      return false;
+      *compileError = true;
+      return Response::OK();
     }
   }
-  *error = "Unknown error.";
-  return false;
+  return Response::InternalError();
 }
 
 JavaScriptCallFrames V8Debugger::currentCallFrames(int limit) {
@@ -459,8 +437,8 @@
                       toV8StringInternalized(m_isolate, "currentCallFrames"))
                 .ToLocalChecked());
     currentCallFramesV8 =
-        v8::Debug::Call(debuggerContext(), currentCallFramesFunction,
-                        v8::Integer::New(m_isolate, limit))
+        v8::DebugInterface::Call(debuggerContext(), currentCallFramesFunction,
+                                 v8::Integer::New(m_isolate, limit))
             .ToLocalChecked();
   } else {
     v8::Local<v8::Value> argv[] = {m_executionState,
@@ -508,7 +486,7 @@
                                     v8::Local<v8::Object> executionState,
                                     v8::Local<v8::Value> exception,
                                     v8::Local<v8::Array> hitBreakpointNumbers,
-                                    bool isPromiseRejection) {
+                                    bool isPromiseRejection, bool isUncaught) {
   // Don't allow nested breaks.
   if (m_runningNestedMessageLoop) return;
 
@@ -531,7 +509,7 @@
   m_pausedContext = pausedContext;
   m_executionState = executionState;
   V8DebuggerAgentImpl::SkipPauseRequest result = agent->didPause(
-      pausedContext, exception, breakpointIds, isPromiseRejection);
+      pausedContext, exception, breakpointIds, isPromiseRejection, isUncaught);
   if (result == V8DebuggerAgentImpl::RequestNoSkip) {
     m_runningNestedMessageLoop = true;
     int groupId = getGroupId(pausedContext);
@@ -547,19 +525,16 @@
   m_executionState.Clear();
 
   if (result == V8DebuggerAgentImpl::RequestStepFrame) {
-    v8::Local<v8::Value> argv[] = {executionState};
-    callDebuggerMethod("stepFrameStatement", 1, argv);
+    v8::DebugInterface::PrepareStep(m_isolate, v8::DebugInterface::StepFrame);
   } else if (result == V8DebuggerAgentImpl::RequestStepInto) {
-    v8::Local<v8::Value> argv[] = {executionState};
-    callDebuggerMethod(stepIntoV8MethodName, 1, argv);
+    v8::DebugInterface::PrepareStep(m_isolate, v8::DebugInterface::StepIn);
   } else if (result == V8DebuggerAgentImpl::RequestStepOut) {
-    v8::Local<v8::Value> argv[] = {executionState};
-    callDebuggerMethod(stepOutV8MethodName, 1, argv);
+    v8::DebugInterface::PrepareStep(m_isolate, v8::DebugInterface::StepOut);
   }
 }
 
 void V8Debugger::v8DebugEventCallback(
-    const v8::Debug::EventDetails& eventDetails) {
+    const v8::DebugInterface::EventDetails& eventDetails) {
   V8Debugger* thisPtr = toV8Debugger(eventDetails.GetCallbackData());
   thisPtr->handleV8DebugEvent(eventDetails);
 }
@@ -575,12 +550,12 @@
           .ToLocalChecked();
   DCHECK(!getterValue.IsEmpty() && getterValue->IsFunction());
   return v8::Local<v8::Function>::Cast(getterValue)
-      ->Call(m_isolate->GetCurrentContext(), object, 0, 0)
+      ->Call(m_isolate->GetCurrentContext(), object, 0, nullptr)
       .ToLocalChecked();
 }
 
 void V8Debugger::handleV8DebugEvent(
-    const v8::Debug::EventDetails& eventDetails) {
+    const v8::DebugInterface::EventDetails& eventDetails) {
   if (!enabled()) return;
   v8::DebugEvent event = eventDetails.GetEvent();
   if (event != v8::AsyncTaskEvent && event != v8::Break &&
@@ -604,26 +579,35 @@
     v8::HandleScope scope(m_isolate);
     if (m_ignoreScriptParsedEventsCounter == 0 &&
         (event == v8::AfterCompile || event == v8::CompileError)) {
-      v8::Context::Scope contextScope(debuggerContext());
+      v8::Local<v8::Context> context = debuggerContext();
+      v8::Context::Scope contextScope(context);
       v8::Local<v8::Value> argv[] = {eventDetails.GetEventData()};
       v8::Local<v8::Value> value =
           callDebuggerMethod("getAfterCompileScript", 1, argv).ToLocalChecked();
       if (value->IsNull()) return;
       DCHECK(value->IsObject());
       v8::Local<v8::Object> scriptObject = v8::Local<v8::Object>::Cast(value);
+      v8::Local<v8::DebugInterface::Script> script;
+      if (!v8::DebugInterface::Script::Wrap(m_isolate, scriptObject)
+               .ToLocal(&script))
+        return;
       agent->didParseSource(
-          wrapUnique(new V8DebuggerScript(debuggerContext(), scriptObject,
-                                          inLiveEditScope)),
+          wrapUnique(new V8DebuggerScript(m_isolate, script, inLiveEditScope)),
           event == v8::AfterCompile);
     } else if (event == v8::Exception) {
+      v8::Local<v8::Context> context = debuggerContext();
       v8::Local<v8::Object> eventData = eventDetails.GetEventData();
       v8::Local<v8::Value> exception =
           callInternalGetterFunction(eventData, "exception");
       v8::Local<v8::Value> promise =
           callInternalGetterFunction(eventData, "promise");
       bool isPromiseRejection = !promise.IsEmpty() && promise->IsObject();
+      v8::Local<v8::Value> uncaught =
+          callInternalGetterFunction(eventData, "uncaught");
+      bool isUncaught = uncaught->BooleanValue(context).FromJust();
       handleProgramBreak(eventContext, eventDetails.GetExecutionState(),
-                         exception, v8::Local<v8::Array>(), isPromiseRejection);
+                         exception, v8::Local<v8::Array>(), isPromiseRejection,
+                         isUncaught);
     } else if (event == v8::Break) {
       v8::Local<v8::Value> argv[] = {eventDetails.GetEventData()};
       v8::Local<v8::Value> hitBreakpoints =
@@ -729,7 +713,8 @@
 v8::MaybeLocal<v8::Array> V8Debugger::internalProperties(
     v8::Local<v8::Context> context, v8::Local<v8::Value> value) {
   v8::Local<v8::Array> properties;
-  if (!v8::Debug::GetInternalProperties(m_isolate, value).ToLocal(&properties))
+  if (!v8::DebugInterface::GetInternalProperties(m_isolate, value)
+           .ToLocal(&properties))
     return v8::MaybeLocal<v8::Array>();
   if (value->IsFunction()) {
     v8::Local<v8::Function> function = value.As<v8::Function>();
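
The stepping and pause-on-exceptions changes above share one pattern: state that was previously fetched from the JS debugger script on every query is now cached on the C++ side and pushed to the engine only when it changes. A minimal sketch of that discipline; the enum values mirror v8::DebugInterface::ExceptionBreakState, but Debugger and engineChangeBreakOnException here are stand-ins, not the V8 API:

#include <iostream>

enum ExceptionBreakState {  // mirrors v8::DebugInterface::ExceptionBreakState
  NoBreakOnException,
  BreakOnUncaughtException,
  BreakOnAnyException
};

// Stand-in for v8::DebugInterface::ChangeBreakOnException(isolate, state).
void engineChangeBreakOnException(ExceptionBreakState state) {
  std::cout << "engine notified: " << state << "\n";
}

class Debugger {
 public:
  // The getter no longer round-trips through the JS debugger script.
  ExceptionBreakState getPauseOnExceptionsState() const { return m_state; }
  void setPauseOnExceptionsState(ExceptionBreakState state) {
    if (m_state == state) return;  // skip redundant engine calls
    engineChangeBreakOnException(state);
    m_state = state;
  }

 private:
  ExceptionBreakState m_state = NoBreakOnException;
};

int main() {
  Debugger debugger;
  debugger.setPauseOnExceptionsState(BreakOnAnyException);  // one notification
  debugger.setPauseOnExceptionsState(BreakOnAnyException);  // no-op
  return debugger.getPauseOnExceptionsState() == BreakOnAnyException ? 0 : 1;
}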
diff --git a/src/inspector/v8-debugger.h b/src/inspector/v8-debugger.h
index 83c1b21..4c74778 100644
--- a/src/inspector/v8-debugger.h
+++ b/src/inspector/v8-debugger.h
@@ -8,12 +8,12 @@
 #include <vector>
 
 #include "src/base/macros.h"
+#include "src/debug/debug-interface.h"
 #include "src/inspector/java-script-call-frame.h"
 #include "src/inspector/protocol/Forward.h"
 #include "src/inspector/protocol/Runtime.h"
 #include "src/inspector/v8-debugger-script.h"
 
-#include "include/v8-debug.h"
 #include "include/v8-inspector.h"
 
 namespace v8_inspector {
@@ -23,7 +23,7 @@
 class V8InspectorImpl;
 class V8StackTraceImpl;
 
-using protocol::ErrorString;
+using protocol::Response;
 
 class V8Debugger {
  public:
@@ -42,13 +42,8 @@
   void setBreakpointsActivated(bool);
   bool breakpointsActivated() const { return m_breakpointsActivated; }
 
-  enum PauseOnExceptionsState {
-    DontPauseOnExceptions,
-    PauseOnAllExceptions,
-    PauseOnUncaughtExceptions
-  };
-  PauseOnExceptionsState getPauseOnExceptionsState();
-  void setPauseOnExceptionsState(PauseOnExceptionsState);
+  v8::DebugInterface::ExceptionBreakState getPauseOnExceptionsState();
+  void setPauseOnExceptionsState(v8::DebugInterface::ExceptionBreakState);
   void setPauseOnNextStatement(bool);
   bool canBreakProgram();
   void breakProgram();
@@ -58,12 +53,11 @@
   void stepOutOfFunction();
   void clearStepping();
 
-  bool setScriptSource(const String16& sourceID,
-                       v8::Local<v8::String> newSource, bool dryRun,
-                       ErrorString*,
-                       protocol::Maybe<protocol::Runtime::ExceptionDetails>*,
-                       JavaScriptCallFrames* newCallFrames,
-                       protocol::Maybe<bool>* stackChanged);
+  Response setScriptSource(
+      const String16& sourceID, v8::Local<v8::String> newSource, bool dryRun,
+      protocol::Maybe<protocol::Runtime::ExceptionDetails>*,
+      JavaScriptCallFrames* newCallFrames, protocol::Maybe<bool>* stackChanged,
+      bool* compileError);
   JavaScriptCallFrames currentCallFrames(int limit = 0);
 
   // Each script inherits debug data from v8::Context where it has been
@@ -113,11 +107,12 @@
                           v8::Local<v8::Object> executionState,
                           v8::Local<v8::Value> exception,
                           v8::Local<v8::Array> hitBreakpoints,
-                          bool isPromiseRejection = false);
-  static void v8DebugEventCallback(const v8::Debug::EventDetails&);
+                          bool isPromiseRejection = false,
+                          bool isUncaught = false);
+  static void v8DebugEventCallback(const v8::DebugInterface::EventDetails&);
   v8::Local<v8::Value> callInternalGetterFunction(v8::Local<v8::Object>,
                                                   const char* functionName);
-  void handleV8DebugEvent(const v8::Debug::EventDetails&);
+  void handleV8DebugEvent(const v8::DebugInterface::EventDetails&);
   void handleV8AsyncTaskEvent(v8::Local<v8::Context>,
                               v8::Local<v8::Object> executionState,
                               v8::Local<v8::Object> eventData);
@@ -152,6 +147,8 @@
   std::vector<std::unique_ptr<V8StackTraceImpl>> m_currentStacks;
   protocol::HashMap<V8DebuggerAgentImpl*, int> m_maxAsyncCallStackDepthMap;
 
+  v8::DebugInterface::ExceptionBreakState m_pauseOnExceptionsState;
+
   DISALLOW_COPY_AND_ASSIGN(V8Debugger);
 };
 
diff --git a/src/inspector/v8-heap-profiler-agent-impl.cc b/src/inspector/v8-heap-profiler-agent-impl.cc
index 84c890b..0ff04e7 100644
--- a/src/inspector/v8-heap-profiler-agent-impl.cc
+++ b/src/inspector/v8-heap-profiler-agent-impl.cc
@@ -164,39 +164,42 @@
         HeapProfilerAgentState::allocationTrackingEnabled, false));
   if (m_state->booleanProperty(
           HeapProfilerAgentState::samplingHeapProfilerEnabled, false)) {
-    ErrorString error;
     double samplingInterval = m_state->doubleProperty(
         HeapProfilerAgentState::samplingHeapProfilerInterval, -1);
     DCHECK_GE(samplingInterval, 0);
-    startSampling(&error, Maybe<double>(samplingInterval));
+    startSampling(Maybe<double>(samplingInterval));
   }
 }
 
-void V8HeapProfilerAgentImpl::collectGarbage(ErrorString*) {
+Response V8HeapProfilerAgentImpl::collectGarbage() {
   m_isolate->LowMemoryNotification();
+  return Response::OK();
 }
 
-void V8HeapProfilerAgentImpl::startTrackingHeapObjects(
-    ErrorString*, const protocol::Maybe<bool>& trackAllocations) {
+Response V8HeapProfilerAgentImpl::startTrackingHeapObjects(
+    Maybe<bool> trackAllocations) {
   m_state->setBoolean(HeapProfilerAgentState::heapObjectsTrackingEnabled, true);
   bool allocationTrackingEnabled = trackAllocations.fromMaybe(false);
   m_state->setBoolean(HeapProfilerAgentState::allocationTrackingEnabled,
                       allocationTrackingEnabled);
   startTrackingHeapObjectsInternal(allocationTrackingEnabled);
+  return Response::OK();
 }
 
-void V8HeapProfilerAgentImpl::stopTrackingHeapObjects(
-    ErrorString* error, const protocol::Maybe<bool>& reportProgress) {
+Response V8HeapProfilerAgentImpl::stopTrackingHeapObjects(
+    Maybe<bool> reportProgress) {
   requestHeapStatsUpdate();
-  takeHeapSnapshot(error, reportProgress);
+  takeHeapSnapshot(std::move(reportProgress));
   stopTrackingHeapObjectsInternal();
+  return Response::OK();
 }
 
-void V8HeapProfilerAgentImpl::enable(ErrorString*) {
+Response V8HeapProfilerAgentImpl::enable() {
   m_state->setBoolean(HeapProfilerAgentState::heapProfilerEnabled, true);
+  return Response::OK();
 }
 
-void V8HeapProfilerAgentImpl::disable(ErrorString* error) {
+Response V8HeapProfilerAgentImpl::disable() {
   stopTrackingHeapObjectsInternal();
   if (m_state->booleanProperty(
           HeapProfilerAgentState::samplingHeapProfilerEnabled, false)) {
@@ -205,15 +208,12 @@
   }
   m_isolate->GetHeapProfiler()->ClearObjectIds();
   m_state->setBoolean(HeapProfilerAgentState::heapProfilerEnabled, false);
+  return Response::OK();
 }
 
-void V8HeapProfilerAgentImpl::takeHeapSnapshot(
-    ErrorString* errorString, const protocol::Maybe<bool>& reportProgress) {
+Response V8HeapProfilerAgentImpl::takeHeapSnapshot(Maybe<bool> reportProgress) {
   v8::HeapProfiler* profiler = m_isolate->GetHeapProfiler();
-  if (!profiler) {
-    *errorString = "Cannot access v8 heap profiler";
-    return;
-  }
+  if (!profiler) return Response::Error("Cannot access v8 heap profiler");
   std::unique_ptr<HeapSnapshotProgress> progress;
   if (reportProgress.fromMaybe(false))
     progress = wrapUnique(new HeapSnapshotProgress(&m_frontend));
@@ -221,80 +221,62 @@
   GlobalObjectNameResolver resolver(m_session);
   const v8::HeapSnapshot* snapshot =
       profiler->TakeHeapSnapshot(progress.get(), &resolver);
-  if (!snapshot) {
-    *errorString = "Failed to take heap snapshot";
-    return;
-  }
+  if (!snapshot) return Response::Error("Failed to take heap snapshot");
   HeapSnapshotOutputStream stream(&m_frontend);
   snapshot->Serialize(&stream);
   const_cast<v8::HeapSnapshot*>(snapshot)->Delete();
+  return Response::OK();
 }
 
-void V8HeapProfilerAgentImpl::getObjectByHeapObjectId(
-    ErrorString* error, const String16& heapSnapshotObjectId,
-    const protocol::Maybe<String16>& objectGroup,
+Response V8HeapProfilerAgentImpl::getObjectByHeapObjectId(
+    const String16& heapSnapshotObjectId, Maybe<String16> objectGroup,
     std::unique_ptr<protocol::Runtime::RemoteObject>* result) {
   bool ok;
   int id = heapSnapshotObjectId.toInteger(&ok);
-  if (!ok) {
-    *error = "Invalid heap snapshot object id";
-    return;
-  }
+  if (!ok) return Response::Error("Invalid heap snapshot object id");
 
   v8::HandleScope handles(m_isolate);
   v8::Local<v8::Object> heapObject = objectByHeapObjectId(m_isolate, id);
-  if (heapObject.IsEmpty()) {
-    *error = "Object is not available";
-    return;
-  }
+  if (heapObject.IsEmpty()) return Response::Error("Object is not available");
 
-  if (!m_session->inspector()->client()->isInspectableHeapObject(heapObject)) {
-    *error = "Object is not available";
-    return;
-  }
+  if (!m_session->inspector()->client()->isInspectableHeapObject(heapObject))
+    return Response::Error("Object is not available");
 
   *result = m_session->wrapObject(heapObject->CreationContext(), heapObject,
                                   objectGroup.fromMaybe(""), false);
-  if (!result) *error = "Object is not available";
+  if (!result) return Response::Error("Object is not available");
+  return Response::OK();
 }
 
-void V8HeapProfilerAgentImpl::addInspectedHeapObject(
-    ErrorString* errorString, const String16& inspectedHeapObjectId) {
+Response V8HeapProfilerAgentImpl::addInspectedHeapObject(
+    const String16& inspectedHeapObjectId) {
   bool ok;
   int id = inspectedHeapObjectId.toInteger(&ok);
-  if (!ok) {
-    *errorString = "Invalid heap snapshot object id";
-    return;
-  }
+  if (!ok) return Response::Error("Invalid heap snapshot object id");
 
   v8::HandleScope handles(m_isolate);
   v8::Local<v8::Object> heapObject = objectByHeapObjectId(m_isolate, id);
-  if (heapObject.IsEmpty()) {
-    *errorString = "Object is not available";
-    return;
-  }
+  if (heapObject.IsEmpty()) return Response::Error("Object is not available");
 
-  if (!m_session->inspector()->client()->isInspectableHeapObject(heapObject)) {
-    *errorString = "Object is not available";
-    return;
-  }
-
+  if (!m_session->inspector()->client()->isInspectableHeapObject(heapObject))
+    return Response::Error("Object is not available");
   m_session->addInspectedObject(wrapUnique(new InspectableHeapObject(id)));
+  return Response::OK();
 }
 
-void V8HeapProfilerAgentImpl::getHeapObjectId(ErrorString* errorString,
-                                              const String16& objectId,
-                                              String16* heapSnapshotObjectId) {
+Response V8HeapProfilerAgentImpl::getHeapObjectId(
+    const String16& objectId, String16* heapSnapshotObjectId) {
   v8::HandleScope handles(m_isolate);
   v8::Local<v8::Value> value;
   v8::Local<v8::Context> context;
-  if (!m_session->unwrapObject(errorString, objectId, &value, &context,
-                               nullptr) ||
-      value->IsUndefined())
-    return;
+  Response response =
+      m_session->unwrapObject(objectId, &value, &context, nullptr);
+  if (!response.isSuccess()) return response;
+  if (value->IsUndefined()) return Response::InternalError();
 
   v8::SnapshotObjectId id = m_isolate->GetHeapProfiler()->GetObjectId(value);
   *heapSnapshotObjectId = String16::fromInteger(static_cast<size_t>(id));
+  return Response::OK();
 }
 
 void V8HeapProfilerAgentImpl::requestHeapStatsUpdate() {
@@ -332,13 +314,10 @@
   m_state->setBoolean(HeapProfilerAgentState::allocationTrackingEnabled, false);
 }
 
-void V8HeapProfilerAgentImpl::startSampling(
-    ErrorString* errorString, const Maybe<double>& samplingInterval) {
+Response V8HeapProfilerAgentImpl::startSampling(
+    Maybe<double> samplingInterval) {
   v8::HeapProfiler* profiler = m_isolate->GetHeapProfiler();
-  if (!profiler) {
-    *errorString = "Cannot access v8 heap profiler";
-    return;
-  }
+  if (!profiler) return Response::Error("Cannot access v8 heap profiler");
   const unsigned defaultSamplingInterval = 1 << 15;
   double samplingIntervalValue =
       samplingInterval.fromMaybe(defaultSamplingInterval);
@@ -349,6 +328,7 @@
   profiler->StartSamplingHeapProfiler(
       static_cast<uint64_t>(samplingIntervalValue), 128,
       v8::HeapProfiler::kSamplingForceGC);
+  return Response::OK();
 }
 
 namespace {
@@ -379,14 +359,10 @@
 }
 }  // namespace
 
-void V8HeapProfilerAgentImpl::stopSampling(
-    ErrorString* errorString,
+Response V8HeapProfilerAgentImpl::stopSampling(
     std::unique_ptr<protocol::HeapProfiler::SamplingHeapProfile>* profile) {
   v8::HeapProfiler* profiler = m_isolate->GetHeapProfiler();
-  if (!profiler) {
-    *errorString = "Cannot access v8 heap profiler";
-    return;
-  }
+  if (!profiler) return Response::Error("Cannot access v8 heap profiler");
   v8::HandleScope scope(
       m_isolate);  // Allocation profile contains Local handles.
   std::unique_ptr<v8::AllocationProfile> v8Profile(
@@ -394,14 +370,13 @@
   profiler->StopSamplingHeapProfiler();
   m_state->setBoolean(HeapProfilerAgentState::samplingHeapProfilerEnabled,
                       false);
-  if (!v8Profile) {
-    *errorString = "Cannot access v8 sampled heap profile.";
-    return;
-  }
+  if (!v8Profile)
+    return Response::Error("Cannot access v8 sampled heap profile.");
   v8::AllocationProfile::Node* root = v8Profile->GetRootNode();
   *profile = protocol::HeapProfiler::SamplingHeapProfile::create()
                  .setHead(buildSampingHeapProfileNode(root))
                  .build();
+  return Response::OK();
 }
 
 }  // namespace v8_inspector
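
Every handler in this file moves from an ErrorString* out-parameter to a returned protocol::Response, so success is explicit on every path instead of being implied by an untouched error string. A simplified stand-in for that class plus one before/after handler, for illustration only (not the generated protocol code):

#include <iostream>
#include <string>
#include <utility>

class Response {  // simplified stand-in for protocol::Response
 public:
  static Response OK() { return Response(true, std::string()); }
  static Response Error(std::string message) {
    return Response(false, std::move(message));
  }
  static Response InternalError() { return Error("Internal error"); }
  bool isSuccess() const { return m_success; }
  const std::string& errorMessage() const { return m_message; }

 private:
  Response(bool success, std::string message)
      : m_success(success), m_message(std::move(message)) {}
  bool m_success;
  std::string m_message;
};

// Before: void takeHeapSnapshot(ErrorString* error) left *error empty on
// success. After: exactly one Response is produced on every path.
Response takeHeapSnapshot(bool haveProfiler) {
  if (!haveProfiler) return Response::Error("Cannot access v8 heap profiler");
  // ... take and serialize the snapshot ...
  return Response::OK();
}

int main() {
  Response response = takeHeapSnapshot(false);
  if (!response.isSuccess()) std::cout << response.errorMessage() << "\n";
}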
diff --git a/src/inspector/v8-heap-profiler-agent-impl.h b/src/inspector/v8-heap-profiler-agent-impl.h
index caa9698..e0e2447 100644
--- a/src/inspector/v8-heap-profiler-agent-impl.h
+++ b/src/inspector/v8-heap-profiler-agent-impl.h
@@ -15,8 +15,8 @@
 
 class V8InspectorSessionImpl;
 
-using protocol::ErrorString;
 using protocol::Maybe;
+using protocol::Response;
 
 class V8HeapProfilerAgentImpl : public protocol::HeapProfiler::Backend {
  public:
@@ -25,32 +25,26 @@
   ~V8HeapProfilerAgentImpl() override;
   void restore();
 
-  void collectGarbage(ErrorString*) override;
+  Response collectGarbage() override;
 
-  void enable(ErrorString*) override;
-  void startTrackingHeapObjects(ErrorString*,
-                                const Maybe<bool>& trackAllocations) override;
-  void stopTrackingHeapObjects(ErrorString*,
-                               const Maybe<bool>& reportProgress) override;
+  Response enable() override;
+  Response startTrackingHeapObjects(Maybe<bool> trackAllocations) override;
+  Response stopTrackingHeapObjects(Maybe<bool> reportProgress) override;
 
-  void disable(ErrorString*) override;
+  Response disable() override;
 
-  void takeHeapSnapshot(ErrorString*,
-                        const Maybe<bool>& reportProgress) override;
+  Response takeHeapSnapshot(Maybe<bool> reportProgress) override;
 
-  void getObjectByHeapObjectId(
-      ErrorString*, const String16& heapSnapshotObjectId,
-      const Maybe<String16>& objectGroup,
+  Response getObjectByHeapObjectId(
+      const String16& heapSnapshotObjectId, Maybe<String16> objectGroup,
       std::unique_ptr<protocol::Runtime::RemoteObject>* result) override;
-  void addInspectedHeapObject(ErrorString*,
-                              const String16& inspectedHeapObjectId) override;
-  void getHeapObjectId(ErrorString*, const String16& objectId,
-                       String16* heapSnapshotObjectId) override;
+  Response addInspectedHeapObject(
+      const String16& inspectedHeapObjectId) override;
+  Response getHeapObjectId(const String16& objectId,
+                           String16* heapSnapshotObjectId) override;
 
-  void startSampling(ErrorString*,
-                     const Maybe<double>& samplingInterval) override;
-  void stopSampling(
-      ErrorString*,
+  Response startSampling(Maybe<double> samplingInterval) override;
+  Response stopSampling(
       std::unique_ptr<protocol::HeapProfiler::SamplingHeapProfile>*) override;
 
  private:
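
The header also switches the Maybe<T> parameters from const reference to by-value, which is what lets stopTrackingHeapObjects forward reportProgress with std::move above. A minimal Maybe stand-in showing the hand-off (the real protocol::Maybe is generated and richer); the move matters little for bool but pays off for payloads such as Maybe<protocol::Array<...>>:

#include <iostream>
#include <utility>

template <typename T>
class Maybe {  // minimal stand-in for protocol::Maybe<T>
 public:
  Maybe() : m_hasValue(false), m_value() {}
  explicit Maybe(T value) : m_hasValue(true), m_value(std::move(value)) {}
  bool isJust() const { return m_hasValue; }
  T fromMaybe(T fallback) const { return m_hasValue ? m_value : fallback; }

 private:
  bool m_hasValue;
  T m_value;
};

void takeHeapSnapshot(Maybe<bool> reportProgress) {
  std::cout << "reportProgress=" << reportProgress.fromMaybe(false) << "\n";
}

// By-value parameters let the last user of the argument move it onward,
// as stopTrackingHeapObjects does in the diff above.
void stopTrackingHeapObjects(Maybe<bool> reportProgress) {
  takeHeapSnapshot(std::move(reportProgress));
}

int main() { stopTrackingHeapObjects(Maybe<bool>(true)); }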
diff --git a/src/inspector/v8-injected-script-host.cc b/src/inspector/v8-injected-script-host.cc
index dc41ef8..3748ec9 100644
--- a/src/inspector/v8-injected-script-host.cc
+++ b/src/inspector/v8-injected-script-host.cc
@@ -166,12 +166,69 @@
 void V8InjectedScriptHost::getInternalPropertiesCallback(
     const v8::FunctionCallbackInfo<v8::Value>& info) {
   if (info.Length() < 1) return;
-  v8::Local<v8::Array> properties;
-  if (unwrapInspector(info)
-          ->debugger()
-          ->internalProperties(info.GetIsolate()->GetCurrentContext(), info[0])
-          .ToLocal(&properties))
+
+  std::unordered_set<String16> allowedProperties;
+  if (info[0]->IsBooleanObject() || info[0]->IsNumberObject() ||
+      info[0]->IsStringObject() || info[0]->IsSymbolObject()) {
+    allowedProperties.insert(String16("[[PrimitiveValue]]"));
+  } else if (info[0]->IsPromise()) {
+    allowedProperties.insert(String16("[[PromiseStatus]]"));
+    allowedProperties.insert(String16("[[PromiseValue]]"));
+  } else if (info[0]->IsGeneratorObject()) {
+    allowedProperties.insert(String16("[[GeneratorStatus]]"));
+  } else if (info[0]->IsMapIterator() || info[0]->IsSetIterator()) {
+    allowedProperties.insert(String16("[[IteratorHasMore]]"));
+    allowedProperties.insert(String16("[[IteratorIndex]]"));
+    allowedProperties.insert(String16("[[IteratorKind]]"));
+    allowedProperties.insert(String16("[[Entries]]"));
+  } else if (info[0]->IsMap() || info[0]->IsWeakMap() || info[0]->IsSet() ||
+             info[0]->IsWeakSet()) {
+    allowedProperties.insert(String16("[[Entries]]"));
+  }
+  if (!allowedProperties.size()) return;
+
+  v8::Isolate* isolate = info.GetIsolate();
+  v8::Local<v8::Array> allProperties;
+  if (!unwrapInspector(info)
+           ->debugger()
+           ->internalProperties(isolate->GetCurrentContext(), info[0])
+           .ToLocal(&allProperties) ||
+      !allProperties->IsArray() || allProperties->Length() % 2 != 0)
+    return;
+
+  {
+    v8::Local<v8::Context> context = isolate->GetCurrentContext();
+    v8::TryCatch tryCatch(isolate);
+    v8::Isolate::DisallowJavascriptExecutionScope throwJs(
+        isolate,
+        v8::Isolate::DisallowJavascriptExecutionScope::THROW_ON_FAILURE);
+
+    v8::Local<v8::Array> properties = v8::Array::New(isolate);
+    if (tryCatch.HasCaught()) return;
+
+    uint32_t outputIndex = 0;
+    for (uint32_t i = 0; i < allProperties->Length(); i += 2) {
+      v8::Local<v8::Value> key;
+      if (!allProperties->Get(context, i).ToLocal(&key)) continue;
+      if (tryCatch.HasCaught()) {
+        tryCatch.Reset();
+        continue;
+      }
+      String16 keyString = toProtocolStringWithTypeCheck(key);
+      if (keyString.isEmpty() ||
+          allowedProperties.find(keyString) == allowedProperties.end())
+        continue;
+      v8::Local<v8::Value> value;
+      if (!allProperties->Get(context, i + 1).ToLocal(&value)) continue;
+      if (tryCatch.HasCaught()) {
+        tryCatch.Reset();
+        continue;
+      }
+      createDataProperty(context, properties, outputIndex++, key);
+      createDataProperty(context, properties, outputIndex++, value);
+    }
     info.GetReturnValue().Set(properties);
+  }
 }
 
 void V8InjectedScriptHost::objectHasOwnPropertyCallback(
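
The rewritten getInternalPropertiesCallback is a two-phase filter: derive the permitted [[...]] names from the value's type, then copy through only the key/value pairs whose key is in the set. The same shape on plain strings, with filterInternalProperties as a hypothetical helper, not inspector code:

#include <iostream>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>

using Property = std::pair<std::string, std::string>;

// Hypothetical helper mirroring the allowedProperties loop above.
std::vector<Property> filterInternalProperties(
    const std::vector<Property>& all,
    const std::unordered_set<std::string>& allowed) {
  std::vector<Property> kept;
  for (const Property& property : all)
    if (allowed.count(property.first)) kept.push_back(property);
  return kept;
}

int main() {
  // For a Promise the diff permits exactly these two names.
  const std::unordered_set<std::string> allowed = {"[[PromiseStatus]]",
                                                   "[[PromiseValue]]"};
  const std::vector<Property> all = {{"[[PromiseStatus]]", "pending"},
                                     {"[[Scopes]]", "..."}};
  for (const Property& property : filterInternalProperties(all, allowed))
    std::cout << property.first << "\n";  // prints [[PromiseStatus]] only
}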
diff --git a/src/inspector/v8-inspector-session-impl.cc b/src/inspector/v8-inspector-session-impl.cc
index c3d3f48..e415575 100644
--- a/src/inspector/v8-inspector-session-impl.cc
+++ b/src/inspector/v8-inspector-session-impl.cc
@@ -104,12 +104,11 @@
 }
 
 V8InspectorSessionImpl::~V8InspectorSessionImpl() {
-  ErrorString errorString;
-  m_consoleAgent->disable(&errorString);
-  m_profilerAgent->disable(&errorString);
-  m_heapProfilerAgent->disable(&errorString);
-  m_debuggerAgent->disable(&errorString);
-  m_runtimeAgent->disable(&errorString);
+  m_consoleAgent->disable();
+  m_profilerAgent->disable();
+  m_heapProfilerAgent->disable();
+  m_debuggerAgent->disable();
+  m_runtimeAgent->disable();
 
   discardInjectedScripts();
   m_inspector->disconnect(this);
@@ -165,42 +164,35 @@
   }
 }
 
-InjectedScript* V8InspectorSessionImpl::findInjectedScript(
-    ErrorString* errorString, int contextId) {
-  if (!contextId) {
-    *errorString = "Cannot find context with specified id";
-    return nullptr;
-  }
+Response V8InspectorSessionImpl::findInjectedScript(
+    int contextId, InjectedScript*& injectedScript) {
+  injectedScript = nullptr;
+  if (!contextId)
+    return Response::Error("Cannot find context with specified id");
 
   const V8InspectorImpl::ContextByIdMap* contexts =
       m_inspector->contextGroup(m_contextGroupId);
-  if (!contexts) {
-    *errorString = "Cannot find context with specified id";
-    return nullptr;
-  }
+  if (!contexts)
+    return Response::Error("Cannot find context with specified id");
 
   auto contextsIt = contexts->find(contextId);
-  if (contextsIt == contexts->end()) {
-    *errorString = "Cannot find context with specified id";
-    return nullptr;
-  }
+  if (contextsIt == contexts->end())
+    return Response::Error("Cannot find context with specified id");
 
   const std::unique_ptr<InspectedContext>& context = contextsIt->second;
   if (!context->getInjectedScript()) {
-    if (!context->createInjectedScript()) {
-      *errorString = "Cannot access specified execution context";
-      return nullptr;
-    }
+    if (!context->createInjectedScript())
+      return Response::Error("Cannot access specified execution context");
     if (m_customObjectFormatterEnabled)
       context->getInjectedScript()->setCustomObjectFormatterEnabled(true);
   }
-  return context->getInjectedScript();
+  injectedScript = context->getInjectedScript();
+  return Response::OK();
 }
 
-InjectedScript* V8InspectorSessionImpl::findInjectedScript(
-    ErrorString* errorString, RemoteObjectIdBase* objectId) {
-  return objectId ? findInjectedScript(errorString, objectId->contextId())
-                  : nullptr;
+Response V8InspectorSessionImpl::findInjectedScript(
+    RemoteObjectIdBase* objectId, InjectedScript*& injectedScript) {
+  return findInjectedScript(objectId->contextId(), injectedScript);
 }
 
 void V8InspectorSessionImpl::releaseObjectGroup(const StringView& objectGroup) {
@@ -230,31 +222,35 @@
     std::unique_ptr<StringBuffer>* error, const StringView& objectId,
     v8::Local<v8::Value>* object, v8::Local<v8::Context>* context,
     std::unique_ptr<StringBuffer>* objectGroup) {
-  ErrorString errorString;
   String16 objectGroupString;
-  bool result =
-      unwrapObject(&errorString, toString16(objectId), object, context,
-                   objectGroup ? &objectGroupString : nullptr);
-  if (error) *error = StringBufferImpl::adopt(errorString);
+  Response response = unwrapObject(toString16(objectId), object, context,
+                                   objectGroup ? &objectGroupString : nullptr);
+  if (!response.isSuccess()) {
+    if (error) {
+      String16 errorMessage = response.errorMessage();
+      *error = StringBufferImpl::adopt(errorMessage);
+    }
+    return false;
+  }
   if (objectGroup) *objectGroup = StringBufferImpl::adopt(objectGroupString);
-  return result;
+  return true;
 }
 
-bool V8InspectorSessionImpl::unwrapObject(ErrorString* errorString,
-                                          const String16& objectId,
-                                          v8::Local<v8::Value>* object,
-                                          v8::Local<v8::Context>* context,
-                                          String16* objectGroup) {
-  std::unique_ptr<RemoteObjectId> remoteId =
-      RemoteObjectId::parse(errorString, objectId);
-  if (!remoteId) return false;
-  InjectedScript* injectedScript =
-      findInjectedScript(errorString, remoteId.get());
-  if (!injectedScript) return false;
-  if (!injectedScript->findObject(errorString, *remoteId, object)) return false;
+Response V8InspectorSessionImpl::unwrapObject(const String16& objectId,
+                                              v8::Local<v8::Value>* object,
+                                              v8::Local<v8::Context>* context,
+                                              String16* objectGroup) {
+  std::unique_ptr<RemoteObjectId> remoteId;
+  Response response = RemoteObjectId::parse(objectId, &remoteId);
+  if (!response.isSuccess()) return response;
+  InjectedScript* injectedScript = nullptr;
+  response = findInjectedScript(remoteId.get(), injectedScript);
+  if (!response.isSuccess()) return response;
+  response = injectedScript->findObject(*remoteId, object);
+  if (!response.isSuccess()) return response;
   *context = injectedScript->context()->context();
   if (objectGroup) *objectGroup = injectedScript->objectGroupName(*remoteId);
-  return true;
+  return Response::OK();
 }
 
 std::unique_ptr<protocol::Runtime::API::RemoteObject>
@@ -269,21 +265,20 @@
                                    v8::Local<v8::Value> value,
                                    const String16& groupName,
                                    bool generatePreview) {
-  ErrorString errorString;
-  InjectedScript* injectedScript =
-      findInjectedScript(&errorString, V8Debugger::contextId(context));
+  InjectedScript* injectedScript = nullptr;
+  findInjectedScript(V8Debugger::contextId(context), injectedScript);
   if (!injectedScript) return nullptr;
-  return injectedScript->wrapObject(&errorString, value, groupName, false,
-                                    generatePreview);
+  std::unique_ptr<protocol::Runtime::RemoteObject> result;
+  injectedScript->wrapObject(value, groupName, false, generatePreview, &result);
+  return result;
 }
 
 std::unique_ptr<protocol::Runtime::RemoteObject>
 V8InspectorSessionImpl::wrapTable(v8::Local<v8::Context> context,
                                   v8::Local<v8::Value> table,
                                   v8::Local<v8::Value> columns) {
-  ErrorString errorString;
-  InjectedScript* injectedScript =
-      findInjectedScript(&errorString, V8Debugger::contextId(context));
+  InjectedScript* injectedScript = nullptr;
+  findInjectedScript(V8Debugger::contextId(context), injectedScript);
   if (!injectedScript) return nullptr;
   return injectedScript->wrapTable(table, columns);
 }
@@ -386,19 +381,12 @@
 }
 
 void V8InspectorSessionImpl::setSkipAllPauses(bool skip) {
-  ErrorString errorString;
-  m_debuggerAgent->setSkipAllPauses(&errorString, skip);
+  m_debuggerAgent->setSkipAllPauses(skip);
 }
 
-void V8InspectorSessionImpl::resume() {
-  ErrorString errorString;
-  m_debuggerAgent->resume(&errorString);
-}
+void V8InspectorSessionImpl::resume() { m_debuggerAgent->resume(); }
 
-void V8InspectorSessionImpl::stepOver() {
-  ErrorString errorString;
-  m_debuggerAgent->stepOver(&errorString);
-}
+void V8InspectorSessionImpl::stepOver() { m_debuggerAgent->stepOver(); }
 
 std::vector<std::unique_ptr<protocol::Debugger::API::SearchMatch>>
 V8InspectorSessionImpl::searchInTextByLines(const StringView& text,
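
unwrapObject above now reads as a chain of Response-returning steps, each failure forwarded unchanged, with results delivered through out-parameters that are reset before any early return. A condensed sketch of that idiom under invented stages (parseId and findScript are not inspector code):

#include <iostream>
#include <string>
#include <utility>

struct Response {  // condensed stand-in for protocol::Response
  bool ok;
  std::string message;
  static Response OK() { return {true, std::string()}; }
  static Response Error(std::string m) { return {false, std::move(m)}; }
};

// Hypothetical stages standing in for RemoteObjectId::parse and
// findInjectedScript; only the chaining idiom is the point here.
Response parseId(const std::string& objectId, int* remoteId) {
  if (objectId.empty()) return Response::Error("Invalid remote object id");
  *remoteId = 1;
  return Response::OK();
}

Response findScript(int remoteId, const int*& script) {
  static const int theScript = 42;
  script = nullptr;  // out-parameter is reset on every path
  if (remoteId != 1)
    return Response::Error("Cannot find context with specified id");
  script = &theScript;
  return Response::OK();
}

Response unwrapObject(const std::string& objectId, const int** object) {
  int remoteId = 0;
  Response response = parseId(objectId, &remoteId);
  if (!response.ok) return response;  // failures are forwarded unchanged
  const int* script = nullptr;
  response = findScript(remoteId, script);
  if (!response.ok) return response;
  *object = script;
  return Response::OK();
}

int main() {
  const int* object = nullptr;
  Response response = unwrapObject("", &object);
  if (!response.ok) std::cout << response.message << "\n";
}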
diff --git a/src/inspector/v8-inspector-session-impl.h b/src/inspector/v8-inspector-session-impl.h
index e84e8c9..af65aa3 100644
--- a/src/inspector/v8-inspector-session-impl.h
+++ b/src/inspector/v8-inspector-session-impl.h
@@ -26,7 +26,7 @@
 class V8RuntimeAgentImpl;
 class V8SchemaAgentImpl;
 
-using protocol::ErrorString;
+using protocol::Response;
 
 class V8InspectorSessionImpl : public V8InspectorSession,
                                public protocol::FrontendChannel {
@@ -44,8 +44,8 @@
   V8RuntimeAgentImpl* runtimeAgent() { return m_runtimeAgent.get(); }
   int contextGroupId() const { return m_contextGroupId; }
 
-  InjectedScript* findInjectedScript(ErrorString*, int contextId);
-  InjectedScript* findInjectedScript(ErrorString*, RemoteObjectIdBase*);
+  Response findInjectedScript(int contextId, InjectedScript*&);
+  Response findInjectedScript(RemoteObjectIdBase*, InjectedScript*&);
   void reset();
   void discardInjectedScripts();
   void reportAllContexts(V8RuntimeAgentImpl*);
@@ -57,9 +57,8 @@
       v8::Local<v8::Context>, v8::Local<v8::Value> table,
       v8::Local<v8::Value> columns);
   std::vector<std::unique_ptr<protocol::Schema::Domain>> supportedDomainsImpl();
-  bool unwrapObject(ErrorString*, const String16& objectId,
-                    v8::Local<v8::Value>*, v8::Local<v8::Context>*,
-                    String16* objectGroup);
+  Response unwrapObject(const String16& objectId, v8::Local<v8::Value>*,
+                        v8::Local<v8::Context>*, String16* objectGroup);
   void releaseObjectGroup(const String16& objectGroup);
 
   // V8InspectorSession implementation.
diff --git a/src/inspector/v8-profiler-agent-impl.cc b/src/inspector/v8-profiler-agent-impl.cc
index 0511ca3..8b888a0 100644
--- a/src/inspector/v8-profiler-agent-impl.cc
+++ b/src/inspector/v8-profiler-agent-impl.cc
@@ -201,34 +201,34 @@
                                     resolvedTitle);
 }
 
-void V8ProfilerAgentImpl::enable(ErrorString*) {
-  if (m_enabled) return;
+Response V8ProfilerAgentImpl::enable() {
+  if (m_enabled) return Response::OK();
   m_enabled = true;
   DCHECK(!m_profiler);
   m_profiler = v8::CpuProfiler::New(m_isolate);
   m_state->setBoolean(ProfilerAgentState::profilerEnabled, true);
+  return Response::OK();
 }
 
-void V8ProfilerAgentImpl::disable(ErrorString* errorString) {
-  if (!m_enabled) return;
+Response V8ProfilerAgentImpl::disable() {
+  if (!m_enabled) return Response::OK();
   for (size_t i = m_startedProfiles.size(); i > 0; --i)
     stopProfiling(m_startedProfiles[i - 1].m_id, false);
   m_startedProfiles.clear();
-  stop(nullptr, nullptr);
+  stop(nullptr);
   m_profiler->Dispose();
   m_profiler = nullptr;
   m_enabled = false;
   m_state->setBoolean(ProfilerAgentState::profilerEnabled, false);
+  return Response::OK();
 }
 
-void V8ProfilerAgentImpl::setSamplingInterval(ErrorString* error,
-                                              int interval) {
-  if (m_recordingCPUProfile) {
-    *error = "Cannot change sampling interval when profiling.";
-    return;
-  }
+Response V8ProfilerAgentImpl::setSamplingInterval(int interval) {
+  if (m_recordingCPUProfile)
+    return Response::Error("Cannot change sampling interval when profiling.");
   m_state->setInteger(ProfilerAgentState::samplingInterval, interval);
   m_profiler->SetSamplingInterval(interval);
+  return Response::OK();
 }
 
 void V8ProfilerAgentImpl::restore() {
@@ -243,39 +243,34 @@
   if (interval) m_profiler->SetSamplingInterval(interval);
   if (m_state->booleanProperty(ProfilerAgentState::userInitiatedProfiling,
                                false)) {
-    ErrorString error;
-    start(&error);
+    start();
   }
 }
 
-void V8ProfilerAgentImpl::start(ErrorString* error) {
-  if (m_recordingCPUProfile) return;
-  if (!m_enabled) {
-    *error = "Profiler is not enabled";
-    return;
-  }
+Response V8ProfilerAgentImpl::start() {
+  if (m_recordingCPUProfile) return Response::OK();
+  if (!m_enabled) return Response::Error("Profiler is not enabled");
   m_recordingCPUProfile = true;
   m_frontendInitiatedProfileId = nextProfileId();
   startProfiling(m_frontendInitiatedProfileId);
   m_state->setBoolean(ProfilerAgentState::userInitiatedProfiling, true);
+  return Response::OK();
 }
 
-void V8ProfilerAgentImpl::stop(
-    ErrorString* errorString,
+Response V8ProfilerAgentImpl::stop(
     std::unique_ptr<protocol::Profiler::Profile>* profile) {
-  if (!m_recordingCPUProfile) {
-    if (errorString) *errorString = "No recording profiles found";
-    return;
-  }
+  if (!m_recordingCPUProfile)
+    return Response::Error("No recording profiles found");
   m_recordingCPUProfile = false;
   std::unique_ptr<protocol::Profiler::Profile> cpuProfile =
       stopProfiling(m_frontendInitiatedProfileId, !!profile);
   if (profile) {
     *profile = std::move(cpuProfile);
-    if (!profile->get() && errorString) *errorString = "Profile is not found";
+    if (!profile->get()) return Response::Error("Profile is not found");
   }
   m_frontendInitiatedProfileId = String16();
   m_state->setBoolean(ProfilerAgentState::userInitiatedProfiling, false);
+  return Response::OK();
 }
 
 String16 V8ProfilerAgentImpl::nextProfileId() {
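
enable and disable in this agent become idempotent above: a repeated call returns Response::OK() without re-creating or double-disposing the profiler. A minimal sketch of that guard, assuming a simplified Response:

#include <iostream>

struct Response {  // condensed stand-in for protocol::Response
  bool ok;
  static Response OK() { return {true}; }
};

class ProfilerAgent {
 public:
  Response enable() {
    if (m_enabled) return Response::OK();  // second enable is a no-op
    m_enabled = true;
    // ... create the CPU profiler and persist the enabled flag here ...
    return Response::OK();
  }
  Response disable() {
    if (!m_enabled) return Response::OK();  // disabling twice also succeeds
    // ... stop outstanding profiles and dispose the profiler here ...
    m_enabled = false;
    return Response::OK();
  }
  bool enabled() const { return m_enabled; }

 private:
  bool m_enabled = false;
};

int main() {
  ProfilerAgent agent;
  Response first = agent.enable();
  Response second = agent.enable();  // idempotent, no re-initialization
  agent.disable();
  agent.disable();
  std::cout << (first.ok && second.ok && !agent.enabled()) << "\n";  // 1
}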
diff --git a/src/inspector/v8-profiler-agent-impl.h b/src/inspector/v8-profiler-agent-impl.h
index ee89976..a634ff3 100644
--- a/src/inspector/v8-profiler-agent-impl.h
+++ b/src/inspector/v8-profiler-agent-impl.h
@@ -20,7 +20,7 @@
 
 class V8InspectorSessionImpl;
 
-using protocol::ErrorString;
+using protocol::Response;
 
 class V8ProfilerAgentImpl : public protocol::Profiler::Backend {
  public:
@@ -31,12 +31,11 @@
   bool enabled() const { return m_enabled; }
   void restore();
 
-  void enable(ErrorString*) override;
-  void disable(ErrorString*) override;
-  void setSamplingInterval(ErrorString*, int) override;
-  void start(ErrorString*) override;
-  void stop(ErrorString*,
-            std::unique_ptr<protocol::Profiler::Profile>*) override;
+  Response enable() override;
+  Response disable() override;
+  Response setSamplingInterval(int) override;
+  Response start() override;
+  Response stop(std::unique_ptr<protocol::Profiler::Profile>*) override;
 
   void consoleProfile(const String16& title);
   void consoleProfileEnd(const String16& title);
diff --git a/src/inspector/v8-runtime-agent-impl.cc b/src/inspector/v8-runtime-agent-impl.cc
index 640ec31..4dbe60f 100644
--- a/src/inspector/v8-runtime-agent-impl.cc
+++ b/src/inspector/v8-runtime-agent-impl.cc
@@ -41,6 +41,7 @@
 #include "src/inspector/v8-inspector-impl.h"
 #include "src/inspector/v8-inspector-session-impl.h"
 #include "src/inspector/v8-stack-trace-impl.h"
+#include "src/tracing/trace-event.h"
 
 #include "include/v8-inspector.h"
 
@@ -54,11 +55,6 @@
 
 using protocol::Runtime::RemoteObject;
 
-static bool hasInternalError(ErrorString* errorString, bool hasError) {
-  if (hasError) *errorString = "Internal error";
-  return hasError;
-}
-
 namespace {
 
 template <typename Callback>
@@ -71,11 +67,11 @@
                   bool returnByValue, bool generatePreview,
                   std::unique_ptr<Callback> callback) {
     if (value.IsEmpty()) {
-      callback->sendFailure("Internal error");
+      callback->sendFailure(Response::InternalError());
       return;
     }
     if (!value.ToLocalChecked()->IsPromise()) {
-      callback->sendFailure(notPromiseError);
+      callback->sendFailure(Response::Error(notPromiseError));
       return;
     }
     v8::MicrotasksScope microtasks_scope(inspector->isolate(),
@@ -93,7 +89,7 @@
                           v8::ConstructorBehavior::kThrow)
             .ToLocalChecked();
     if (promise->Then(context, thenCallbackFunction).IsEmpty()) {
-      rawCallback->sendFailure("Internal error");
+      rawCallback->sendFailure(Response::InternalError());
       return;
     }
     v8::Local<v8::Function> catchCallbackFunction =
@@ -101,7 +97,7 @@
                           v8::ConstructorBehavior::kThrow)
             .ToLocalChecked();
     if (promise->Catch(context, catchCallbackFunction).IsEmpty()) {
-      rawCallback->sendFailure("Internal error");
+      rawCallback->sendFailure(Response::InternalError());
       return;
     }
   }
@@ -179,25 +175,27 @@
       data.GetParameter()->m_wrapper.Reset();
       data.SetSecondPassCallback(cleanup);
     } else {
-      data.GetParameter()->m_callback->sendFailure("Promise was collected");
+      data.GetParameter()->m_callback->sendFailure(
+          Response::Error("Promise was collected"));
       delete data.GetParameter();
     }
   }
 
   std::unique_ptr<protocol::Runtime::RemoteObject> wrapObject(
       v8::Local<v8::Value> value) {
-    ErrorString errorString;
-    InjectedScript::ContextScope scope(&errorString, m_inspector,
-                                       m_contextGroupId, m_executionContextId);
-    if (!scope.initialize()) {
-      m_callback->sendFailure(errorString);
+    InjectedScript::ContextScope scope(m_inspector, m_contextGroupId,
+                                       m_executionContextId);
+    Response response = scope.initialize();
+    if (!response.isSuccess()) {
+      m_callback->sendFailure(response);
       return nullptr;
     }
-    std::unique_ptr<protocol::Runtime::RemoteObject> wrappedValue =
-        scope.injectedScript()->wrapObject(&errorString, value, m_objectGroup,
-                                           m_returnByValue, m_generatePreview);
-    if (!wrappedValue) {
-      m_callback->sendFailure(errorString);
+    std::unique_ptr<protocol::Runtime::RemoteObject> wrappedValue;
+    response = scope.injectedScript()->wrapObject(
+        value, m_objectGroup, m_returnByValue, m_generatePreview,
+        &wrappedValue);
+    if (!response.isSuccess()) {
+      m_callback->sendFailure(response);
       return nullptr;
     }
     return wrappedValue;
@@ -222,34 +220,30 @@
   std::unique_ptr<RemoteObject> result;
   Maybe<protocol::Runtime::ExceptionDetails> exceptionDetails;
 
-  ErrorString errorString;
-  injectedScript->wrapEvaluateResult(
-      &errorString, maybeResultValue, tryCatch, objectGroup, returnByValue,
-      generatePreview, &result, &exceptionDetails);
-  if (errorString.isEmpty()) {
-    callback->sendSuccess(std::move(result), exceptionDetails);
+  Response response = injectedScript->wrapEvaluateResult(
+      maybeResultValue, tryCatch, objectGroup, returnByValue, generatePreview,
+      &result, &exceptionDetails);
+  if (response.isSuccess()) {
+    callback->sendSuccess(std::move(result), std::move(exceptionDetails));
     return true;
   }
-  callback->sendFailure(errorString);
+  callback->sendFailure(response);
   return false;
 }
 
-int ensureContext(ErrorString* errorString, V8InspectorImpl* inspector,
-                  int contextGroupId, const Maybe<int>& executionContextId) {
-  int contextId;
+Response ensureContext(V8InspectorImpl* inspector, int contextGroupId,
+                       Maybe<int> executionContextId, int* contextId) {
   if (executionContextId.isJust()) {
-    contextId = executionContextId.fromJust();
+    *contextId = executionContextId.fromJust();
   } else {
     v8::HandleScope handles(inspector->isolate());
     v8::Local<v8::Context> defaultContext =
         inspector->client()->ensureDefaultContextInGroup(contextGroupId);
-    if (defaultContext.IsEmpty()) {
-      *errorString = "Cannot find default execution context";
-      return 0;
-    }
-    contextId = V8Debugger::contextId(defaultContext);
+    if (defaultContext.IsEmpty())
+      return Response::Error("Cannot find default execution context");
+    *contextId = V8Debugger::contextId(defaultContext);
   }
-  return contextId;
+  return Response::OK();
 }
 
 }  // namespace
@@ -266,36 +260,33 @@
 V8RuntimeAgentImpl::~V8RuntimeAgentImpl() {}
 
 void V8RuntimeAgentImpl::evaluate(
-    const String16& expression, const Maybe<String16>& objectGroup,
-    const Maybe<bool>& includeCommandLineAPI, const Maybe<bool>& silent,
-    const Maybe<int>& executionContextId, const Maybe<bool>& returnByValue,
-    const Maybe<bool>& generatePreview, const Maybe<bool>& userGesture,
-    const Maybe<bool>& awaitPromise,
-    std::unique_ptr<EvaluateCallback> callback) {
-  ErrorString errorString;
-  int contextId =
-      ensureContext(&errorString, m_inspector, m_session->contextGroupId(),
-                    executionContextId);
-  if (!errorString.isEmpty()) {
-    callback->sendFailure(errorString);
+    const String16& expression, Maybe<String16> objectGroup,
+    Maybe<bool> includeCommandLineAPI, Maybe<bool> silent,
+    Maybe<int> executionContextId, Maybe<bool> returnByValue,
+    Maybe<bool> generatePreview, Maybe<bool> userGesture,
+    Maybe<bool> awaitPromise, std::unique_ptr<EvaluateCallback> callback) {
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"),
+               "EvaluateScript");
+  int contextId = 0;
+  Response response = ensureContext(m_inspector, m_session->contextGroupId(),
+                                    std::move(executionContextId), &contextId);
+  if (!response.isSuccess()) {
+    callback->sendFailure(response);
     return;
   }
 
-  InjectedScript::ContextScope scope(&errorString, m_inspector,
-                                     m_session->contextGroupId(), contextId);
-  if (!scope.initialize()) {
-    callback->sendFailure(errorString);
+  InjectedScript::ContextScope scope(m_inspector, m_session->contextGroupId(),
+                                     contextId);
+  response = scope.initialize();
+  if (!response.isSuccess()) {
+    callback->sendFailure(response);
     return;
   }
 
   if (silent.fromMaybe(false)) scope.ignoreExceptionsAndMuteConsole();
   if (userGesture.fromMaybe(false)) scope.pretendUserGesture();
 
-  if (includeCommandLineAPI.fromMaybe(false) &&
-      !scope.installCommandLineAPI()) {
-    callback->sendFailure(errorString);
-    return;
-  }
+  if (includeCommandLineAPI.fromMaybe(false)) scope.installCommandLineAPI();
 
   bool evalIsDisabled = !scope.context()->IsCodeGenerationFromStringsAllowed();
   // Temporarily enable allow evals for inspector.
@@ -312,8 +303,9 @@
 
   // Re-initialize after running client's code, as it could have destroyed
   // context or session.
-  if (!scope.initialize()) {
-    callback->sendFailure(errorString);
+  response = scope.initialize();
+  if (!response.isSuccess()) {
+    callback->sendFailure(response);
     return;
   }
 
@@ -333,14 +325,14 @@
 }
 
 void V8RuntimeAgentImpl::awaitPromise(
-    const String16& promiseObjectId, const Maybe<bool>& returnByValue,
-    const Maybe<bool>& generatePreview,
+    const String16& promiseObjectId, Maybe<bool> returnByValue,
+    Maybe<bool> generatePreview,
     std::unique_ptr<AwaitPromiseCallback> callback) {
-  ErrorString errorString;
-  InjectedScript::ObjectScope scope(
-      &errorString, m_inspector, m_session->contextGroupId(), promiseObjectId);
-  if (!scope.initialize()) {
-    callback->sendFailure(errorString);
+  InjectedScript::ObjectScope scope(m_inspector, m_session->contextGroupId(),
+                                    promiseObjectId);
+  Response response = scope.initialize();
+  if (!response.isSuccess()) {
+    callback->sendFailure(response);
     return;
   }
   ProtocolPromiseHandler<AwaitPromiseCallback>::add(
@@ -353,17 +345,15 @@
 
 void V8RuntimeAgentImpl::callFunctionOn(
     const String16& objectId, const String16& expression,
-    const Maybe<protocol::Array<protocol::Runtime::CallArgument>>&
-        optionalArguments,
-    const Maybe<bool>& silent, const Maybe<bool>& returnByValue,
-    const Maybe<bool>& generatePreview, const Maybe<bool>& userGesture,
-    const Maybe<bool>& awaitPromise,
+    Maybe<protocol::Array<protocol::Runtime::CallArgument>> optionalArguments,
+    Maybe<bool> silent, Maybe<bool> returnByValue, Maybe<bool> generatePreview,
+    Maybe<bool> userGesture, Maybe<bool> awaitPromise,
     std::unique_ptr<CallFunctionOnCallback> callback) {
-  ErrorString errorString;
-  InjectedScript::ObjectScope scope(&errorString, m_inspector,
-                                    m_session->contextGroupId(), objectId);
-  if (!scope.initialize()) {
-    callback->sendFailure(errorString);
+  InjectedScript::ObjectScope scope(m_inspector, m_session->contextGroupId(),
+                                    objectId);
+  Response response = scope.initialize();
+  if (!response.isSuccess()) {
+    callback->sendFailure(response);
     return;
   }
 
@@ -376,10 +366,10 @@
     argv.reset(new v8::Local<v8::Value>[argc]);
     for (int i = 0; i < argc; ++i) {
       v8::Local<v8::Value> argumentValue;
-      if (!scope.injectedScript()
-               ->resolveCallArgument(&errorString, arguments->get(i))
-               .ToLocal(&argumentValue)) {
-        callback->sendFailure(errorString);
+      response = scope.injectedScript()->resolveCallArgument(arguments->get(i),
+                                                             &argumentValue);
+      if (!response.isSuccess()) {
+        callback->sendFailure(response);
         return;
       }
       argv[i] = argumentValue;
@@ -395,8 +385,9 @@
           toV8String(m_inspector->isolate(), "(" + expression + ")"));
   // Re-initialize after running client's code, as it could have destroyed
   // context or session.
-  if (!scope.initialize()) {
-    callback->sendFailure(errorString);
+  response = scope.initialize();
+  if (!response.isSuccess()) {
+    callback->sendFailure(response);
     return;
   }
 
@@ -410,7 +401,8 @@
   v8::Local<v8::Value> functionValue;
   if (!maybeFunctionValue.ToLocal(&functionValue) ||
       !functionValue->IsFunction()) {
-    callback->sendFailure("Given expression does not evaluate to a function");
+    callback->sendFailure(
+        Response::Error("Given expression does not evaluate to a function"));
     return;
   }
 
@@ -419,8 +411,9 @@
       argv.get());
   // Re-initialize after running client's code, as it could have destroyed
   // context or session.
-  if (!scope.initialize()) {
-    callback->sendFailure(errorString);
+  response = scope.initialize();
+  if (!response.isSuccess()) {
+    callback->sendFailure(response);
     return;
   }
 
@@ -441,10 +434,9 @@
       std::move(callback));
 }
 
-void V8RuntimeAgentImpl::getProperties(
-    ErrorString* errorString, const String16& objectId,
-    const Maybe<bool>& ownProperties, const Maybe<bool>& accessorPropertiesOnly,
-    const Maybe<bool>& generatePreview,
+Response V8RuntimeAgentImpl::getProperties(
+    const String16& objectId, Maybe<bool> ownProperties,
+    Maybe<bool> accessorPropertiesOnly, Maybe<bool> generatePreview,
     std::unique_ptr<protocol::Array<protocol::Runtime::PropertyDescriptor>>*
         result,
     Maybe<protocol::Array<protocol::Runtime::InternalPropertyDescriptor>>*
@@ -452,105 +444,103 @@
     Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
   using protocol::Runtime::InternalPropertyDescriptor;
 
-  InjectedScript::ObjectScope scope(errorString, m_inspector,
-                                    m_session->contextGroupId(), objectId);
-  if (!scope.initialize()) return;
+  InjectedScript::ObjectScope scope(m_inspector, m_session->contextGroupId(),
+                                    objectId);
+  Response response = scope.initialize();
+  if (!response.isSuccess()) return response;
 
   scope.ignoreExceptionsAndMuteConsole();
-  if (!scope.object()->IsObject()) {
-    *errorString = "Value with given id is not an object";
-    return;
-  }
+  if (!scope.object()->IsObject())
+    return Response::Error("Value with given id is not an object");
 
   v8::Local<v8::Object> object = scope.object().As<v8::Object>();
-  scope.injectedScript()->getProperties(
-      errorString, object, scope.objectGroupName(),
-      ownProperties.fromMaybe(false), accessorPropertiesOnly.fromMaybe(false),
-      generatePreview.fromMaybe(false), result, exceptionDetails);
-  if (!errorString->isEmpty() || exceptionDetails->isJust() ||
-      accessorPropertiesOnly.fromMaybe(false))
-    return;
+  response = scope.injectedScript()->getProperties(
+      object, scope.objectGroupName(), ownProperties.fromMaybe(false),
+      accessorPropertiesOnly.fromMaybe(false), generatePreview.fromMaybe(false),
+      result, exceptionDetails);
+  if (!response.isSuccess()) return response;
+  if (exceptionDetails->isJust() || accessorPropertiesOnly.fromMaybe(false))
+    return Response::OK();
   v8::Local<v8::Array> propertiesArray;
-  if (hasInternalError(errorString, !m_inspector->debugger()
-                                         ->internalProperties(scope.context(),
-                                                              scope.object())
-                                         .ToLocal(&propertiesArray)))
-    return;
+  if (!m_inspector->debugger()
+           ->internalProperties(scope.context(), scope.object())
+           .ToLocal(&propertiesArray)) {
+    return Response::InternalError();
+  }
   std::unique_ptr<protocol::Array<InternalPropertyDescriptor>>
       propertiesProtocolArray =
           protocol::Array<InternalPropertyDescriptor>::create();
   for (uint32_t i = 0; i < propertiesArray->Length(); i += 2) {
     v8::Local<v8::Value> name;
-    if (hasInternalError(
-            errorString,
-            !propertiesArray->Get(scope.context(), i).ToLocal(&name)) ||
-        !name->IsString())
-      return;
+    if (!propertiesArray->Get(scope.context(), i).ToLocal(&name) ||
+        !name->IsString()) {
+      return Response::InternalError();
+    }
     v8::Local<v8::Value> value;
-    if (hasInternalError(
-            errorString,
-            !propertiesArray->Get(scope.context(), i + 1).ToLocal(&value)))
-      return;
-    std::unique_ptr<RemoteObject> wrappedValue =
-        scope.injectedScript()->wrapObject(errorString, value,
-                                           scope.objectGroupName());
-    if (!wrappedValue) return;
+    if (!propertiesArray->Get(scope.context(), i + 1).ToLocal(&value))
+      return Response::InternalError();
+    std::unique_ptr<RemoteObject> wrappedValue;
+    protocol::Response response = scope.injectedScript()->wrapObject(
+        value, scope.objectGroupName(), false, false, &wrappedValue);
+    if (!response.isSuccess()) return response;
     propertiesProtocolArray->addItem(
         InternalPropertyDescriptor::create()
             .setName(toProtocolString(name.As<v8::String>()))
             .setValue(std::move(wrappedValue))
             .build());
   }
-  if (!propertiesProtocolArray->length()) return;
-  *internalProperties = std::move(propertiesProtocolArray);
+  if (propertiesProtocolArray->length())
+    *internalProperties = std::move(propertiesProtocolArray);
+  return Response::OK();
 }
 
-void V8RuntimeAgentImpl::releaseObject(ErrorString* errorString,
-                                       const String16& objectId) {
-  InjectedScript::ObjectScope scope(errorString, m_inspector,
-                                    m_session->contextGroupId(), objectId);
-  if (!scope.initialize()) return;
+Response V8RuntimeAgentImpl::releaseObject(const String16& objectId) {
+  InjectedScript::ObjectScope scope(m_inspector, m_session->contextGroupId(),
+                                    objectId);
+  Response response = scope.initialize();
+  if (!response.isSuccess()) return response;
   scope.injectedScript()->releaseObject(objectId);
+  return Response::OK();
 }
 
-void V8RuntimeAgentImpl::releaseObjectGroup(ErrorString*,
-                                            const String16& objectGroup) {
+Response V8RuntimeAgentImpl::releaseObjectGroup(const String16& objectGroup) {
   m_session->releaseObjectGroup(objectGroup);
+  return Response::OK();
 }
 
-void V8RuntimeAgentImpl::runIfWaitingForDebugger(ErrorString* errorString) {
+Response V8RuntimeAgentImpl::runIfWaitingForDebugger() {
   m_inspector->client()->runIfWaitingForDebugger(m_session->contextGroupId());
+  return Response::OK();
 }
 
-void V8RuntimeAgentImpl::setCustomObjectFormatterEnabled(ErrorString*,
-                                                         bool enabled) {
+Response V8RuntimeAgentImpl::setCustomObjectFormatterEnabled(bool enabled) {
   m_state->setBoolean(V8RuntimeAgentImplState::customObjectFormatterEnabled,
                       enabled);
   m_session->setCustomObjectFormatterEnabled(enabled);
+  return Response::OK();
 }
 
-void V8RuntimeAgentImpl::discardConsoleEntries(ErrorString*) {
+Response V8RuntimeAgentImpl::discardConsoleEntries() {
   V8ConsoleMessageStorage* storage =
       m_inspector->ensureConsoleMessageStorage(m_session->contextGroupId());
   storage->clear();
+  return Response::OK();
 }
 
-void V8RuntimeAgentImpl::compileScript(
-    ErrorString* errorString, const String16& expression,
-    const String16& sourceURL, bool persistScript,
-    const Maybe<int>& executionContextId, Maybe<String16>* scriptId,
+Response V8RuntimeAgentImpl::compileScript(
+    const String16& expression, const String16& sourceURL, bool persistScript,
+    Maybe<int> executionContextId, Maybe<String16>* scriptId,
     Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
-  if (!m_enabled) {
-    *errorString = "Runtime agent is not enabled";
-    return;
-  }
-  int contextId =
-      ensureContext(errorString, m_inspector, m_session->contextGroupId(),
-                    executionContextId);
-  if (!errorString->isEmpty()) return;
-  InjectedScript::ContextScope scope(errorString, m_inspector,
-                                     m_session->contextGroupId(), contextId);
-  if (!scope.initialize()) return;
+  if (!m_enabled) return Response::Error("Runtime agent is not enabled");
+
+  int contextId = 0;
+  Response response = ensureContext(m_inspector, m_session->contextGroupId(),
+                                    std::move(executionContextId), &contextId);
+  if (!response.isSuccess()) return response;
+  InjectedScript::ContextScope scope(m_inspector, m_session->contextGroupId(),
+                                     contextId);
+  response = scope.initialize();
+  if (!response.isSuccess()) return response;
 
   if (!persistScript) m_inspector->debugger()->muteScriptParsedEvents();
   v8::Local<v8::Script> script = m_inspector->compileScript(
@@ -558,15 +548,17 @@
       sourceURL, false);
   if (!persistScript) m_inspector->debugger()->unmuteScriptParsedEvents();
   if (script.IsEmpty()) {
-    if (scope.tryCatch().HasCaught())
-      *exceptionDetails = scope.injectedScript()->createExceptionDetails(
-          errorString, scope.tryCatch(), String16(), false);
-    else
-      *errorString = "Script compilation failed";
-    return;
+    if (scope.tryCatch().HasCaught()) {
+      response = scope.injectedScript()->createExceptionDetails(
+          scope.tryCatch(), String16(), false, exceptionDetails);
+      if (!response.isSuccess()) return response;
+      return Response::OK();
+    } else {
+      return Response::Error("Script compilation failed");
+    }
   }
 
-  if (!persistScript) return;
+  if (!persistScript) return Response::OK();
 
   String16 scriptValueId =
       String16::fromInteger(script->GetUnboundScript()->GetId());
@@ -574,38 +566,39 @@
       new v8::Global<v8::Script>(m_inspector->isolate(), script));
   m_compiledScripts[scriptValueId] = std::move(global);
   *scriptId = scriptValueId;
+  return Response::OK();
 }
 
 void V8RuntimeAgentImpl::runScript(
-    const String16& scriptId, const Maybe<int>& executionContextId,
-    const Maybe<String16>& objectGroup, const Maybe<bool>& silent,
-    const Maybe<bool>& includeCommandLineAPI, const Maybe<bool>& returnByValue,
-    const Maybe<bool>& generatePreview, const Maybe<bool>& awaitPromise,
+    const String16& scriptId, Maybe<int> executionContextId,
+    Maybe<String16> objectGroup, Maybe<bool> silent,
+    Maybe<bool> includeCommandLineAPI, Maybe<bool> returnByValue,
+    Maybe<bool> generatePreview, Maybe<bool> awaitPromise,
     std::unique_ptr<RunScriptCallback> callback) {
   if (!m_enabled) {
-    callback->sendFailure("Runtime agent is not enabled");
+    callback->sendFailure(Response::Error("Runtime agent is not enabled"));
     return;
   }
 
   auto it = m_compiledScripts.find(scriptId);
   if (it == m_compiledScripts.end()) {
-    callback->sendFailure("No script with given id");
+    callback->sendFailure(Response::Error("No script with given id"));
     return;
   }
 
-  ErrorString errorString;
-  int contextId =
-      ensureContext(&errorString, m_inspector, m_session->contextGroupId(),
-                    executionContextId);
-  if (!errorString.isEmpty()) {
-    callback->sendFailure(errorString);
+  int contextId = 0;
+  Response response = ensureContext(m_inspector, m_session->contextGroupId(),
+                                    std::move(executionContextId), &contextId);
+  if (!response.isSuccess()) {
+    callback->sendFailure(response);
     return;
   }
 
-  InjectedScript::ContextScope scope(&errorString, m_inspector,
-                                     m_session->contextGroupId(), contextId);
-  if (!scope.initialize()) {
-    callback->sendFailure(errorString);
+  InjectedScript::ContextScope scope(m_inspector, m_session->contextGroupId(),
+                                     contextId);
+  response = scope.initialize();
+  if (!response.isSuccess()) {
+    callback->sendFailure(response);
     return;
   }
 
@@ -615,19 +608,22 @@
   m_compiledScripts.erase(it);
   v8::Local<v8::Script> script = scriptWrapper->Get(m_inspector->isolate());
   if (script.IsEmpty()) {
-    callback->sendFailure("Script execution failed");
+    callback->sendFailure(Response::Error("Script execution failed"));
     return;
   }
 
-  if (includeCommandLineAPI.fromMaybe(false) && !scope.installCommandLineAPI())
-    return;
+  if (includeCommandLineAPI.fromMaybe(false)) scope.installCommandLineAPI();
 
   v8::MaybeLocal<v8::Value> maybeResultValue =
       m_inspector->runCompiledScript(scope.context(), script);
 
   // Re-initialize after running client's code, as it could have destroyed
   // context or session.
-  if (!scope.initialize()) return;
+  response = scope.initialize();
+  if (!response.isSuccess()) {
+    callback->sendFailure(response);
+    return;
+  }
 
   if (!awaitPromise.fromMaybe(false) || scope.tryCatch().HasCaught()) {
     wrapEvaluateResultAsync(scope.injectedScript(), maybeResultValue,
@@ -649,15 +645,14 @@
   if (!m_state->booleanProperty(V8RuntimeAgentImplState::runtimeEnabled, false))
     return;
   m_frontend.executionContextsCleared();
-  ErrorString error;
-  enable(&error);
+  enable();
   if (m_state->booleanProperty(
           V8RuntimeAgentImplState::customObjectFormatterEnabled, false))
     m_session->setCustomObjectFormatterEnabled(true);
 }
 
-void V8RuntimeAgentImpl::enable(ErrorString* errorString) {
-  if (m_enabled) return;
+Response V8RuntimeAgentImpl::enable() {
+  if (m_enabled) return Response::OK();
   m_inspector->client()->beginEnsureAllContextsInGroup(
       m_session->contextGroupId());
   m_enabled = true;
@@ -667,12 +662,13 @@
   V8ConsoleMessageStorage* storage =
       m_inspector->ensureConsoleMessageStorage(m_session->contextGroupId());
   for (const auto& message : storage->messages()) {
-    if (!reportMessage(message.get(), false)) return;
+    if (!reportMessage(message.get(), false)) break;
   }
+  return Response::OK();
 }
 
-void V8RuntimeAgentImpl::disable(ErrorString* errorString) {
-  if (!m_enabled) return;
+Response V8RuntimeAgentImpl::disable() {
+  if (!m_enabled) return Response::OK();
   m_enabled = false;
   m_state->setBoolean(V8RuntimeAgentImplState::runtimeEnabled, false);
   m_inspector->disableStackCapturingIfNeeded();
@@ -680,6 +676,7 @@
   reset();
   m_inspector->client()->endEnsureAllContextsInGroup(
       m_session->contextGroupId());
+  return Response::OK();
 }
 
 void V8RuntimeAgentImpl::reset() {
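The hunks above replace the out-parameter ErrorString plumbing with protocol::Response return values: synchronous handlers return Response::OK() or Response::Error(...), and callback-based handlers forward failures via sendFailure(Response). The sketch below distills that control flow; Result, initializeScope, and the handler names are illustrative stand-ins, not the generated protocol classes.

    #include <functional>
    #include <iostream>
    #include <string>

    // Illustrative stand-in for the generated protocol::Response type.
    class Result {
     public:
      static Result OK() { return Result(true, std::string()); }
      static Result Error(std::string message) {
        return Result(false, std::move(message));
      }
      bool isSuccess() const { return success_; }
      const std::string& message() const { return message_; }

     private:
      Result(bool success, std::string message)
          : success_(success), message_(std::move(message)) {}
      bool success_;
      std::string message_;
    };

    // Hypothetical scope setup that can fail, like ObjectScope::initialize().
    Result initializeScope(bool contextAlive) {
      if (!contextAlive)
        return Result::Error("Cannot find context with specified id");
      return Result::OK();
    }

    // Synchronous handler style: return the first failing step unchanged.
    Result releaseObject(bool contextAlive) {
      Result result = initializeScope(contextAlive);
      if (!result.isSuccess()) return result;
      return Result::OK();
    }

    // Asynchronous handler style: report failures through the callback.
    void runScript(bool contextAlive,
                   std::function<void(const Result&)> sendFailure) {
      Result result = initializeScope(contextAlive);
      if (!result.isSuccess()) {
        sendFailure(result);
        return;
      }
      // ... run the script and report the value via a success callback ...
    }

    int main() {
      std::cout << releaseObject(false).message() << "\n";
      runScript(false, [](const Result& r) { std::cout << r.message() << "\n"; });
    }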
diff --git a/src/inspector/v8-runtime-agent-impl.h b/src/inspector/v8-runtime-agent-impl.h
index edeeed4..9caa1fb 100644
--- a/src/inspector/v8-runtime-agent-impl.h
+++ b/src/inspector/v8-runtime-agent-impl.h
@@ -46,7 +46,7 @@
 class V8InspectorImpl;
 class V8InspectorSessionImpl;
 
-using protocol::ErrorString;
+using protocol::Response;
 using protocol::Maybe;
 
 class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
@@ -57,51 +57,45 @@
   void restore();
 
   // Part of the protocol.
-  void enable(ErrorString*) override;
-  void disable(ErrorString*) override;
-  void evaluate(const String16& expression, const Maybe<String16>& objectGroup,
-                const Maybe<bool>& includeCommandLineAPI,
-                const Maybe<bool>& silent, const Maybe<int>& executionContextId,
-                const Maybe<bool>& returnByValue,
-                const Maybe<bool>& generatePreview,
-                const Maybe<bool>& userGesture, const Maybe<bool>& awaitPromise,
+  Response enable() override;
+  Response disable() override;
+  void evaluate(const String16& expression, Maybe<String16> objectGroup,
+                Maybe<bool> includeCommandLineAPI, Maybe<bool> silent,
+                Maybe<int> executionContextId, Maybe<bool> returnByValue,
+                Maybe<bool> generatePreview, Maybe<bool> userGesture,
+                Maybe<bool> awaitPromise,
                 std::unique_ptr<EvaluateCallback>) override;
-  void awaitPromise(const String16& promiseObjectId,
-                    const Maybe<bool>& returnByValue,
-                    const Maybe<bool>& generatePreview,
+  void awaitPromise(const String16& promiseObjectId, Maybe<bool> returnByValue,
+                    Maybe<bool> generatePreview,
                     std::unique_ptr<AwaitPromiseCallback>) override;
   void callFunctionOn(
       const String16& objectId, const String16& expression,
-      const Maybe<protocol::Array<protocol::Runtime::CallArgument>>&
-          optionalArguments,
-      const Maybe<bool>& silent, const Maybe<bool>& returnByValue,
-      const Maybe<bool>& generatePreview, const Maybe<bool>& userGesture,
-      const Maybe<bool>& awaitPromise,
+      Maybe<protocol::Array<protocol::Runtime::CallArgument>> optionalArguments,
+      Maybe<bool> silent, Maybe<bool> returnByValue,
+      Maybe<bool> generatePreview, Maybe<bool> userGesture,
+      Maybe<bool> awaitPromise,
       std::unique_ptr<CallFunctionOnCallback>) override;
-  void releaseObject(ErrorString*, const String16& objectId) override;
-  void getProperties(
-      ErrorString*, const String16& objectId, const Maybe<bool>& ownProperties,
-      const Maybe<bool>& accessorPropertiesOnly,
-      const Maybe<bool>& generatePreview,
+  Response releaseObject(const String16& objectId) override;
+  Response getProperties(
+      const String16& objectId, Maybe<bool> ownProperties,
+      Maybe<bool> accessorPropertiesOnly, Maybe<bool> generatePreview,
       std::unique_ptr<protocol::Array<protocol::Runtime::PropertyDescriptor>>*
           result,
       Maybe<protocol::Array<protocol::Runtime::InternalPropertyDescriptor>>*
           internalProperties,
       Maybe<protocol::Runtime::ExceptionDetails>*) override;
-  void releaseObjectGroup(ErrorString*, const String16& objectGroup) override;
-  void runIfWaitingForDebugger(ErrorString*) override;
-  void setCustomObjectFormatterEnabled(ErrorString*, bool) override;
-  void discardConsoleEntries(ErrorString*) override;
-  void compileScript(ErrorString*, const String16& expression,
-                     const String16& sourceURL, bool persistScript,
-                     const Maybe<int>& executionContextId, Maybe<String16>*,
-                     Maybe<protocol::Runtime::ExceptionDetails>*) override;
-  void runScript(const String16&, const Maybe<int>& executionContextId,
-                 const Maybe<String16>& objectGroup, const Maybe<bool>& silent,
-                 const Maybe<bool>& includeCommandLineAPI,
-                 const Maybe<bool>& returnByValue,
-                 const Maybe<bool>& generatePreview,
-                 const Maybe<bool>& awaitPromise,
+  Response releaseObjectGroup(const String16& objectGroup) override;
+  Response runIfWaitingForDebugger() override;
+  Response setCustomObjectFormatterEnabled(bool) override;
+  Response discardConsoleEntries() override;
+  Response compileScript(const String16& expression, const String16& sourceURL,
+                         bool persistScript, Maybe<int> executionContextId,
+                         Maybe<String16>*,
+                         Maybe<protocol::Runtime::ExceptionDetails>*) override;
+  void runScript(const String16&, Maybe<int> executionContextId,
+                 Maybe<String16> objectGroup, Maybe<bool> silent,
+                 Maybe<bool> includeCommandLineAPI, Maybe<bool> returnByValue,
+                 Maybe<bool> generatePreview, Maybe<bool> awaitPromise,
                  std::unique_ptr<RunScriptCallback>) override;
 
   void reset();
diff --git a/src/inspector/v8-schema-agent-impl.cc b/src/inspector/v8-schema-agent-impl.cc
index 9eed5bd..d7b6cdc 100644
--- a/src/inspector/v8-schema-agent-impl.cc
+++ b/src/inspector/v8-schema-agent-impl.cc
@@ -16,14 +16,14 @@
 
 V8SchemaAgentImpl::~V8SchemaAgentImpl() {}
 
-void V8SchemaAgentImpl::getDomains(
-    ErrorString*,
+Response V8SchemaAgentImpl::getDomains(
     std::unique_ptr<protocol::Array<protocol::Schema::Domain>>* result) {
   std::vector<std::unique_ptr<protocol::Schema::Domain>> domains =
       m_session->supportedDomainsImpl();
   *result = protocol::Array<protocol::Schema::Domain>::create();
   for (size_t i = 0; i < domains.size(); ++i)
     (*result)->addItem(std::move(domains[i]));
+  return Response::OK();
 }
 
 }  // namespace v8_inspector
diff --git a/src/inspector/v8-schema-agent-impl.h b/src/inspector/v8-schema-agent-impl.h
index 6150201..e733aa0 100644
--- a/src/inspector/v8-schema-agent-impl.h
+++ b/src/inspector/v8-schema-agent-impl.h
@@ -13,7 +13,7 @@
 
 class V8InspectorSessionImpl;
 
-using protocol::ErrorString;
+using protocol::Response;
 
 class V8SchemaAgentImpl : public protocol::Schema::Backend {
  public:
@@ -21,8 +21,7 @@
                     protocol::DictionaryValue* state);
   ~V8SchemaAgentImpl() override;
 
-  void getDomains(
-      ErrorString*,
+  Response getDomains(
       std::unique_ptr<protocol::Array<protocol::Schema::Domain>>*) override;
 
  private:
diff --git a/src/inspector/v8-value-copier.cc b/src/inspector/v8-value-copier.cc
index 09d86b7..fcaeb61 100644
--- a/src/inspector/v8-value-copier.cc
+++ b/src/inspector/v8-value-copier.cc
@@ -73,6 +73,96 @@
   int m_calls;
 };
 
+protocol::Response toProtocolValue(v8::Local<v8::Context> context,
+                                   v8::Local<v8::Value> value, int maxDepth,
+                                   std::unique_ptr<protocol::Value>* result) {
+  using protocol::Response;
+  if (value.IsEmpty()) {
+    UNREACHABLE();
+    return Response::InternalError();
+  }
+
+  if (!maxDepth) return Response::Error("Object reference chain is too long");
+  maxDepth--;
+
+  if (value->IsNull() || value->IsUndefined()) {
+    *result = protocol::Value::null();
+    return Response::OK();
+  }
+  if (value->IsBoolean()) {
+    *result =
+        protocol::FundamentalValue::create(value.As<v8::Boolean>()->Value());
+    return Response::OK();
+  }
+  if (value->IsNumber()) {
+    double doubleValue = value.As<v8::Number>()->Value();
+    int intValue = static_cast<int>(doubleValue);
+    if (intValue == doubleValue) {
+      *result = protocol::FundamentalValue::create(intValue);
+      return Response::OK();
+    }
+    *result = protocol::FundamentalValue::create(doubleValue);
+    return Response::OK();
+  }
+  if (value->IsString()) {
+    *result =
+        protocol::StringValue::create(toProtocolString(value.As<v8::String>()));
+    return Response::OK();
+  }
+  if (value->IsArray()) {
+    v8::Local<v8::Array> array = value.As<v8::Array>();
+    std::unique_ptr<protocol::ListValue> inspectorArray =
+        protocol::ListValue::create();
+    uint32_t length = array->Length();
+    for (uint32_t i = 0; i < length; i++) {
+      v8::Local<v8::Value> value;
+      if (!array->Get(context, i).ToLocal(&value))
+        return Response::InternalError();
+      std::unique_ptr<protocol::Value> element;
+      Response response = toProtocolValue(context, value, maxDepth, &element);
+      if (!response.isSuccess()) return response;
+      inspectorArray->pushValue(std::move(element));
+    }
+    *result = std::move(inspectorArray);
+    return Response::OK();
+  }
+  if (value->IsObject()) {
+    std::unique_ptr<protocol::DictionaryValue> jsonObject =
+        protocol::DictionaryValue::create();
+    v8::Local<v8::Object> object = v8::Local<v8::Object>::Cast(value);
+    v8::Local<v8::Array> propertyNames;
+    if (!object->GetPropertyNames(context).ToLocal(&propertyNames))
+      return Response::InternalError();
+    uint32_t length = propertyNames->Length();
+    for (uint32_t i = 0; i < length; i++) {
+      v8::Local<v8::Value> name;
+      if (!propertyNames->Get(context, i).ToLocal(&name))
+        return Response::InternalError();
+      // FIXME(yurys): v8::Object should support GetOwnPropertyNames
+      if (name->IsString()) {
+        v8::Maybe<bool> hasRealNamedProperty = object->HasRealNamedProperty(
+            context, v8::Local<v8::String>::Cast(name));
+        if (!hasRealNamedProperty.IsJust() || !hasRealNamedProperty.FromJust())
+          continue;
+      }
+      v8::Local<v8::String> propertyName;
+      if (!name->ToString(context).ToLocal(&propertyName)) continue;
+      v8::Local<v8::Value> property;
+      if (!object->Get(context, name).ToLocal(&property))
+        return Response::InternalError();
+      std::unique_ptr<protocol::Value> propertyValue;
+      Response response =
+          toProtocolValue(context, property, maxDepth, &propertyValue);
+      if (!response.isSuccess()) return response;
+      jsonObject->setValue(toProtocolString(propertyName),
+                           std::move(propertyValue));
+    }
+    *result = std::move(jsonObject);
+    return Response::OK();
+  }
+  return Response::Error("Object couldn't be returned by value");
+}
+
 }  // namespace
 
 v8::MaybeLocal<v8::Value> copyValueFromDebuggerContext(
@@ -107,4 +197,10 @@
   return array->CreateDataProperty(context, index, value);
 }
 
+protocol::Response toProtocolValue(v8::Local<v8::Context> context,
+                                   v8::Local<v8::Value> value,
+                                   std::unique_ptr<protocol::Value>* result) {
+  return toProtocolValue(context, value, 1000, result);
+}
+
 }  // namespace v8_inspector
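The new toProtocolValue above walks a v8::Value tree with a recursion budget (1000 by default) and encodes a number as an integer only when the double converts back exactly. Below is a self-contained sketch of those two decisions, with plain structs standing in for the v8 and protocol types (all names here are illustrative):

    #include <iostream>
    #include <memory>
    #include <vector>

    // Illustrative protocol-value stand-in: null, int, double, or list.
    struct Value {
      enum class Kind { kNull, kInt, kDouble, kList };
      Kind kind = Kind::kNull;
      int intValue = 0;
      double doubleValue = 0;
      std::vector<std::unique_ptr<Value>> list;
    };

    // Illustrative input tree: a number, or an array of children.
    struct Node {
      bool isNumber = false;
      double number = 0;
      std::vector<Node> children;
    };

    // Returns false once the reference chain exceeds maxDepth, mirroring the
    // "Object reference chain is too long" failure above.
    bool toValue(const Node& node, int maxDepth, std::unique_ptr<Value>* result) {
      if (!maxDepth) return false;
      maxDepth--;
      auto value = std::make_unique<Value>();
      if (node.isNumber) {
        int intValue = static_cast<int>(node.number);
        if (intValue == node.number) {  // exact round-trip: encode as integer
          value->kind = Value::Kind::kInt;
          value->intValue = intValue;
        } else {
          value->kind = Value::Kind::kDouble;
          value->doubleValue = node.number;
        }
      } else {
        value->kind = Value::Kind::kList;
        for (const Node& child : node.children) {
          std::unique_ptr<Value> element;
          if (!toValue(child, maxDepth, &element)) return false;  // propagate
          value->list.push_back(std::move(element));
        }
      }
      *result = std::move(value);
      return true;
    }

    int main() {
      Node two, half, root;
      two.isNumber = true;  two.number = 2.0;   // encoded as int 2
      half.isNumber = true; half.number = 2.5;  // encoded as double 2.5
      root.children.push_back(two);
      root.children.push_back(half);
      std::unique_ptr<Value> out;
      std::cout << toValue(root, 1000, &out) << "\n";  // 1: success
      std::cout << toValue(root, 1, &out) << "\n";     // 0: depth exhausted
    }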
diff --git a/src/inspector/v8-value-copier.h b/src/inspector/v8-value-copier.h
index c24a564..ee887e5 100644
--- a/src/inspector/v8-value-copier.h
+++ b/src/inspector/v8-value-copier.h
@@ -5,6 +5,8 @@
 #ifndef V8_INSPECTOR_V8VALUECOPIER_H_
 #define V8_INSPECTOR_V8VALUECOPIER_H_
 
+#include "src/inspector/protocol/Protocol.h"
+
 #include "include/v8.h"
 
 namespace v8_inspector {
@@ -19,6 +21,9 @@
 v8::Maybe<bool> createDataProperty(v8::Local<v8::Context>, v8::Local<v8::Array>,
                                    int index, v8::Local<v8::Value>);
 
+protocol::Response toProtocolValue(v8::Local<v8::Context>, v8::Local<v8::Value>,
+                                   std::unique_ptr<protocol::Value>* result);
+
 }  // namespace v8_inspector
 
 #endif  // V8_INSPECTOR_V8VALUECOPIER_H_
diff --git a/src/interface-descriptors.cc b/src/interface-descriptors.cc
index 2628b9f..d14b1a1 100644
--- a/src/interface-descriptors.cc
+++ b/src/interface-descriptors.cc
@@ -183,22 +183,6 @@
   data->InitializePlatformSpecific(len, registers);
 }
 
-void StoreGlobalViaContextDescriptor::InitializePlatformIndependent(
-    CallInterfaceDescriptorData* data) {
-  // kSlot, kValue
-  MachineType machine_types[] = {MachineType::Int32(),
-                                 MachineType::AnyTagged()};
-  data->InitializePlatformIndependent(arraysize(machine_types), 0,
-                                      machine_types);
-}
-
-void StoreGlobalViaContextDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {SlotRegister(), ValueRegister()};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
 void StringCompareDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {LeftRegister(), RightRegister()};
@@ -233,7 +217,6 @@
                                       machine_types);
 }
 
-
 void LoadWithVectorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {ReceiverRegister(), NameRegister(), SlotRegister(),
@@ -241,6 +224,24 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void LoadICProtoArrayDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kReceiver, kName, kSlot, kVector, kHandler
+  MachineType machine_types[] = {
+      MachineType::AnyTagged(), MachineType::AnyTagged(),
+      MachineType::TaggedSigned(), MachineType::AnyTagged(),
+      MachineType::AnyTagged()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
+}
+
+void LoadICProtoArrayDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {ReceiverRegister(), NameRegister(), SlotRegister(),
+                          VectorRegister(), HandlerRegister()};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
 void StoreWithVectorDescriptor::InitializePlatformIndependent(
     CallInterfaceDescriptorData* data) {
   // kReceiver, kName, kValue, kSlot, kVector
@@ -378,14 +379,35 @@
 
 void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformIndependent(
     CallInterfaceDescriptorData* data) {
-  // kFunction, kSlot, kVector
-  MachineType machine_types[] = {MachineType::TaggedPointer(),
-                                 MachineType::TaggedSigned(),
-                                 MachineType::AnyTagged()};
+  // kFunction, kActualArgumentsCount, kSlot, kVector
+  MachineType machine_types[] = {
+      MachineType::TaggedPointer(), MachineType::Int32(),
+      MachineType::TaggedSigned(), MachineType::AnyTagged()};
   data->InitializePlatformIndependent(arraysize(machine_types), 0,
                                       machine_types);
 }
 
+void BuiltinDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  MachineType machine_types[] = {MachineType::AnyTagged(),
+                                 MachineType::Int32()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
+}
+
+void BuiltinDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {NewTargetRegister(), ArgumentsCountRegister()};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+const Register BuiltinDescriptor::ArgumentsCountRegister() {
+  return kJavaScriptCallArgCountRegister;
+}
+const Register BuiltinDescriptor::NewTargetRegister() {
+  return kJavaScriptCallNewTargetRegister;
+}
+
 void ArrayNoArgumentConstructorDescriptor::InitializePlatformIndependent(
     CallInterfaceDescriptorData* data) {
   // kFunction, kAllocationSite, kActualArgumentsCount, kFunctionParameter
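The BuiltinDescriptor added above follows the usual two-part descriptor scheme: a platform-independent signature of MachineTypes plus a platform-specific register assignment (here the JavaScript call-convention registers). A simplified model of that split, with placeholder types and register names rather than V8's real machinery:

    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <vector>

    // Simplified stand-ins for the descriptor machinery (illustrative only).
    enum class MachineType { kAnyTagged, kInt32 };
    using Register = std::string;

    class DescriptorData {
     public:
      void InitializePlatformIndependent(std::vector<MachineType> types) {
        types_ = std::move(types);
      }
      void InitializePlatformSpecific(std::vector<Register> registers) {
        registers_ = std::move(registers);
      }
      size_t parameter_count() const { return types_.size(); }
      const Register& reg(size_t i) const { return registers_[i]; }

     private:
      std::vector<MachineType> types_;
      std::vector<Register> registers_;
    };

    // Mirrors the shape of BuiltinDescriptor: parameters (kNewTarget,
    // kArgumentsCount) typed AnyTagged and Int32. The register names below
    // are placeholders for kJavaScriptCallNewTargetRegister and
    // kJavaScriptCallArgCountRegister, which differ per platform.
    void InitializeBuiltinDescriptor(DescriptorData* data) {
      data->InitializePlatformIndependent(
          {MachineType::kAnyTagged, MachineType::kInt32});
      data->InitializePlatformSpecific({"new_target_reg", "arg_count_reg"});
    }

    int main() {
      DescriptorData data;
      InitializeBuiltinDescriptor(&data);
      std::cout << data.parameter_count() << " params; new_target in "
                << data.reg(0) << ", arg_count in " << data.reg(1) << "\n";
    }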
diff --git a/src/interface-descriptors.h b/src/interface-descriptors.h
index 09dc377..3b49041 100644
--- a/src/interface-descriptors.h
+++ b/src/interface-descriptors.h
@@ -8,6 +8,7 @@
 #include <memory>
 
 #include "src/assembler.h"
+#include "src/globals.h"
 #include "src/macro-assembler.h"
 
 namespace v8 {
@@ -20,6 +21,7 @@
   V(ContextOnly)                          \
   V(Load)                                 \
   V(LoadWithVector)                       \
+  V(LoadICProtoArray)                     \
   V(LoadGlobal)                           \
   V(LoadGlobalWithVector)                 \
   V(Store)                                \
@@ -48,7 +50,6 @@
   V(ConstructStub)                        \
   V(ConstructTrampoline)                  \
   V(RegExpExec)                           \
-  V(RegExpConstructResult)                \
   V(CopyFastSmiOrObjectElements)          \
   V(TransitionElementsKind)               \
   V(AllocateHeapNumber)                   \
@@ -62,6 +63,7 @@
   V(AllocateInt8x16)                      \
   V(AllocateUint8x16)                     \
   V(AllocateBool8x16)                     \
+  V(Builtin)                              \
   V(ArrayNoArgumentConstructor)           \
   V(ArraySingleArgumentConstructor)       \
   V(ArrayNArgumentsConstructor)           \
@@ -82,7 +84,6 @@
   V(ArgumentAdaptor)                      \
   V(ApiCallback)                          \
   V(ApiGetter)                            \
-  V(StoreGlobalViaContext)                \
   V(MathPowTagged)                        \
   V(MathPowInteger)                       \
   V(GrowArrayElements)                    \
@@ -93,7 +94,7 @@
   V(InterpreterCEntry)                    \
   V(ResumeGenerator)
 
-class CallInterfaceDescriptorData {
+class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
  public:
   CallInterfaceDescriptorData() : register_param_count_(-1), param_count_(-1) {}
 
@@ -389,6 +390,15 @@
   static const Register VectorRegister();
 };
 
+class LoadICProtoArrayDescriptor : public LoadWithVectorDescriptor {
+ public:
+  DEFINE_PARAMETERS(kReceiver, kName, kSlot, kVector, kHandler)
+  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(LoadICProtoArrayDescriptor,
+                                               LoadWithVectorDescriptor)
+
+  static const Register HandlerRegister();
+};
+
 class LoadGlobalWithVectorDescriptor : public LoadGlobalDescriptor {
  public:
   DEFINE_PARAMETERS(kSlot, kVector)
@@ -553,7 +563,7 @@
 class CallFunctionWithFeedbackAndVectorDescriptor
     : public CallInterfaceDescriptor {
  public:
-  DEFINE_PARAMETERS(kFunction, kSlot, kVector)
+  DEFINE_PARAMETERS(kFunction, kActualArgumentsCount, kSlot, kVector)
   DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
       CallFunctionWithFeedbackAndVectorDescriptor, CallInterfaceDescriptor)
 };
@@ -571,23 +581,6 @@
                                      CallInterfaceDescriptor)
 };
 
-class RegExpConstructResultDescriptor : public CallInterfaceDescriptor {
- public:
-  DEFINE_PARAMETERS(kLength, kIndex, kInput)
-  DECLARE_DESCRIPTOR(RegExpConstructResultDescriptor, CallInterfaceDescriptor)
-};
-
-
-class StoreGlobalViaContextDescriptor : public CallInterfaceDescriptor {
- public:
-  DEFINE_PARAMETERS(kSlot, kValue)
-  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StoreGlobalViaContextDescriptor,
-                                               CallInterfaceDescriptor)
-
-  static const Register SlotRegister();
-  static const Register ValueRegister();
-};
-
 class CopyFastSmiOrObjectElementsDescriptor : public CallInterfaceDescriptor {
  public:
   DEFINE_PARAMETERS(kObject)
@@ -615,6 +608,15 @@
 SIMD128_TYPES(SIMD128_ALLOC_DESC)
 #undef SIMD128_ALLOC_DESC
 
+class BuiltinDescriptor : public CallInterfaceDescriptor {
+ public:
+  DEFINE_PARAMETERS(kNewTarget, kArgumentsCount)
+  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(BuiltinDescriptor,
+                                               CallInterfaceDescriptor)
+  static const Register ArgumentsCountRegister();
+  static const Register NewTargetRegister();
+};
+
 class ArrayNoArgumentConstructorDescriptor : public CallInterfaceDescriptor {
  public:
   DEFINE_PARAMETERS(kFunction, kAllocationSite, kActualArgumentsCount,
diff --git a/src/interpreter/bytecode-array-builder.cc b/src/interpreter/bytecode-array-builder.cc
index dfa3950..904a8e0 100644
--- a/src/interpreter/bytecode-array-builder.cc
+++ b/src/interpreter/bytecode-array-builder.cc
@@ -31,7 +31,8 @@
       register_allocator_(fixed_register_count()),
       bytecode_array_writer_(zone, &constant_array_builder_,
                              source_position_mode),
-      pipeline_(&bytecode_array_writer_) {
+      pipeline_(&bytecode_array_writer_),
+      register_optimizer_(nullptr) {
   DCHECK_GE(parameter_count_, 0);
   DCHECK_GE(context_register_count_, 0);
   DCHECK_GE(local_register_count_, 0);
@@ -45,14 +46,12 @@
   }
 
   if (FLAG_ignition_reo) {
-    pipeline_ = new (zone) BytecodeRegisterOptimizer(
+    register_optimizer_ = new (zone) BytecodeRegisterOptimizer(
         zone, &register_allocator_, fixed_register_count(), parameter_count,
         pipeline_);
   }
 
-  return_position_ =
-      literal ? std::max(literal->start_position(), literal->end_position() - 1)
-              : kNoSourcePosition;
+  return_position_ = literal ? literal->return_position() : kNoSourcePosition;
 }
 
 Register BytecodeArrayBuilder::first_context_register() const {
@@ -75,108 +74,222 @@
   DCHECK(!bytecode_generated_);
   bytecode_generated_ = true;
 
+  int register_count = total_register_count();
+
+  if (register_optimizer_) {
+    register_optimizer_->Flush();
+    register_count = register_optimizer_->maxiumum_register_index() + 1;
+  }
+
   Handle<FixedArray> handler_table =
       handler_table_builder()->ToHandlerTable(isolate);
-  return pipeline_->ToBytecodeArray(isolate, total_register_count(),
-                                    parameter_count(), handler_table);
+  return pipeline_->ToBytecodeArray(isolate, register_count, parameter_count(),
+                                    handler_table);
 }
 
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
-                                  uint32_t operand1, uint32_t operand2,
-                                  uint32_t operand3) {
-  DCHECK(OperandsAreValid(bytecode, 4, operand0, operand1, operand2, operand3));
-  BytecodeNode node(bytecode, operand0, operand1, operand2, operand3,
-                    &latest_source_info_);
-  pipeline()->Write(&node);
+BytecodeSourceInfo BytecodeArrayBuilder::CurrentSourcePosition(
+    Bytecode bytecode) {
+  BytecodeSourceInfo source_position;
+  if (latest_source_info_.is_valid()) {
+    // Statement positions need to be emitted immediately.  Expression
+    // positions can be pushed back until a bytecode is found that can
+    // throw (if expression position filtering is turned on). We only
+    // invalidate the existing source position information if it is used.
+    if (latest_source_info_.is_statement() ||
+        !FLAG_ignition_filter_expression_positions ||
+        !Bytecodes::IsWithoutExternalSideEffects(bytecode)) {
+      source_position = latest_source_info_;
+      latest_source_info_.set_invalid();
+    }
+  }
+  return source_position;
 }
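CurrentSourcePosition above decides when a pending source position must be attached: statement positions flush immediately, while expression positions are held back as long as filtering is on and the bytecode has no external side effects, since such a bytecode cannot throw and the position would go unused. A standalone sketch of that decision, with stand-in types:

    #include <iostream>

    // Illustrative stand-ins for BytecodeSourceInfo.
    enum class PositionKind { kNone, kExpression, kStatement };

    struct SourceInfo {
      PositionKind kind = PositionKind::kNone;
      int offset = -1;
      bool is_valid() const { return kind != PositionKind::kNone; }
    };

    // Statements always flush; expressions are deferred while filtering is on
    // and the bytecode has no external side effects (i.e. cannot throw).
    SourceInfo TakePositionFor(SourceInfo* pending, bool filter_expressions,
                               bool has_external_side_effects) {
      SourceInfo taken;
      if (pending->is_valid()) {
        if (pending->kind == PositionKind::kStatement || !filter_expressions ||
            has_external_side_effects) {
          taken = *pending;
          *pending = SourceInfo();  // invalidate once used
        }
      }
      return taken;
    }

    int main() {
      SourceInfo pending;
      pending.kind = PositionKind::kExpression;
      pending.offset = 42;
      // Side-effect-free bytecode: the expression position stays pending.
      std::cout << TakePositionFor(&pending, true, false).is_valid() << "\n";  // 0
      // Throwing bytecode: the pending position is attached and consumed.
      std::cout << TakePositionFor(&pending, true, true).offset << "\n";  // 42
    }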
 
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
-                                  uint32_t operand1, uint32_t operand2) {
-  DCHECK(OperandsAreValid(bytecode, 3, operand0, operand1, operand2));
-  BytecodeNode node(bytecode, operand0, operand1, operand2,
-                    &latest_source_info_);
-  pipeline()->Write(&node);
-}
+namespace {
 
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
-                                  uint32_t operand1) {
-  DCHECK(OperandsAreValid(bytecode, 2, operand0, operand1));
-  BytecodeNode node(bytecode, operand0, operand1, &latest_source_info_);
-  pipeline()->Write(&node);
-}
+template <OperandTypeInfo type_info>
+class UnsignedOperandHelper {
+ public:
+  INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder, size_t value)) {
+    DCHECK(IsValid(value));
+    return static_cast<uint32_t>(value);
+  }
 
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0) {
-  DCHECK(OperandsAreValid(bytecode, 1, operand0));
-  BytecodeNode node(bytecode, operand0, &latest_source_info_);
-  pipeline()->Write(&node);
-}
+  INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder, int value)) {
+    DCHECK_GE(value, 0);
+    return Convert(builder, static_cast<size_t>(value));
+  }
 
-void BytecodeArrayBuilder::Output(Bytecode bytecode) {
-  DCHECK(OperandsAreValid(bytecode, 0));
-  BytecodeNode node(bytecode, &latest_source_info_);
-  pipeline()->Write(&node);
-}
+ private:
+  static bool IsValid(size_t value) {
+    switch (type_info) {
+      case OperandTypeInfo::kFixedUnsignedByte:
+        return value <= kMaxUInt8;
+      case OperandTypeInfo::kFixedUnsignedShort:
+        return value <= kMaxUInt16;
+      case OperandTypeInfo::kScalableUnsignedByte:
+        return value <= kMaxUInt32;
+      default:
+        UNREACHABLE();
+        return false;
+    }
+  }
+};
 
-void BytecodeArrayBuilder::OutputJump(Bytecode bytecode, BytecodeLabel* label) {
-  BytecodeNode node(bytecode, 0, &latest_source_info_);
-  pipeline_->WriteJump(&node, label);
-  LeaveBasicBlock();
-}
+template <OperandType>
+class OperandHelper {};
 
-void BytecodeArrayBuilder::OutputJump(Bytecode bytecode, uint32_t operand0,
-                                      BytecodeLabel* label) {
-  BytecodeNode node(bytecode, 0, operand0, &latest_source_info_);
-  pipeline_->WriteJump(&node, label);
-  LeaveBasicBlock();
-}
+#define DEFINE_UNSIGNED_OPERAND_HELPER(Name, Type) \
+  template <>                                      \
+  class OperandHelper<OperandType::k##Name>        \
+      : public UnsignedOperandHelper<Type> {};
+UNSIGNED_SCALAR_OPERAND_TYPE_LIST(DEFINE_UNSIGNED_OPERAND_HELPER)
+#undef DEFINE_UNSIGNED_OPERAND_HELPER
+
+template <>
+class OperandHelper<OperandType::kImm> {
+ public:
+  INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder, int value)) {
+    return static_cast<uint32_t>(value);
+  }
+};
+
+template <>
+class OperandHelper<OperandType::kReg> {
+ public:
+  INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder, Register reg)) {
+    return builder->GetInputRegisterOperand(reg);
+  }
+};
+
+template <>
+class OperandHelper<OperandType::kRegList> {
+ public:
+  INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder,
+                                 RegisterList reg_list)) {
+    return builder->GetInputRegisterListOperand(reg_list);
+  }
+};
+
+template <>
+class OperandHelper<OperandType::kRegPair> {
+ public:
+  INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder,
+                                 RegisterList reg_list)) {
+    DCHECK_EQ(reg_list.register_count(), 2);
+    return builder->GetInputRegisterListOperand(reg_list);
+  }
+};
+
+template <>
+class OperandHelper<OperandType::kRegOut> {
+ public:
+  INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder, Register reg)) {
+    return builder->GetOutputRegisterOperand(reg);
+  }
+};
+
+template <>
+class OperandHelper<OperandType::kRegOutPair> {
+ public:
+  INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder,
+                                 RegisterList reg_list)) {
+    DCHECK_EQ(2, reg_list.register_count());
+    return builder->GetOutputRegisterListOperand(reg_list);
+  }
+};
+
+template <>
+class OperandHelper<OperandType::kRegOutTriple> {
+ public:
+  INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder,
+                                 RegisterList reg_list)) {
+    DCHECK_EQ(3, reg_list.register_count());
+    return builder->GetOutputRegisterListOperand(reg_list);
+  }
+};
+
+}  // namespace
+
+template <OperandType... operand_types>
+class BytecodeNodeBuilder {
+ public:
+  template <typename... Operands>
+  INLINE(static BytecodeNode Make(BytecodeArrayBuilder* builder,
+                                  BytecodeSourceInfo source_info,
+                                  Bytecode bytecode, Operands... operands)) {
+    builder->PrepareToOutputBytecode(bytecode);
+    // The "OperandHelper<operand_types>::Convert(builder, operands)..." will
+    // expand both the OperandType... and Operands... parameter packs, e.g. for:
+    //   BytecodeNodeBuilder<OperandType::kReg, OperandType::kImm>::Make<
+    //       Register, int>(..., Register reg, int immediate)
+    // the code will expand into:
+    //    OperandHelper<OperandType::kReg>::Convert(builder, reg),
+    //    OperandHelper<OperandType::kImm>::Convert(builder, immediate),
+    return BytecodeNode(
+        bytecode, OperandHelper<operand_types>::Convert(builder, operands)...,
+        source_info);
+  }
+};
+
+#define DEFINE_BYTECODE_OUTPUT(name, accumulator_use, ...)                 \
+  template <typename... Operands>                                          \
+  void BytecodeArrayBuilder::Output##name(Operands... operands) {          \
+    BytecodeNode node(BytecodeNodeBuilder<__VA_ARGS__>::Make<Operands...>( \
+        this, CurrentSourcePosition(Bytecode::k##name), Bytecode::k##name, \
+        operands...));                                                     \
+    pipeline()->Write(&node);                                              \
+  }                                                                        \
+                                                                           \
+  template <typename... Operands>                                          \
+  void BytecodeArrayBuilder::Output##name(BytecodeLabel* label,            \
+                                          Operands... operands) {          \
+    DCHECK(Bytecodes::IsJump(Bytecode::k##name));                          \
+    BytecodeNode node(BytecodeNodeBuilder<__VA_ARGS__>::Make<Operands...>( \
+        this, CurrentSourcePosition(Bytecode::k##name), Bytecode::k##name, \
+        operands...));                                                     \
+    pipeline()->WriteJump(&node, label);                                   \
+    LeaveBasicBlock();                                                     \
+  }
+BYTECODE_LIST(DEFINE_BYTECODE_OUTPUT)
+#undef DEFINE_BYTECODE_OUTPUT
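BytecodeNodeBuilder and the DEFINE_BYTECODE_OUTPUT macro above rely on expanding the OperandType... and Operands... packs in lockstep, so each operand is converted by the helper matching its declared operand type. A minimal compilable analogue of that technique (OperandKind, Helper, and NodeBuilder are illustrative, not V8's types):

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Minimal analogue of OperandHelper: one conversion policy per kind.
    enum class OperandKind { kImm, kFlag };

    template <OperandKind>
    struct Helper;

    template <>
    struct Helper<OperandKind::kImm> {
      static uint32_t Convert(int value) { return static_cast<uint32_t>(value); }
    };

    template <>
    struct Helper<OperandKind::kFlag> {
      static uint32_t Convert(bool value) { return value ? 1u : 0u; }
    };

    // As in BytecodeNodeBuilder, the OperandKind... and Operands... packs
    // expand together, pairing each helper with its matching argument.
    template <OperandKind... kinds>
    struct NodeBuilder {
      template <typename... Operands>
      static std::vector<uint32_t> Make(Operands... operands) {
        return {Helper<kinds>::Convert(operands)...};
      }
    };

    int main() {
      auto node =
          NodeBuilder<OperandKind::kImm, OperandKind::kFlag>::Make(7, true);
      std::cout << node[0] << " " << node[1] << "\n";  // 7 1
    }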
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
                                                             Register reg,
                                                             int feedback_slot) {
   switch (op) {
     case Token::Value::ADD:
-      Output(Bytecode::kAdd, RegisterOperand(reg),
-             UnsignedOperand(feedback_slot));
+      OutputAdd(reg, feedback_slot);
       break;
     case Token::Value::SUB:
-      Output(Bytecode::kSub, RegisterOperand(reg),
-             UnsignedOperand(feedback_slot));
+      OutputSub(reg, feedback_slot);
       break;
     case Token::Value::MUL:
-      Output(Bytecode::kMul, RegisterOperand(reg),
-             UnsignedOperand(feedback_slot));
+      OutputMul(reg, feedback_slot);
       break;
     case Token::Value::DIV:
-      Output(Bytecode::kDiv, RegisterOperand(reg),
-             UnsignedOperand(feedback_slot));
+      OutputDiv(reg, feedback_slot);
       break;
     case Token::Value::MOD:
-      Output(Bytecode::kMod, RegisterOperand(reg),
-             UnsignedOperand(feedback_slot));
+      OutputMod(reg, feedback_slot);
       break;
     case Token::Value::BIT_OR:
-      Output(Bytecode::kBitwiseOr, RegisterOperand(reg),
-             UnsignedOperand(feedback_slot));
+      OutputBitwiseOr(reg, feedback_slot);
       break;
     case Token::Value::BIT_XOR:
-      Output(Bytecode::kBitwiseXor, RegisterOperand(reg),
-             UnsignedOperand(feedback_slot));
+      OutputBitwiseXor(reg, feedback_slot);
       break;
     case Token::Value::BIT_AND:
-      Output(Bytecode::kBitwiseAnd, RegisterOperand(reg),
-             UnsignedOperand(feedback_slot));
+      OutputBitwiseAnd(reg, feedback_slot);
       break;
     case Token::Value::SHL:
-      Output(Bytecode::kShiftLeft, RegisterOperand(reg),
-             UnsignedOperand(feedback_slot));
+      OutputShiftLeft(reg, feedback_slot);
       break;
     case Token::Value::SAR:
-      Output(Bytecode::kShiftRight, RegisterOperand(reg),
-             UnsignedOperand(feedback_slot));
+      OutputShiftRight(reg, feedback_slot);
       break;
     case Token::Value::SHR:
-      Output(Bytecode::kShiftRightLogical, RegisterOperand(reg),
-             UnsignedOperand(feedback_slot));
+      OutputShiftRightLogical(reg, feedback_slot);
       break;
     default:
       UNREACHABLE();
@@ -187,21 +300,21 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::CountOperation(Token::Value op,
                                                            int feedback_slot) {
   if (op == Token::Value::ADD) {
-    Output(Bytecode::kInc, UnsignedOperand(feedback_slot));
+    OutputInc(feedback_slot);
   } else {
     DCHECK_EQ(op, Token::Value::SUB);
-    Output(Bytecode::kDec, UnsignedOperand(feedback_slot));
+    OutputDec(feedback_slot);
   }
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LogicalNot() {
-  Output(Bytecode::kToBooleanLogicalNot);
+  OutputToBooleanLogicalNot();
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::TypeOf() {
-  Output(Bytecode::kTypeOf);
+  OutputTypeOf();
   return *this;
 }
 
@@ -209,38 +322,31 @@
     Token::Value op, Register reg, int feedback_slot) {
   switch (op) {
     case Token::Value::EQ:
-      Output(Bytecode::kTestEqual, RegisterOperand(reg),
-             UnsignedOperand(feedback_slot));
+      OutputTestEqual(reg, feedback_slot);
       break;
     case Token::Value::NE:
-      Output(Bytecode::kTestNotEqual, RegisterOperand(reg),
-             UnsignedOperand(feedback_slot));
+      OutputTestNotEqual(reg, feedback_slot);
       break;
     case Token::Value::EQ_STRICT:
-      Output(Bytecode::kTestEqualStrict, RegisterOperand(reg),
-             UnsignedOperand(feedback_slot));
+      OutputTestEqualStrict(reg, feedback_slot);
       break;
     case Token::Value::LT:
-      Output(Bytecode::kTestLessThan, RegisterOperand(reg),
-             UnsignedOperand(feedback_slot));
+      OutputTestLessThan(reg, feedback_slot);
       break;
     case Token::Value::GT:
-      Output(Bytecode::kTestGreaterThan, RegisterOperand(reg),
-             UnsignedOperand(feedback_slot));
+      OutputTestGreaterThan(reg, feedback_slot);
       break;
     case Token::Value::LTE:
-      Output(Bytecode::kTestLessThanOrEqual, RegisterOperand(reg),
-             UnsignedOperand(feedback_slot));
+      OutputTestLessThanOrEqual(reg, feedback_slot);
       break;
     case Token::Value::GTE:
-      Output(Bytecode::kTestGreaterThanOrEqual, RegisterOperand(reg),
-             UnsignedOperand(feedback_slot));
+      OutputTestGreaterThanOrEqual(reg, feedback_slot);
       break;
     case Token::Value::INSTANCEOF:
-      Output(Bytecode::kTestInstanceOf, RegisterOperand(reg));
+      OutputTestInstanceOf(reg);
       break;
     case Token::Value::IN:
-      Output(Bytecode::kTestIn, RegisterOperand(reg));
+      OutputTestIn(reg);
       break;
     default:
       UNREACHABLE();
@@ -250,7 +356,7 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadConstantPoolEntry(
     size_t entry) {
-  Output(Bytecode::kLdaConstant, UnsignedOperand(entry));
+  OutputLdaConstant(entry);
   return *this;
 }
 
@@ -258,70 +364,82 @@
     v8::internal::Smi* smi) {
   int32_t raw_smi = smi->value();
   if (raw_smi == 0) {
-    Output(Bytecode::kLdaZero);
+    OutputLdaZero();
   } else {
-    Output(Bytecode::kLdaSmi, SignedOperand(raw_smi));
+    OutputLdaSmi(raw_smi);
   }
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(Handle<Object> object) {
   size_t entry = GetConstantPoolEntry(object);
-  Output(Bytecode::kLdaConstant, UnsignedOperand(entry));
+  OutputLdaConstant(entry);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadUndefined() {
-  Output(Bytecode::kLdaUndefined);
+  OutputLdaUndefined();
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNull() {
-  Output(Bytecode::kLdaNull);
+  OutputLdaNull();
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadTheHole() {
-  Output(Bytecode::kLdaTheHole);
+  OutputLdaTheHole();
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadTrue() {
-  Output(Bytecode::kLdaTrue);
+  OutputLdaTrue();
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadFalse() {
-  Output(Bytecode::kLdaFalse);
+  OutputLdaFalse();
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAccumulatorWithRegister(
     Register reg) {
-  Output(Bytecode::kLdar, RegisterOperand(reg));
+  if (register_optimizer_) {
+    register_optimizer_->DoLdar(reg, CurrentSourcePosition(Bytecode::kLdar));
+  } else {
+    OutputLdar(reg);
+  }
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreAccumulatorInRegister(
     Register reg) {
-  Output(Bytecode::kStar, RegisterOperand(reg));
+  if (register_optimizer_) {
+    register_optimizer_->DoStar(reg, CurrentSourcePosition(Bytecode::kStar));
+  } else {
+    OutputStar(reg);
+  }
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::MoveRegister(Register from,
                                                          Register to) {
   DCHECK(from != to);
-  Output(Bytecode::kMov, RegisterOperand(from), RegisterOperand(to));
+  if (register_optimizer_) {
+    register_optimizer_->DoMov(from, to, CurrentSourcePosition(Bytecode::kMov));
+  } else {
+    OutputMov(from, to);
+  }
   return *this;
 }
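With the register optimizer wired in, Ldar, Star, and Mov are routed through DoLdar/DoStar/DoMov rather than emitted directly, and ToBytecodeArray calls Flush() and asks the optimizer for the maximum register index actually used. The toy below models only the load/store case and only the general idea of eliding redundant accumulator-register transfers; the real BytecodeRegisterOptimizer is substantially more involved:

    #include <algorithm>
    #include <iostream>
    #include <string>

    // Toy transfer-eliding optimizer. It remembers which register last
    // mirrored the accumulator and drops loads/stores that would be no-ops.
    class ToyRegisterOptimizer {
     public:
      void DoLdar(int reg) {
        if (reg == mirror_) return;  // accumulator already holds this value
        Emit("Ldar r" + std::to_string(reg));
        mirror_ = reg;
      }
      void DoStar(int reg) {
        if (reg == mirror_) return;  // register already mirrors the accumulator
        Emit("Star r" + std::to_string(reg));
        mirror_ = reg;
        max_register_ = std::max(max_register_, reg);
      }
      // Nothing is deferred in this toy; the real optimizer materializes any
      // pending register state here before the bytecode array is finalized.
      void Flush() {}
      int maximum_register_index() const { return max_register_; }

     private:
      void Emit(const std::string& s) { std::cout << s << "\n"; }
      int mirror_ = -1;
      int max_register_ = -1;
    };

    int main() {
      ToyRegisterOptimizer opt;
      opt.DoStar(1);  // emitted
      opt.DoLdar(1);  // elided: r1 already mirrors the accumulator
      opt.DoStar(2);  // emitted
      opt.Flush();
      std::cout << "max register: r" << opt.maximum_register_index() << "\n";
    }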
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(int feedback_slot,
                                                        TypeofMode typeof_mode) {
   if (typeof_mode == INSIDE_TYPEOF) {
-    Output(Bytecode::kLdaGlobalInsideTypeof, feedback_slot);
+    OutputLdaGlobalInsideTypeof(feedback_slot);
   } else {
     DCHECK_EQ(typeof_mode, NOT_INSIDE_TYPEOF);
-    Output(Bytecode::kLdaGlobal, UnsignedOperand(feedback_slot));
+    OutputLdaGlobal(feedback_slot);
   }
   return *this;
 }
@@ -330,12 +448,10 @@
     const Handle<String> name, int feedback_slot, LanguageMode language_mode) {
   size_t name_index = GetConstantPoolEntry(name);
   if (language_mode == SLOPPY) {
-    Output(Bytecode::kStaGlobalSloppy, UnsignedOperand(name_index),
-           UnsignedOperand(feedback_slot));
+    OutputStaGlobalSloppy(name_index, feedback_slot);
   } else {
     DCHECK_EQ(language_mode, STRICT);
-    Output(Bytecode::kStaGlobalStrict, UnsignedOperand(name_index),
-           UnsignedOperand(feedback_slot));
+    OutputStaGlobalStrict(name_index, feedback_slot);
   }
   return *this;
 }
@@ -343,16 +459,22 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadContextSlot(Register context,
                                                             int slot_index,
                                                             int depth) {
-  Output(Bytecode::kLdaContextSlot, RegisterOperand(context),
-         UnsignedOperand(slot_index), UnsignedOperand(depth));
+  if (context.is_current_context() && depth == 0) {
+    OutputLdaCurrentContextSlot(slot_index);
+  } else {
+    OutputLdaContextSlot(context, slot_index, depth);
+  }
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreContextSlot(Register context,
                                                              int slot_index,
                                                              int depth) {
-  Output(Bytecode::kStaContextSlot, RegisterOperand(context),
-         UnsignedOperand(slot_index), UnsignedOperand(depth));
+  if (context.is_current_context() && depth == 0) {
+    OutputStaCurrentContextSlot(slot_index);
+  } else {
+    OutputStaContextSlot(context, slot_index, depth);
+  }
   return *this;
 }
 
@@ -360,10 +482,10 @@
     const Handle<String> name, TypeofMode typeof_mode) {
   size_t name_index = GetConstantPoolEntry(name);
   if (typeof_mode == INSIDE_TYPEOF) {
-    Output(Bytecode::kLdaLookupSlotInsideTypeof, UnsignedOperand(name_index));
+    OutputLdaLookupSlotInsideTypeof(name_index);
   } else {
     DCHECK_EQ(typeof_mode, NOT_INSIDE_TYPEOF);
-    Output(Bytecode::kLdaLookupSlot, UnsignedOperand(name_index));
+    OutputLdaLookupSlot(name_index);
   }
   return *this;
 }
@@ -371,24 +493,26 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupContextSlot(
     const Handle<String> name, TypeofMode typeof_mode, int slot_index,
     int depth) {
-  Bytecode bytecode = (typeof_mode == INSIDE_TYPEOF)
-                          ? Bytecode::kLdaLookupContextSlotInsideTypeof
-                          : Bytecode::kLdaLookupContextSlot;
   size_t name_index = GetConstantPoolEntry(name);
-  Output(bytecode, UnsignedOperand(name_index), UnsignedOperand(slot_index),
-         UnsignedOperand(depth));
+  if (typeof_mode == INSIDE_TYPEOF) {
+    OutputLdaLookupContextSlotInsideTypeof(name_index, slot_index, depth);
+  } else {
+    DCHECK(typeof_mode == NOT_INSIDE_TYPEOF);
+    OutputLdaLookupContextSlot(name_index, slot_index, depth);
+  }
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupGlobalSlot(
     const Handle<String> name, TypeofMode typeof_mode, int feedback_slot,
     int depth) {
-  Bytecode bytecode = (typeof_mode == INSIDE_TYPEOF)
-                          ? Bytecode::kLdaLookupGlobalSlotInsideTypeof
-                          : Bytecode::kLdaLookupGlobalSlot;
   size_t name_index = GetConstantPoolEntry(name);
-  Output(bytecode, UnsignedOperand(name_index), UnsignedOperand(feedback_slot),
-         UnsignedOperand(depth));
+  if (typeof_mode == INSIDE_TYPEOF) {
+    OutputLdaLookupGlobalSlotInsideTypeof(name_index, feedback_slot, depth);
+  } else {
+    DCHECK(typeof_mode == NOT_INSIDE_TYPEOF);
+    OutputLdaLookupGlobalSlot(name_index, feedback_slot, depth);
+  }
   return *this;
 }
 
@@ -396,10 +520,10 @@
     const Handle<String> name, LanguageMode language_mode) {
   size_t name_index = GetConstantPoolEntry(name);
   if (language_mode == SLOPPY) {
-    Output(Bytecode::kStaLookupSlotSloppy, UnsignedOperand(name_index));
+    OutputStaLookupSlotSloppy(name_index);
   } else {
     DCHECK_EQ(language_mode, STRICT);
-    Output(Bytecode::kStaLookupSlotStrict, UnsignedOperand(name_index));
+    OutputStaLookupSlotStrict(name_index);
   }
   return *this;
 }
@@ -407,15 +531,13 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
     Register object, const Handle<Name> name, int feedback_slot) {
   size_t name_index = GetConstantPoolEntry(name);
-  Output(Bytecode::kLdaNamedProperty, RegisterOperand(object),
-         UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
+  OutputLdaNamedProperty(object, name_index, feedback_slot);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
     Register object, int feedback_slot) {
-  Output(Bytecode::kLdaKeyedProperty, RegisterOperand(object),
-         UnsignedOperand(feedback_slot));
+  OutputLdaKeyedProperty(object, feedback_slot);
   return *this;
 }
 
@@ -424,12 +546,10 @@
     LanguageMode language_mode) {
   size_t name_index = GetConstantPoolEntry(name);
   if (language_mode == SLOPPY) {
-    Output(Bytecode::kStaNamedPropertySloppy, RegisterOperand(object),
-           UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
+    OutputStaNamedPropertySloppy(object, name_index, feedback_slot);
   } else {
     DCHECK_EQ(language_mode, STRICT);
-    Output(Bytecode::kStaNamedPropertyStrict, RegisterOperand(object),
-           UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
+    OutputStaNamedPropertyStrict(object, name_index, feedback_slot);
   }
   return *this;
 }
@@ -438,27 +558,24 @@
     Register object, Register key, int feedback_slot,
     LanguageMode language_mode) {
   if (language_mode == SLOPPY) {
-    Output(Bytecode::kStaKeyedPropertySloppy, RegisterOperand(object),
-           RegisterOperand(key), UnsignedOperand(feedback_slot));
+    OutputStaKeyedPropertySloppy(object, key, feedback_slot);
   } else {
     DCHECK_EQ(language_mode, STRICT);
-    Output(Bytecode::kStaKeyedPropertyStrict, RegisterOperand(object),
-           RegisterOperand(key), UnsignedOperand(feedback_slot));
+    OutputStaKeyedPropertyStrict(object, key, feedback_slot);
   }
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateClosure(size_t entry,
                                                           int flags) {
-  Output(Bytecode::kCreateClosure, UnsignedOperand(entry),
-         UnsignedOperand(flags));
+  OutputCreateClosure(entry, flags);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateBlockContext(
     Handle<ScopeInfo> scope_info) {
   size_t entry = GetConstantPoolEntry(scope_info);
-  Output(Bytecode::kCreateBlockContext, UnsignedOperand(entry));
+  OutputCreateBlockContext(entry);
   return *this;
 }
 
@@ -466,21 +583,19 @@
     Register exception, Handle<String> name, Handle<ScopeInfo> scope_info) {
   size_t name_index = GetConstantPoolEntry(name);
   size_t scope_info_index = GetConstantPoolEntry(scope_info);
-  Output(Bytecode::kCreateCatchContext, RegisterOperand(exception),
-         UnsignedOperand(name_index), UnsignedOperand(scope_info_index));
+  OutputCreateCatchContext(exception, name_index, scope_info_index);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateFunctionContext(int slots) {
-  Output(Bytecode::kCreateFunctionContext, UnsignedOperand(slots));
+  OutputCreateFunctionContext(slots);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateWithContext(
     Register object, Handle<ScopeInfo> scope_info) {
   size_t scope_info_index = GetConstantPoolEntry(scope_info);
-  Output(Bytecode::kCreateWithContext, RegisterOperand(object),
-         UnsignedOperand(scope_info_index));
+  OutputCreateWithContext(object, scope_info_index);
   return *this;
 }
 
@@ -488,13 +603,13 @@
     CreateArgumentsType type) {
   switch (type) {
     case CreateArgumentsType::kMappedArguments:
-      Output(Bytecode::kCreateMappedArguments);
+      OutputCreateMappedArguments();
       break;
     case CreateArgumentsType::kUnmappedArguments:
-      Output(Bytecode::kCreateUnmappedArguments);
+      OutputCreateUnmappedArguments();
       break;
     case CreateArgumentsType::kRestParameter:
-      Output(Bytecode::kCreateRestParameter);
+      OutputCreateRestParameter();
       break;
     default:
       UNREACHABLE();
@@ -505,17 +620,14 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateRegExpLiteral(
     Handle<String> pattern, int literal_index, int flags) {
   size_t pattern_entry = GetConstantPoolEntry(pattern);
-  Output(Bytecode::kCreateRegExpLiteral, UnsignedOperand(pattern_entry),
-         UnsignedOperand(literal_index), UnsignedOperand(flags));
+  OutputCreateRegExpLiteral(pattern_entry, literal_index, flags);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArrayLiteral(
     Handle<FixedArray> constant_elements, int literal_index, int flags) {
   size_t constant_elements_entry = GetConstantPoolEntry(constant_elements);
-  Output(Bytecode::kCreateArrayLiteral,
-         UnsignedOperand(constant_elements_entry),
-         UnsignedOperand(literal_index), UnsignedOperand(flags));
+  OutputCreateArrayLiteral(constant_elements_entry, literal_index, flags);
   return *this;
 }
 
@@ -523,42 +635,43 @@
     Handle<FixedArray> constant_properties, int literal_index, int flags,
     Register output) {
   size_t constant_properties_entry = GetConstantPoolEntry(constant_properties);
-  Output(Bytecode::kCreateObjectLiteral,
-         UnsignedOperand(constant_properties_entry),
-         UnsignedOperand(literal_index), UnsignedOperand(flags),
-         RegisterOperand(output));
+  OutputCreateObjectLiteral(constant_properties_entry, literal_index, flags,
+                            output);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::PushContext(Register context) {
-  Output(Bytecode::kPushContext, RegisterOperand(context));
+  OutputPushContext(context);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::PopContext(Register context) {
-  Output(Bytecode::kPopContext, RegisterOperand(context));
+  OutputPopContext(context);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ConvertAccumulatorToObject(
     Register out) {
-  Output(Bytecode::kToObject, RegisterOperand(out));
+  OutputToObject(out);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ConvertAccumulatorToName(
     Register out) {
-  Output(Bytecode::kToName, RegisterOperand(out));
+  OutputToName(out);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ConvertAccumulatorToNumber(
     Register out) {
-  Output(Bytecode::kToNumber, RegisterOperand(out));
+  OutputToNumber(out);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(BytecodeLabel* label) {
+  // Flush the register optimizer when binding a label to ensure all
+  // expected registers are valid when jumping to this label.
+  if (register_optimizer_) register_optimizer_->Flush();
   pipeline_->BindLabel(label);
   LeaveBasicBlock();
   return *this;
@@ -572,42 +685,42 @@
 }
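// Editorial sketch of why Bind() flushes the optimizer (assuming the
// BytecodeRegisterOptimizer behavior implied elsewhere in this patch): a
// bound label is a control-flow merge point, so register moves the optimizer
// has elided on the fall-through path must be materialized first, e.g.:
//
//   builder.StoreAccumulatorInRegister(r0);  // may be elided by optimizer
//   builder.Bind(&label);                    // Flush() materializes r0 here,
//                                            // so code jumping to the label
//                                            // sees the same register state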
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::Jump(BytecodeLabel* label) {
-  OutputJump(Bytecode::kJump, label);
+  OutputJump(label, 0);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfTrue(BytecodeLabel* label) {
   // The peephole optimizer attempts to simplify JumpIfToBooleanTrue
   // to JumpIfTrue.
-  OutputJump(Bytecode::kJumpIfToBooleanTrue, label);
+  OutputJumpIfToBooleanTrue(label, 0);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfFalse(BytecodeLabel* label) {
-  OutputJump(Bytecode::kJumpIfToBooleanFalse, label);
+  OutputJumpIfToBooleanFalse(label, 0);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNull(BytecodeLabel* label) {
-  OutputJump(Bytecode::kJumpIfNull, label);
+  OutputJumpIfNull(label, 0);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfUndefined(
     BytecodeLabel* label) {
-  OutputJump(Bytecode::kJumpIfUndefined, label);
+  OutputJumpIfUndefined(label, 0);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNotHole(
     BytecodeLabel* label) {
-  OutputJump(Bytecode::kJumpIfNotHole, label);
+  OutputJumpIfNotHole(label, 0);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::JumpLoop(BytecodeLabel* label,
                                                      int loop_depth) {
-  OutputJump(Bytecode::kJumpLoop, UnsignedOperand(loop_depth), label);
+  OutputJumpLoop(label, 0, loop_depth);
   return *this;
 }
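// Editorial note: the literal 0 passed to every OutputJump* call above is a
// placeholder for the jump-offset operand. The real offset is patched in by
// BytecodeArrayWriter once the label is bound; see the
// DCHECK_EQ(0u, node->operand(0)) added to EmitJump later in this patch.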
 
@@ -625,44 +738,42 @@
     // statement's position.
     latest_source_info_.ForceExpressionPosition(position);
   }
-  Output(Bytecode::kStackCheck);
+  OutputStackCheck();
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::Throw() {
-  Output(Bytecode::kThrow);
+  OutputThrow();
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ReThrow() {
-  Output(Bytecode::kReThrow);
+  OutputReThrow();
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::Return() {
   SetReturnPosition();
-  Output(Bytecode::kReturn);
+  OutputReturn();
   return_seen_in_block_ = true;
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::Debugger() {
-  Output(Bytecode::kDebugger);
+  OutputDebugger();
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInPrepare(
     Register receiver, RegisterList cache_info_triple) {
   DCHECK_EQ(3, cache_info_triple.register_count());
-  Output(Bytecode::kForInPrepare, RegisterOperand(receiver),
-         RegisterOperand(cache_info_triple.first_register()));
+  OutputForInPrepare(receiver, cache_info_triple);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInContinue(
     Register index, Register cache_length) {
-  Output(Bytecode::kForInContinue, RegisterOperand(index),
-         RegisterOperand(cache_length));
+  OutputForInContinue(index, cache_length);
   return *this;
 }
 
@@ -670,27 +781,36 @@
     Register receiver, Register index, RegisterList cache_type_array_pair,
     int feedback_slot) {
   DCHECK_EQ(2, cache_type_array_pair.register_count());
-  Output(Bytecode::kForInNext, RegisterOperand(receiver),
-         RegisterOperand(index),
-         RegisterOperand(cache_type_array_pair.first_register()),
-         UnsignedOperand(feedback_slot));
+  OutputForInNext(receiver, index, cache_type_array_pair, feedback_slot);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInStep(Register index) {
-  Output(Bytecode::kForInStep, RegisterOperand(index));
+  OutputForInStep(index);
+  return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreModuleVariable(int cell_index,
+                                                                int depth) {
+  OutputStaModuleVariable(cell_index, depth);
+  return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadModuleVariable(int cell_index,
+                                                               int depth) {
+  OutputLdaModuleVariable(cell_index, depth);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::SuspendGenerator(
     Register generator) {
-  Output(Bytecode::kSuspendGenerator, RegisterOperand(generator));
+  OutputSuspendGenerator(generator);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ResumeGenerator(
     Register generator) {
-  Output(Bytecode::kResumeGenerator, RegisterOperand(generator));
+  OutputResumeGenerator(generator);
   return *this;
 }
 
@@ -722,18 +842,18 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
                                                  RegisterList args,
                                                  int feedback_slot,
+                                                 Call::CallType call_type,
                                                  TailCallMode tail_call_mode) {
   if (tail_call_mode == TailCallMode::kDisallow) {
-    Output(Bytecode::kCall, RegisterOperand(callable),
-           RegisterOperand(args.first_register()),
-           UnsignedOperand(args.register_count()),
-           UnsignedOperand(feedback_slot));
+    if (call_type == Call::NAMED_PROPERTY_CALL ||
+        call_type == Call::KEYED_PROPERTY_CALL) {
+      OutputCallProperty(callable, args, args.register_count(), feedback_slot);
+    } else {
+      OutputCall(callable, args, args.register_count(), feedback_slot);
+    }
   } else {
     DCHECK(tail_call_mode == TailCallMode::kAllow);
-    Output(Bytecode::kTailCall, RegisterOperand(callable),
-           RegisterOperand(args.first_register()),
-           UnsignedOperand(args.register_count()),
-           UnsignedOperand(feedback_slot));
+    OutputTailCall(callable, args, args.register_count(), feedback_slot);
   }
   return *this;
 }
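// Editorial example (illustrative JS): the new |call_type| operand routes
// property calls to a dedicated bytecode, presumably so later pipeline
// stages can exploit that the callee was loaded off the receiver:
//
//   o.f(x);   // Call::NAMED_PROPERTY_CALL -> OutputCallProperty
//   o[k](x);  // Call::KEYED_PROPERTY_CALL -> OutputCallProperty
//   f(x);     // e.g. Call::GLOBAL_CALL    -> OutputCall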
@@ -741,10 +861,7 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::New(Register constructor,
                                                 RegisterList args,
                                                 int feedback_slot_id) {
-  Output(Bytecode::kNew, RegisterOperand(constructor),
-         RegisterOperand(args.first_register()),
-         UnsignedOperand(args.register_count()),
-         UnsignedOperand(feedback_slot_id));
+  OutputNew(constructor, args, args.register_count(), feedback_slot_id);
   return *this;
 }
 
@@ -752,17 +869,15 @@
     Runtime::FunctionId function_id, RegisterList args) {
   DCHECK_EQ(1, Runtime::FunctionForId(function_id)->result_size);
   DCHECK(Bytecodes::SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
-  Bytecode bytecode;
-  uint32_t id;
   if (IntrinsicsHelper::IsSupported(function_id)) {
-    bytecode = Bytecode::kInvokeIntrinsic;
-    id = static_cast<uint32_t>(IntrinsicsHelper::FromRuntimeId(function_id));
+    IntrinsicsHelper::IntrinsicId intrinsic_id =
+        IntrinsicsHelper::FromRuntimeId(function_id);
+    OutputInvokeIntrinsic(static_cast<int>(intrinsic_id), args,
+                          args.register_count());
   } else {
-    bytecode = Bytecode::kCallRuntime;
-    id = static_cast<uint32_t>(function_id);
+    OutputCallRuntime(static_cast<int>(function_id), args,
+                      args.register_count());
   }
-  Output(bytecode, id, RegisterOperand(args.first_register()),
-         UnsignedOperand(args.register_count()));
   return *this;
 }
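// Editorial sketch: runtime calls are now split between two generated
// emitters. Function ids with an interpreter intrinsic take the fast path
// (the ids below are illustrative, not taken from this patch):
//
//   builder.CallRuntime(Runtime::kInlineIsArray, args);
//       // IntrinsicsHelper::IsSupported() -> OutputInvokeIntrinsic
//   builder.CallRuntime(Runtime::kToFastProperties, args);
//       // no intrinsic support            -> OutputCallRuntime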
 
@@ -782,10 +897,8 @@
   DCHECK_EQ(2, Runtime::FunctionForId(function_id)->result_size);
   DCHECK(Bytecodes::SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
   DCHECK_EQ(2, return_pair.register_count());
-  Output(Bytecode::kCallRuntimeForPair, static_cast<uint16_t>(function_id),
-         RegisterOperand(args.first_register()),
-         UnsignedOperand(args.register_count()),
-         RegisterOperand(return_pair.first_register()));
+  OutputCallRuntimeForPair(static_cast<uint16_t>(function_id), args,
+                           args.register_count(), return_pair);
   return *this;
 }
 
@@ -797,19 +910,17 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(int context_index,
                                                           RegisterList args) {
-  Output(Bytecode::kCallJSRuntime, UnsignedOperand(context_index),
-         RegisterOperand(args.first_register()),
-         UnsignedOperand(args.register_count()));
+  OutputCallJSRuntime(context_index, args, args.register_count());
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::Delete(Register object,
                                                    LanguageMode language_mode) {
   if (language_mode == SLOPPY) {
-    Output(Bytecode::kDeletePropertySloppy, RegisterOperand(object));
+    OutputDeletePropertySloppy(object);
   } else {
     DCHECK_EQ(language_mode, STRICT);
-    Output(Bytecode::kDeletePropertyStrict, RegisterOperand(object));
+    OutputDeletePropertyStrict(object);
   }
   return *this;
 }
@@ -850,88 +961,50 @@
   }
 }
 
-bool BytecodeArrayBuilder::OperandsAreValid(
-    Bytecode bytecode, int operand_count, uint32_t operand0, uint32_t operand1,
-    uint32_t operand2, uint32_t operand3) const {
-  if (Bytecodes::NumberOfOperands(bytecode) != operand_count) {
-    return false;
-  }
-
-  uint32_t operands[] = {operand0, operand1, operand2, operand3};
-  const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
-  for (int i = 0; i < operand_count; ++i) {
-    switch (operand_types[i]) {
-      case OperandType::kNone:
+bool BytecodeArrayBuilder::RegisterListIsValid(RegisterList reg_list) const {
+  if (reg_list.register_count() == 0) {
+    return reg_list.first_register() == Register(0);
+  } else {
+    int first_reg_index = reg_list.first_register().index();
+    for (int i = 0; i < reg_list.register_count(); i++) {
+      if (!RegisterIsValid(Register(first_reg_index + i))) {
         return false;
-      case OperandType::kFlag8:
-      case OperandType::kIntrinsicId:
-        if (Bytecodes::SizeForUnsignedOperand(operands[i]) >
-            OperandSize::kByte) {
-          return false;
-        }
-        break;
-      case OperandType::kRuntimeId:
-        if (Bytecodes::SizeForUnsignedOperand(operands[i]) >
-            OperandSize::kShort) {
-          return false;
-        }
-        break;
-      case OperandType::kIdx:
-        // TODO(leszeks): Possibly split this up into constant pool indices and
-        // other indices, for checking.
-        break;
-      case OperandType::kUImm:
-      case OperandType::kImm:
-        break;
-      case OperandType::kRegList: {
-        CHECK_LT(i, operand_count - 1);
-        CHECK(operand_types[i + 1] == OperandType::kRegCount);
-        int reg_count = static_cast<int>(operands[i + 1]);
-        if (reg_count == 0) {
-          return Register::FromOperand(operands[i]) == Register(0);
-        } else {
-          Register start = Register::FromOperand(operands[i]);
-          Register end(start.index() + reg_count - 1);
-          if (!RegisterIsValid(start) || !RegisterIsValid(end) || start > end) {
-            return false;
-          }
-        }
-        i++;  // Skip past kRegCount operand.
-        break;
       }
-      case OperandType::kReg:
-      case OperandType::kRegOut: {
-        Register reg = Register::FromOperand(operands[i]);
-        if (!RegisterIsValid(reg)) {
-          return false;
-        }
-        break;
-      }
-      case OperandType::kRegOutPair:
-      case OperandType::kRegPair: {
-        Register reg0 = Register::FromOperand(operands[i]);
-        Register reg1 = Register(reg0.index() + 1);
-        if (!RegisterIsValid(reg0) || !RegisterIsValid(reg1)) {
-          return false;
-        }
-        break;
-      }
-      case OperandType::kRegOutTriple: {
-        Register reg0 = Register::FromOperand(operands[i]);
-        Register reg1 = Register(reg0.index() + 1);
-        Register reg2 = Register(reg0.index() + 2);
-        if (!RegisterIsValid(reg0) || !RegisterIsValid(reg1) ||
-            !RegisterIsValid(reg2)) {
-          return false;
-        }
-        break;
-      }
-      case OperandType::kRegCount:
-        UNREACHABLE();  // Dealt with in kRegList above.
     }
+    return true;
   }
+}
 
-  return true;
+void BytecodeArrayBuilder::PrepareToOutputBytecode(Bytecode bytecode) {
+  if (register_optimizer_) register_optimizer_->PrepareForBytecode(bytecode);
+}
+
+uint32_t BytecodeArrayBuilder::GetInputRegisterOperand(Register reg) {
+  DCHECK(RegisterIsValid(reg));
+  if (register_optimizer_) reg = register_optimizer_->GetInputRegister(reg);
+  return static_cast<uint32_t>(reg.ToOperand());
+}
+
+uint32_t BytecodeArrayBuilder::GetOutputRegisterOperand(Register reg) {
+  DCHECK(RegisterIsValid(reg));
+  if (register_optimizer_) register_optimizer_->PrepareOutputRegister(reg);
+  return static_cast<uint32_t>(reg.ToOperand());
+}
+
+uint32_t BytecodeArrayBuilder::GetInputRegisterListOperand(
+    RegisterList reg_list) {
+  DCHECK(RegisterListIsValid(reg_list));
+  if (register_optimizer_)
+    reg_list = register_optimizer_->GetInputRegisterList(reg_list);
+  return static_cast<uint32_t>(reg_list.first_register().ToOperand());
+}
+
+uint32_t BytecodeArrayBuilder::GetOutputRegisterListOperand(
+    RegisterList reg_list) {
+  DCHECK(RegisterListIsValid(reg_list));
+  if (register_optimizer_)
+    register_optimizer_->PrepareOutputRegisterList(reg_list);
+  return static_cast<uint32_t>(reg_list.first_register().ToOperand());
 }
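// Editorial sketch of how these helpers interact with the optimizer (an
// interpretation of the calls above, not additional patch code): input
// registers may be remapped to an equivalent register that already holds
// the value, while output registers are left as-is but reported to the
// optimizer so it can invalidate stale equivalences:
//
//   uint32_t in  = GetInputRegisterOperand(reg);   // reg possibly remapped
//   uint32_t out = GetOutputRegisterOperand(reg);  // reg unchanged, state
//                                                  // updated for the clobber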
 
 }  // namespace interpreter
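// Editorial note on the V8_EXPORT_PRIVATE / NON_EXPORTED_BASE annotations
// added to the interpreter headers below: in a shared-library ("component")
// build they expand to symbol-visibility attributes, roughly (a sketch of
// the conventional definitions, not text from this patch):
//
//   #if defined(_WIN32)
//   #define V8_EXPORT_PRIVATE __declspec(dllexport)   // dllimport for users
//   #else
//   #define V8_EXPORT_PRIVATE __attribute__((visibility("default")))
//   #endif
//   // NON_EXPORTED_BASE(code) passes |code| through, suppressing MSVC's
//   // C4275 "non dll-interface base class" warning on that inheritance.
//
// This lets targets outside the v8 library (e.g. unittests) link against
// these interpreter classes.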
diff --git a/src/interpreter/bytecode-array-builder.h b/src/interpreter/bytecode-array-builder.h
index a9fa7a7..cc5b5e7 100644
--- a/src/interpreter/bytecode-array-builder.h
+++ b/src/interpreter/bytecode-array-builder.h
@@ -6,6 +6,8 @@
 #define V8_INTERPRETER_BYTECODE_ARRAY_BUILDER_H_
 
 #include "src/ast/ast.h"
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
 #include "src/interpreter/bytecode-array-writer.h"
 #include "src/interpreter/bytecode-register-allocator.h"
 #include "src/interpreter/bytecode-register.h"
@@ -24,9 +26,11 @@
 class BytecodeLabel;
 class BytecodeNode;
 class BytecodePipelineStage;
+class BytecodeRegisterOptimizer;
 class Register;
 
-class BytecodeArrayBuilder final : public ZoneObject {
+class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
+    : public NON_EXPORTED_BASE(ZoneObject) {
  public:
   BytecodeArrayBuilder(
       Isolate* isolate, Zone* zone, int parameter_count, int context_count,
@@ -95,6 +99,14 @@
   BytecodeArrayBuilder& StoreContextSlot(Register context, int slot_index,
                                          int depth);
 
+  // Load from a module variable into the accumulator. |depth| is the depth of
+  // the current context relative to the module context.
+  BytecodeArrayBuilder& LoadModuleVariable(int cell_index, int depth);
+
+  // Store from the accumulator into a module variable. |depth| is the depth of
+  // the current context relative to the module context.
+  BytecodeArrayBuilder& StoreModuleVariable(int cell_index, int depth);
+
   // Register-accumulator transfers.
   BytecodeArrayBuilder& LoadAccumulatorWithRegister(Register reg);
   BytecodeArrayBuilder& StoreAccumulatorInRegister(Register reg);
@@ -183,10 +195,11 @@
 
   // Call a JS function. The JSFunction or Callable to be called should be in
   // |callable|. The arguments should be in |args|, with the receiver in
-  // |args[0]|. Type feedback is recorded in the |feedback_slot| in the type
-  // feedback vector.
+  // |args[0]|. The call type of the expression is in |call_type|. Type feedback
+  // is recorded in the |feedback_slot| in the type feedback vector.
   BytecodeArrayBuilder& Call(
       Register callable, RegisterList args, int feedback_slot,
+      Call::CallType call_type,
       TailCallMode tail_call_mode = TailCallMode::kDisallow);
 
   // Call the new operator. The accumulator holds the |new_target|.
@@ -317,6 +330,12 @@
 
   bool RequiresImplicitReturn() const { return !return_seen_in_block_; }
 
+  // Returns the raw operand value for the given register or register list.
+  uint32_t GetInputRegisterOperand(Register reg);
+  uint32_t GetOutputRegisterOperand(Register reg);
+  uint32_t GetInputRegisterListOperand(RegisterList reg_list);
+  uint32_t GetOutputRegisterListOperand(RegisterList reg_list);
+
   // Accessors
   BytecodeRegisterAllocator* register_allocator() {
     return &register_allocator_;
@@ -328,41 +347,22 @@
 
  private:
   friend class BytecodeRegisterAllocator;
+  template <OperandType... operand_types>
+  friend class BytecodeNodeBuilder;
 
-  INLINE(void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
-                     uint32_t operand2, uint32_t operand3));
-  INLINE(void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
-                     uint32_t operand2));
-  INLINE(void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1));
-  INLINE(void Output(Bytecode bytecode, uint32_t operand0));
-  INLINE(void Output(Bytecode bytecode));
+  // Returns the current source position for the given |bytecode|.
+  INLINE(BytecodeSourceInfo CurrentSourcePosition(Bytecode bytecode));
 
-  INLINE(void OutputJump(Bytecode bytecode, BytecodeLabel* label));
-  INLINE(void OutputJump(Bytecode bytecode, uint32_t operand0,
-                         BytecodeLabel* label));
+#define DECLARE_BYTECODE_OUTPUT(Name, ...)         \
+  template <typename... Operands>                  \
+  INLINE(void Output##Name(Operands... operands)); \
+  template <typename... Operands>                  \
+  INLINE(void Output##Name(BytecodeLabel* label, Operands... operands));
+  BYTECODE_LIST(DECLARE_BYTECODE_OUTPUT)
+#undef DECLARE_BYTECODE_OUTPUT
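// Editorial sketch of what one step of BYTECODE_LIST(DECLARE_BYTECODE_OUTPUT)
// declares, using a bytecode name that appears in this patch (the generated
// bodies are defined in the .cc via BytecodeNodeBuilder):
//
//   template <typename... Operands>
//   INLINE(void OutputLdaNamedProperty(Operands... operands));
//   template <typename... Operands>
//   INLINE(void OutputLdaNamedProperty(BytecodeLabel* label,
//                                      Operands... operands));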
 
   bool RegisterIsValid(Register reg) const;
-  bool OperandsAreValid(Bytecode bytecode, int operand_count,
-                        uint32_t operand0 = 0, uint32_t operand1 = 0,
-                        uint32_t operand2 = 0, uint32_t operand3 = 0) const;
-
-  static uint32_t RegisterOperand(Register reg) {
-    return static_cast<uint32_t>(reg.ToOperand());
-  }
-
-  static uint32_t SignedOperand(int value) {
-    return static_cast<uint32_t>(value);
-  }
-
-  static uint32_t UnsignedOperand(int value) {
-    DCHECK_GE(value, 0);
-    return static_cast<uint32_t>(value);
-  }
-
-  static uint32_t UnsignedOperand(size_t value) {
-    DCHECK_LE(value, kMaxUInt32);
-    return static_cast<uint32_t>(value);
-  }
+  bool RegisterListIsValid(RegisterList reg_list) const;
 
   // Set position for return.
   void SetReturnPosition();
@@ -375,6 +375,8 @@
   // during bytecode generation.
   BytecodeArrayBuilder& Illegal();
 
+  void PrepareToOutputBytecode(Bytecode bytecode);
+
   void LeaveBasicBlock() { return_seen_in_block_ = false; }
 
   BytecodeArrayWriter* bytecode_array_writer() {
@@ -403,6 +405,7 @@
   BytecodeRegisterAllocator register_allocator_;
   BytecodeArrayWriter bytecode_array_writer_;
   BytecodePipelineStage* pipeline_;
+  BytecodeRegisterOptimizer* register_optimizer_;
   BytecodeSourceInfo latest_source_info_;
 
   static int const kNoFeedbackSlot = 0;
diff --git a/src/interpreter/bytecode-array-iterator.h b/src/interpreter/bytecode-array-iterator.h
index 0922625..03279cb 100644
--- a/src/interpreter/bytecode-array-iterator.h
+++ b/src/interpreter/bytecode-array-iterator.h
@@ -5,6 +5,7 @@
 #ifndef V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_
 #define V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_
 
+#include "src/globals.h"
 #include "src/handles.h"
 #include "src/interpreter/bytecode-register.h"
 #include "src/interpreter/bytecodes.h"
@@ -15,7 +16,7 @@
 namespace internal {
 namespace interpreter {
 
-class BytecodeArrayIterator {
+class V8_EXPORT_PRIVATE BytecodeArrayIterator {
  public:
   explicit BytecodeArrayIterator(Handle<BytecodeArray> bytecode_array);
 
diff --git a/src/interpreter/bytecode-array-writer.cc b/src/interpreter/bytecode-array-writer.cc
index fb38768..28f997b 100644
--- a/src/interpreter/bytecode-array-writer.cc
+++ b/src/interpreter/bytecode-array-writer.cc
@@ -94,9 +94,9 @@
   int bytecode_offset = static_cast<int>(bytecodes()->size());
   const BytecodeSourceInfo& source_info = node->source_info();
   if (source_info.is_valid()) {
-    source_position_table_builder()->AddPosition(bytecode_offset,
-                                                 source_info.source_position(),
-                                                 source_info.is_statement());
+    source_position_table_builder()->AddPosition(
+        bytecode_offset, SourcePosition(source_info.source_position()),
+        source_info.is_statement());
   }
 }
 
@@ -211,8 +211,6 @@
     // and update the jump instruction and operand.
     size_t entry = constant_array_builder()->CommitReservedEntry(
         OperandSize::kShort, Smi::FromInt(delta));
-    DCHECK_EQ(Bytecodes::SizeForUnsignedOperand(static_cast<uint32_t>(entry)),
-              OperandSize::kShort);
     jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
     bytecodes()->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
     WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(entry));
@@ -275,7 +273,7 @@
 
 void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) {
   DCHECK(Bytecodes::IsJump(node->bytecode()));
-  DCHECK_EQ(0, node->operand(0));
+  DCHECK_EQ(0u, node->operand(0));
 
   size_t current_offset = bytecodes()->size();
 
diff --git a/src/interpreter/bytecode-array-writer.h b/src/interpreter/bytecode-array-writer.h
index 712fcb9..3810ca0 100644
--- a/src/interpreter/bytecode-array-writer.h
+++ b/src/interpreter/bytecode-array-writer.h
@@ -5,6 +5,8 @@
 #ifndef V8_INTERPRETER_BYTECODE_ARRAY_WRITER_H_
 #define V8_INTERPRETER_BYTECODE_ARRAY_WRITER_H_
 
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
 #include "src/interpreter/bytecode-pipeline.h"
 #include "src/source-position-table.h"
 
@@ -20,7 +22,8 @@
 
 // Class for emitting bytecode as the final stage of the bytecode
 // generation pipeline.
-class BytecodeArrayWriter final : public BytecodePipelineStage {
+class V8_EXPORT_PRIVATE BytecodeArrayWriter final
+    : public NON_EXPORTED_BASE(BytecodePipelineStage) {
  public:
   BytecodeArrayWriter(
       Zone* zone, ConstantArrayBuilder* constant_array_builder,
diff --git a/src/interpreter/bytecode-dead-code-optimizer.h b/src/interpreter/bytecode-dead-code-optimizer.h
index 188d610..7350981 100644
--- a/src/interpreter/bytecode-dead-code-optimizer.h
+++ b/src/interpreter/bytecode-dead-code-optimizer.h
@@ -5,6 +5,8 @@
 #ifndef V8_INTERPRETER_BYTECODE_DEAD_CODE_OPTIMIZER_H_
 #define V8_INTERPRETER_BYTECODE_DEAD_CODE_OPTIMIZER_H_
 
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
 #include "src/interpreter/bytecode-pipeline.h"
 
 namespace v8 {
@@ -13,8 +15,9 @@
 
 // An optimization stage for eliminating obviously dead code in bytecode
 // generation.
-class BytecodeDeadCodeOptimizer final : public BytecodePipelineStage,
-                                        public ZoneObject {
+class V8_EXPORT_PRIVATE BytecodeDeadCodeOptimizer final
+    : public NON_EXPORTED_BASE(BytecodePipelineStage),
+      public NON_EXPORTED_BASE(ZoneObject) {
  public:
   explicit BytecodeDeadCodeOptimizer(BytecodePipelineStage* next_stage);
 
diff --git a/src/interpreter/bytecode-decoder.h b/src/interpreter/bytecode-decoder.h
index d1749ef..51d0e41 100644
--- a/src/interpreter/bytecode-decoder.h
+++ b/src/interpreter/bytecode-decoder.h
@@ -7,6 +7,7 @@
 
 #include <iosfwd>
 
+#include "src/globals.h"
 #include "src/interpreter/bytecode-register.h"
 #include "src/interpreter/bytecodes.h"
 
@@ -14,7 +15,7 @@
 namespace internal {
 namespace interpreter {
 
-class BytecodeDecoder final {
+class V8_EXPORT_PRIVATE BytecodeDecoder final {
  public:
   // Decodes a register operand in a byte array.
   static Register DecodeRegisterOperand(const uint8_t* operand_start,
diff --git a/src/interpreter/bytecode-generator.cc b/src/interpreter/bytecode-generator.cc
index db5a596..99e7672 100644
--- a/src/interpreter/bytecode-generator.cc
+++ b/src/interpreter/bytecode-generator.cc
@@ -14,6 +14,7 @@
 #include "src/interpreter/bytecode-register-allocator.h"
 #include "src/interpreter/control-flow-builders.h"
 #include "src/objects.h"
+#include "src/parsing/parse-info.h"
 #include "src/parsing/token.h"
 
 namespace v8 {
@@ -361,7 +362,7 @@
       return;
     }
     current = current->outer();
-    if (current->context() != context) {
+    if (current->context() != context && context->ShouldPopContext()) {
       // Pop context to the expected depth.
       // TODO(rmcilroy): Only emit a single context pop.
       generator()->builder()->PopContext(current->context()->reg());
@@ -571,7 +572,11 @@
       generator_state_(),
       loop_depth_(0),
       home_object_symbol_(info->isolate()->factory()->home_object_symbol()),
-      prototype_string_(info->isolate()->factory()->prototype_string()) {
+      empty_fixed_array_(info->isolate()->factory()->empty_fixed_array()) {
+  AstValueFactory* ast_value_factory = info->parse_info()->ast_value_factory();
+  const AstRawString* prototype_string = ast_value_factory->prototype_string();
+  ast_value_factory->Internalize(info->isolate());
+  prototype_string_ = prototype_string->string();
 }
 
 Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(Isolate* isolate) {
@@ -678,6 +683,9 @@
   // Visit declarations within the function scope.
   VisitDeclarations(scope()->declarations());
 
+  // Emit initializing assignments for module namespace imports (if any).
+  VisitModuleNamespaceImports();
+
   // Perform a stack-check before the body.
   builder()->StackCheck(info()->literal()->start_position());
 
@@ -826,8 +834,9 @@
     case VariableLocation::MODULE:
       if (variable->IsExport() && variable->binding_needs_init()) {
         builder()->LoadTheHole();
-        VisitVariableAssignment(variable, Token::INIT,
-                                FeedbackVectorSlot::Invalid());
+        BuildVariableAssignment(variable, Token::INIT,
+                                FeedbackVectorSlot::Invalid(),
+                                HoleCheckMode::kElided);
       }
       // Nothing to do for imports.
       break;
@@ -846,8 +855,9 @@
     case VariableLocation::PARAMETER:
     case VariableLocation::LOCAL: {
       VisitForAccumulatorValue(decl->fun());
-      VisitVariableAssignment(variable, Token::INIT,
-                              FeedbackVectorSlot::Invalid());
+      BuildVariableAssignment(variable, Token::INIT,
+                              FeedbackVectorSlot::Invalid(),
+                              HoleCheckMode::kElided);
       break;
     }
     case VariableLocation::CONTEXT: {
@@ -871,19 +881,38 @@
       DCHECK_EQ(variable->mode(), LET);
       DCHECK(variable->IsExport());
       VisitForAccumulatorValue(decl->fun());
-      VisitVariableAssignment(variable, Token::INIT,
-                              FeedbackVectorSlot::Invalid());
+      BuildVariableAssignment(variable, Token::INIT,
+                              FeedbackVectorSlot::Invalid(),
+                              HoleCheckMode::kElided);
       break;
   }
 }
 
-void BytecodeGenerator::VisitDeclarations(
-    ZoneList<Declaration*>* declarations) {
+void BytecodeGenerator::VisitModuleNamespaceImports() {
+  if (!scope()->is_module_scope()) return;
+
+  RegisterAllocationScope register_scope(this);
+  Register module_request = register_allocator()->NewRegister();
+
+  ModuleDescriptor* descriptor = scope()->AsModuleScope()->module();
+  for (auto entry : descriptor->namespace_imports()) {
+    builder()
+        ->LoadLiteral(Smi::FromInt(entry->module_request))
+        .StoreAccumulatorInRegister(module_request)
+        .CallRuntime(Runtime::kGetModuleNamespace, module_request);
+    Variable* var = scope()->LookupLocal(entry->local_name);
+    DCHECK_NOT_NULL(var);
+    BuildVariableAssignment(var, Token::INIT, FeedbackVectorSlot::Invalid(),
+                            HoleCheckMode::kElided);
+  }
+}
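// Editorial example (illustrative module source): for
//
//   import * as ns from "./m.js";
//
// the loop above loads the import's module-request index, calls
// Runtime::kGetModuleNamespace to obtain the namespace object, and then
// runs the same initializing assignment as any other binding, with the
// hole check elided because |ns| is definitely initialized at this point.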
+
+void BytecodeGenerator::VisitDeclarations(Declaration::List* declarations) {
   RegisterAllocationScope register_scope(this);
   DCHECK(globals_builder()->empty());
-  for (int i = 0; i < declarations->length(); i++) {
+  for (Declaration* decl : *declarations) {
     RegisterAllocationScope register_scope(this);
-    Visit(declarations->at(i));
+    Visit(decl);
   }
   if (globals_builder()->empty()) return;
 
@@ -1126,8 +1155,9 @@
   LhsKind assign_type = Property::GetAssignType(property);
   switch (assign_type) {
     case VARIABLE: {
-      Variable* variable = expr->AsVariableProxy()->var();
-      VisitVariableAssignment(variable, Token::ASSIGN, slot);
+      VariableProxy* proxy = expr->AsVariableProxy();
+      BuildVariableAssignment(proxy->var(), Token::ASSIGN, slot,
+                              proxy->hole_check_mode());
       break;
     }
     case NAMED_PROPERTY: {
@@ -1206,7 +1236,7 @@
 
   // Set up loop counter
   Register index = register_allocator()->NewRegister();
-  builder()->LoadLiteral(Smi::FromInt(0));
+  builder()->LoadLiteral(Smi::kZero);
   builder()->StoreAccumulatorInRegister(index);
 
   // The loop
@@ -1374,11 +1404,12 @@
   builder()->CallRuntime(Runtime::kToFastProperties, literal);
   // Assign to class variable.
   if (expr->class_variable_proxy() != nullptr) {
-    Variable* var = expr->class_variable_proxy()->var();
+    VariableProxy* proxy = expr->class_variable_proxy();
     FeedbackVectorSlot slot = expr->NeedsProxySlot()
                                   ? expr->ProxySlot()
                                   : FeedbackVectorSlot::Invalid();
-    VisitVariableAssignment(var, Token::INIT, slot);
+    BuildVariableAssignment(proxy->var(), Token::INIT, slot,
+                            HoleCheckMode::kElided);
   }
 }
 
@@ -1541,11 +1572,14 @@
       FastCloneShallowObjectStub::IsSupported(expr),
       FastCloneShallowObjectStub::PropertiesCount(expr->properties_count()),
       expr->ComputeFlags());
-  // Allocate in the outer scope since this register is used to return the
-  // expression's results to the caller.
+  // If the constant properties fixed array is empty, use the cached
+  // empty_fixed_array so it is only added to the constant pool once.
+  Handle<FixedArray> constant_properties = expr->properties_count() == 0
+                                               ? empty_fixed_array()
+                                               : expr->constant_properties();
   Register literal = register_allocator()->NewRegister();
-  builder()->CreateObjectLiteral(expr->constant_properties(),
-                                 expr->literal_index(), flags, literal);
+  builder()->CreateObjectLiteral(constant_properties, expr->literal_index(),
+                                 flags, literal);
 
   // Store computed values into the literal.
   int property_index = 0;
@@ -1752,17 +1786,13 @@
 
 void BytecodeGenerator::VisitVariableProxy(VariableProxy* proxy) {
   builder()->SetExpressionPosition(proxy);
-  VisitVariableLoad(proxy->var(), proxy->VariableFeedbackSlot());
+  BuildVariableLoad(proxy->var(), proxy->VariableFeedbackSlot(),
+                    proxy->hole_check_mode());
 }
 
-void BytecodeGenerator::BuildHoleCheckForVariableLoad(Variable* variable) {
-  if (variable->binding_needs_init()) {
-    BuildThrowIfHole(variable->name());
-  }
-}
-
-void BytecodeGenerator::VisitVariableLoad(Variable* variable,
+void BytecodeGenerator::BuildVariableLoad(Variable* variable,
                                           FeedbackVectorSlot slot,
+                                          HoleCheckMode hole_check_mode,
                                           TypeofMode typeof_mode) {
   switch (variable->location()) {
     case VariableLocation::LOCAL: {
@@ -1771,7 +1801,9 @@
       // VisitForRegisterScope, in order to avoid register aliasing if
       // subsequent expressions assign to the same variable.
       builder()->LoadAccumulatorWithRegister(source);
-      BuildHoleCheckForVariableLoad(variable);
+      if (hole_check_mode == HoleCheckMode::kRequired) {
+        BuildThrowIfHole(variable->name());
+      }
       break;
     }
     case VariableLocation::PARAMETER: {
@@ -1782,7 +1814,9 @@
       // VisitForRegisterScope, in order to avoid register aliasing if
       // subsequent expressions assign to the same variable.
       builder()->LoadAccumulatorWithRegister(source);
-      BuildHoleCheckForVariableLoad(variable);
+      if (hole_check_mode == HoleCheckMode::kRequired) {
+        BuildThrowIfHole(variable->name());
+      }
       break;
     }
     case VariableLocation::UNALLOCATED: {
@@ -1801,7 +1835,9 @@
       }
 
       builder()->LoadContextSlot(context_reg, variable->index(), depth);
-      BuildHoleCheckForVariableLoad(variable);
+      if (hole_check_mode == HoleCheckMode::kRequired) {
+        BuildThrowIfHole(variable->name());
+      }
       break;
     }
     case VariableLocation::LOOKUP: {
@@ -1812,7 +1848,9 @@
               execution_context()->ContextChainDepth(local_variable->scope());
           builder()->LoadLookupContextSlot(variable->name(), typeof_mode,
                                            local_variable->index(), depth);
-          BuildHoleCheckForVariableLoad(variable);
+          if (hole_check_mode == HoleCheckMode::kRequired) {
+            BuildThrowIfHole(variable->name());
+          }
           break;
         }
         case DYNAMIC_GLOBAL: {
@@ -1827,36 +1865,21 @@
       break;
     }
     case VariableLocation::MODULE: {
-      ModuleDescriptor* descriptor = scope()->GetModuleScope()->module();
-      if (variable->IsExport()) {
-        auto it = descriptor->regular_exports().find(variable->raw_name());
-        DCHECK(it != descriptor->regular_exports().end());
-        Register export_name = register_allocator()->NewRegister();
-        builder()
-            ->LoadLiteral(it->second->export_name->string())
-            .StoreAccumulatorInRegister(export_name)
-            .CallRuntime(Runtime::kLoadModuleExport, export_name);
-      } else {
-        auto it = descriptor->regular_imports().find(variable->raw_name());
-        DCHECK(it != descriptor->regular_imports().end());
-        RegisterList args = register_allocator()->NewRegisterList(2);
-        builder()
-            ->LoadLiteral(it->second->import_name->string())
-            .StoreAccumulatorInRegister(args[0])
-            .LoadLiteral(Smi::FromInt(it->second->module_request))
-            .StoreAccumulatorInRegister(args[1])
-            .CallRuntime(Runtime::kLoadModuleImport, args);
+      int depth = execution_context()->ContextChainDepth(variable->scope());
+      builder()->LoadModuleVariable(variable->index(), depth);
+      if (hole_check_mode == HoleCheckMode::kRequired) {
+        BuildThrowIfHole(variable->name());
       }
-      BuildHoleCheckForVariableLoad(variable);
       break;
     }
   }
 }
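// Editorial example: module bindings now resolve to cells addressed by
// index and context depth, so a read becomes a single bytecode instead of
// the removed runtime calls (illustrative module source):
//
//   export let counter = 0;
//   counter;  // LdaModuleVariable <cell_index> <depth>, followed by a hole
//             // check only under HoleCheckMode::kRequired
//
// |depth| counts contexts between the current context and the module
// context, mirroring the context-slot cases above.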
 
-void BytecodeGenerator::VisitVariableLoadForAccumulatorValue(
-    Variable* variable, FeedbackVectorSlot slot, TypeofMode typeof_mode) {
+void BytecodeGenerator::BuildVariableLoadForAccumulatorValue(
+    Variable* variable, FeedbackVectorSlot slot, HoleCheckMode hole_check_mode,
+    TypeofMode typeof_mode) {
   ValueResultScope accumulator_result(this);
-  VisitVariableLoad(variable, slot, typeof_mode);
+  BuildVariableLoad(variable, slot, hole_check_mode, typeof_mode);
 }
 
 void BytecodeGenerator::BuildReturn() {
@@ -1911,29 +1934,26 @@
 
 void BytecodeGenerator::BuildHoleCheckForVariableAssignment(Variable* variable,
                                                             Token::Value op) {
-  if (op != Token::INIT) {
-    // Perform an initialization check for let/const declared variables.
-    // E.g. let x = (x = 20); is not allowed.
-    BuildThrowIfHole(variable->name());
-  } else {
-    DCHECK(variable->is_this() && variable->mode() == CONST &&
-           op == Token::INIT);
+  if (variable->is_this() && variable->mode() == CONST && op == Token::INIT) {
     // Perform an initialization check for 'this'. 'this' variable is the
     // only variable able to trigger bind operations outside the TDZ
     // via 'super' calls.
     BuildThrowIfNotHole(variable->name());
+  } else {
+    // Perform an initialization check for let/const declared variables.
+    // E.g. let x = (x = 20); is not allowed.
+    DCHECK(IsLexicalVariableMode(variable->mode()));
+    BuildThrowIfHole(variable->name());
   }
 }
 
-void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
+void BytecodeGenerator::BuildVariableAssignment(Variable* variable,
                                                 Token::Value op,
-                                                FeedbackVectorSlot slot) {
+                                                FeedbackVectorSlot slot,
+                                                HoleCheckMode hole_check_mode) {
   VariableMode mode = variable->mode();
   RegisterAllocationScope assignment_register_scope(this);
   BytecodeLabel end_label;
-  bool hole_check_required =
-      variable->binding_needs_init() &&
-      (op != Token::INIT || (mode == CONST && variable->is_this()));
   switch (variable->location()) {
     case VariableLocation::PARAMETER:
     case VariableLocation::LOCAL: {
@@ -1944,7 +1964,7 @@
         destination = Register(variable->index());
       }
 
-      if (hole_check_required) {
+      if (hole_check_mode == HoleCheckMode::kRequired) {
         // Load destination to check for hole.
         Register value_temp = register_allocator()->NewRegister();
         builder()
@@ -1979,7 +1999,7 @@
         context_reg = execution_context()->reg();
       }
 
-      if (hole_check_required) {
+      if (hole_check_mode == HoleCheckMode::kRequired) {
         // Load destination to check for hole.
         Register value_temp = register_allocator()->NewRegister();
         builder()
@@ -2014,18 +2034,16 @@
       // assignments for them.
       DCHECK(variable->IsExport());
 
-      ModuleDescriptor* mod = scope()->GetModuleScope()->module();
-      // There may be several export names for this local name, but it doesn't
-      // matter which one we pick, as they all map to the same cell.
-      auto it = mod->regular_exports().find(variable->raw_name());
-      DCHECK(it != mod->regular_exports().end());
-
-      RegisterList args = register_allocator()->NewRegisterList(2);
-      builder()
-          ->StoreAccumulatorInRegister(args[1])
-          .LoadLiteral(it->second->export_name->string())
-          .StoreAccumulatorInRegister(args[0])
-          .CallRuntime(Runtime::kStoreModuleExport, args);
+      int depth = execution_context()->ContextChainDepth(variable->scope());
+      if (hole_check_mode == HoleCheckMode::kRequired) {
+        Register value_temp = register_allocator()->NewRegister();
+        builder()
+            ->StoreAccumulatorInRegister(value_temp)
+            .LoadModuleVariable(variable->index(), depth);
+        BuildHoleCheckForVariableAssignment(variable, op);
+        builder()->LoadAccumulatorWithRegister(value_temp);
+      }
+      builder()->StoreModuleVariable(variable->index(), depth);
       break;
     }
   }
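// Editorial sketch of the bytecode sequence emitted just above for a module
// store that still needs a TDZ check (e.g. assigning to an exported `let`
// that may be uninitialized); mnemonics are for illustration:
//
//   Star value_temp        ; save the RHS from the accumulator
//   LdaModuleVariable ...  ; load the current cell value
//   <hole check>           ; BuildHoleCheckForVariableAssignment(variable, op)
//   Ldar value_temp        ; restore the RHS
//   StaModuleVariable ...  ; write the cell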
@@ -2087,7 +2105,8 @@
     switch (assign_type) {
       case VARIABLE: {
         VariableProxy* proxy = expr->target()->AsVariableProxy();
-        VisitVariableLoad(proxy->var(), proxy->VariableFeedbackSlot());
+        BuildVariableLoad(proxy->var(), proxy->VariableFeedbackSlot(),
+                          proxy->hole_check_mode());
         builder()->StoreAccumulatorInRegister(old_value);
         break;
       }
@@ -2136,10 +2155,11 @@
   FeedbackVectorSlot slot = expr->AssignmentSlot();
   switch (assign_type) {
     case VARIABLE: {
-      // TODO(oth): The VisitVariableAssignment() call is hard to reason about.
+      // TODO(oth): The BuildVariableAssignment() call is hard to reason about.
       // Is the value in the accumulator safe? Yes, but scary.
-      Variable* variable = expr->target()->AsVariableProxy()->var();
-      VisitVariableAssignment(variable, expr->op(), slot);
+      VariableProxy* proxy = expr->target()->AsVariableProxy();
+      BuildVariableAssignment(proxy->var(), expr->op(), slot,
+                              proxy->hole_check_mode());
       break;
     }
     case NAMED_PROPERTY:
@@ -2273,10 +2293,12 @@
   }
 }
 
-void BytecodeGenerator::VisitPropertyLoadForAccumulator(Register obj,
-                                                        Property* expr) {
+void BytecodeGenerator::VisitPropertyLoadForRegister(Register obj,
+                                                     Property* expr,
+                                                     Register destination) {
   ValueResultScope result_scope(this);
   VisitPropertyLoad(obj, expr);
+  builder()->StoreAccumulatorInRegister(destination);
 }
 
 void BytecodeGenerator::VisitNamedSuperPropertyLoad(Property* property,
@@ -2325,11 +2347,10 @@
 }
 
 void BytecodeGenerator::VisitArguments(ZoneList<Expression*>* args,
-                                       RegisterList arg_regs,
-                                       size_t first_argument_register) {
+                                       RegisterList* arg_regs) {
   // Visit arguments.
   for (int i = 0; i < static_cast<int>(args->length()); i++) {
-    VisitForRegisterValue(args->at(i), arg_regs[first_argument_register + i]);
+    VisitAndPushIntoRegisterList(args->at(i), arg_regs);
   }
 }
 
@@ -2342,11 +2363,11 @@
   }
 
   Register callee = register_allocator()->NewRegister();
-
-  // Add an argument register for the receiver.
-  RegisterList args =
-      register_allocator()->NewRegisterList(expr->arguments()->length() + 1);
-  Register receiver = args[0];
+  // Grow the args list as we visit receiver / arguments to avoid allocating all
+  // the registers up-front. Otherwise these registers are unavailable during
+  // receiver / argument visiting and we can end up with memory leaks due to
+  // registers keeping objects alive.
+  RegisterList args = register_allocator()->NewGrowableRegisterList();
 
   // Prepare the callee and the receiver to the function call. This depends on
   // the semantics of the underlying call type.
@@ -2354,54 +2375,55 @@
     case Call::NAMED_PROPERTY_CALL:
     case Call::KEYED_PROPERTY_CALL: {
       Property* property = callee_expr->AsProperty();
-      VisitForAccumulatorValue(property->obj());
-      builder()->StoreAccumulatorInRegister(receiver);
-      VisitPropertyLoadForAccumulator(receiver, property);
-      builder()->StoreAccumulatorInRegister(callee);
+      VisitAndPushIntoRegisterList(property->obj(), &args);
+      VisitPropertyLoadForRegister(args[0], property, callee);
       break;
     }
     case Call::GLOBAL_CALL: {
       // Receiver is undefined for global calls.
-      builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
+      BuildPushUndefinedIntoRegisterList(&args);
       // Load callee as a global variable.
       VariableProxy* proxy = callee_expr->AsVariableProxy();
-      VisitVariableLoadForAccumulatorValue(proxy->var(),
-                                           proxy->VariableFeedbackSlot());
+      BuildVariableLoadForAccumulatorValue(proxy->var(),
+                                           proxy->VariableFeedbackSlot(),
+                                           proxy->hole_check_mode());
       builder()->StoreAccumulatorInRegister(callee);
       break;
     }
-    case Call::LOOKUP_SLOT_CALL:
-    case Call::POSSIBLY_EVAL_CALL: {
-      if (callee_expr->AsVariableProxy()->var()->IsLookupSlot()) {
+    case Call::WITH_CALL: {
+      Register receiver = register_allocator()->GrowRegisterList(&args);
+      DCHECK(callee_expr->AsVariableProxy()->var()->IsLookupSlot());
+      {
         RegisterAllocationScope inner_register_scope(this);
         Register name = register_allocator()->NewRegister();
 
         // Call %LoadLookupSlotForCall to get the callee and receiver.
         DCHECK(Register::AreContiguous(callee, receiver));
         RegisterList result_pair(callee.index(), 2);
+        USE(receiver);
         Variable* variable = callee_expr->AsVariableProxy()->var();
         builder()
             ->LoadLiteral(variable->name())
             .StoreAccumulatorInRegister(name)
             .CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, name,
                                 result_pair);
-        break;
       }
-      // Fall through.
-      DCHECK_EQ(call_type, Call::POSSIBLY_EVAL_CALL);
+      break;
     }
     case Call::OTHER_CALL: {
-      builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
+      BuildPushUndefinedIntoRegisterList(&args);
       VisitForRegisterValue(callee_expr, callee);
       break;
     }
     case Call::NAMED_SUPER_PROPERTY_CALL: {
+      Register receiver = register_allocator()->GrowRegisterList(&args);
       Property* property = callee_expr->AsProperty();
       VisitNamedSuperPropertyLoad(property, receiver);
       builder()->StoreAccumulatorInRegister(callee);
       break;
     }
     case Call::KEYED_SUPER_PROPERTY_CALL: {
+      Register receiver = register_allocator()->GrowRegisterList(&args);
       Property* property = callee_expr->AsProperty();
       VisitKeyedSuperPropertyLoad(property, receiver);
       builder()->StoreAccumulatorInRegister(callee);
@@ -2414,12 +2436,12 @@
 
   // Evaluate all arguments to the function call and store in sequential args
   // registers.
-  VisitArguments(expr->arguments(), args, 1);
+  VisitArguments(expr->arguments(), &args);
+  CHECK_EQ(expr->arguments()->length() + 1, args.register_count());
 
   // Resolve callee for a potential direct eval call. This block will mutate the
   // callee value.
-  if (call_type == Call::POSSIBLY_EVAL_CALL &&
-      expr->arguments()->length() > 0) {
+  if (expr->is_possibly_eval() && expr->arguments()->length() > 0) {
     RegisterAllocationScope inner_register_scope(this);
     // Set up arguments for ResolvePossiblyDirectEval by copying callee, source
     // strings and function closure, and loading language and
@@ -2445,18 +2467,9 @@
 
   builder()->SetExpressionPosition(expr);
 
-  int feedback_slot_index;
-  if (expr->CallFeedbackICSlot().IsInvalid()) {
-    DCHECK(call_type == Call::POSSIBLY_EVAL_CALL);
-    // Valid type feedback slots can only be greater than kReservedIndexCount.
-    // We use 0 to indicate an invalid slot id. Statically assert that 0 cannot
-    // be a valid slot id.
-    STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
-    feedback_slot_index = 0;
-  } else {
-    feedback_slot_index = feedback_index(expr->CallFeedbackICSlot());
-  }
-  builder()->Call(callee, args, feedback_slot_index, expr->tail_call_mode());
+  int const feedback_slot_index = feedback_index(expr->CallFeedbackICSlot());
+  builder()->Call(callee, args, feedback_slot_index, call_type,
+                  expr->tail_call_mode());
 }
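// Editorial walkthrough (a sketch assembled from builder calls visible in
// this patch) of the new argument flow for a plain call `f(a, b)` taking
// the OTHER_CALL path:
//
//   RegisterList args = register_allocator()->NewGrowableRegisterList();
//   BuildPushUndefinedIntoRegisterList(&args);   // receiver slot
//   VisitForRegisterValue(callee_expr, callee);  // callee register
//   VisitArguments(expr->arguments(), &args);    // a, b appended one by one
//   builder()->Call(callee, args,
//                   feedback_index(expr->CallFeedbackICSlot()), call_type,
//                   expr->tail_call_mode());
//
// Growing |args| only after each argument has been evaluated keeps those
// registers free while the argument expressions themselves run.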
 
 void BytecodeGenerator::VisitCallSuper(Call* expr) {
@@ -2470,9 +2483,8 @@
   Register constructor = this_function;  // Re-use dead this_function register.
   builder()->StoreAccumulatorInRegister(constructor);
 
-  RegisterList args =
-      register_allocator()->NewRegisterList(expr->arguments()->length());
-  VisitArguments(expr->arguments(), args);
+  RegisterList args = register_allocator()->NewGrowableRegisterList();
+  VisitArguments(expr->arguments(), &args);
 
   // The new target is loaded into the accumulator from the
   // {new.target} variable.
@@ -2480,20 +2492,20 @@
 
   // Call construct.
   builder()->SetExpressionPosition(expr);
-  // Valid type feedback slots can only be greater than kReservedIndexCount.
-  // Assert that 0 cannot be a valid slot id.
-  STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
-  // Type feedback is not necessary for super constructor calls. The type
-  // information can be inferred in most cases. Slot id 0 indicates type
-  // feedback is not required.
-  builder()->New(constructor, args, 0);
+  // TODO(turbofan): For now we do gather feedback on super constructor
+  // calls, utilizing the existing machinery to inline the actual call
+  // target and the JSCreate for the implicit receiver allocation. This
+  // is not an ideal solution for super constructor calls, but it gets
+  // the job done for now. In the long run we might want to revisit this
+  // and come up with a better way.
+  int const feedback_slot_index = feedback_index(expr->CallFeedbackICSlot());
+  builder()->New(constructor, args, feedback_slot_index);
 }
 
 void BytecodeGenerator::VisitCallNew(CallNew* expr) {
   Register constructor = VisitForRegisterValue(expr->expression());
-  RegisterList args =
-      register_allocator()->NewRegisterList(expr->arguments()->length());
-  VisitArguments(expr->arguments(), args);
+  RegisterList args = register_allocator()->NewGrowableRegisterList();
+  VisitArguments(expr->arguments(), &args);
 
   builder()->SetExpressionPosition(expr);
   // The accumulator holds new target which is the same as the
@@ -2505,18 +2517,15 @@
 
 void BytecodeGenerator::VisitCallRuntime(CallRuntime* expr) {
   if (expr->is_jsruntime()) {
+    RegisterList args = register_allocator()->NewGrowableRegisterList();
     // Allocate a register for the receiver and load it with undefined.
-    RegisterList args =
-        register_allocator()->NewRegisterList(expr->arguments()->length() + 1);
-    Register receiver = args[0];
-    builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
-    VisitArguments(expr->arguments(), args, 1);
+    BuildPushUndefinedIntoRegisterList(&args);
+    VisitArguments(expr->arguments(), &args);
     builder()->CallJSRuntime(expr->context_index(), args);
   } else {
     // Evaluate all arguments to the runtime call.
-    RegisterList args =
-        register_allocator()->NewRegisterList(expr->arguments()->length());
-    VisitArguments(expr->arguments(), args);
+    RegisterList args = register_allocator()->NewGrowableRegisterList();
+    VisitArguments(expr->arguments(), &args);
     Runtime::FunctionId function_id = expr->function()->function_id;
     builder()->CallRuntime(function_id, args);
   }
@@ -2532,8 +2541,9 @@
     // Typeof does not throw a reference error on global variables, hence we
     // perform a non-contextual load in case the operand is a variable proxy.
     VariableProxy* proxy = expr->expression()->AsVariableProxy();
-    VisitVariableLoadForAccumulatorValue(
-        proxy->var(), proxy->VariableFeedbackSlot(), INSIDE_TYPEOF);
+    BuildVariableLoadForAccumulatorValue(
+        proxy->var(), proxy->VariableFeedbackSlot(), proxy->hole_check_mode(),
+        INSIDE_TYPEOF);
   } else {
     VisitForAccumulatorValue(expr->expression());
   }
@@ -2657,8 +2667,9 @@
   switch (assign_type) {
     case VARIABLE: {
       VariableProxy* proxy = expr->expression()->AsVariableProxy();
-      VisitVariableLoadForAccumulatorValue(proxy->var(),
-                                           proxy->VariableFeedbackSlot());
+      BuildVariableLoadForAccumulatorValue(proxy->var(),
+                                           proxy->VariableFeedbackSlot(),
+                                           proxy->hole_check_mode());
       break;
     }
     case NAMED_PROPERTY: {
@@ -2709,7 +2720,9 @@
   if (is_postfix) {
     // Convert old value into a number before saving it.
     old_value = register_allocator()->NewRegister();
-    builder()->ConvertAccumulatorToNumber(old_value);
+    builder()
+        ->ConvertAccumulatorToNumber(old_value)
+        .LoadAccumulatorWithRegister(old_value);
   }
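// Editorial note: ToNumber writes its result to |old_value| but (likely tied
// to the output-register handling added in this patch) is no longer
// guaranteed to leave it in the accumulator, so the extra
// LoadAccumulatorWithRegister reload keeps the following +1/-1 operating on
// the converted number. For `x++`, |old_value| is the expression's result
// while the incremented value is written back to x.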
 
   // Perform +1/-1 operation.
@@ -2721,8 +2734,9 @@
   FeedbackVectorSlot feedback_slot = expr->CountSlot();
   switch (assign_type) {
     case VARIABLE: {
-      Variable* variable = expr->expression()->AsVariableProxy()->var();
-      VisitVariableAssignment(variable, expr->op(), feedback_slot);
+      VariableProxy* proxy = expr->expression()->AsVariableProxy();
+      BuildVariableAssignment(proxy->var(), expr->op(), feedback_slot,
+                              proxy->hole_check_mode());
       break;
     }
     case NAMED_PROPERTY: {
@@ -2821,7 +2835,7 @@
   if (execution_result()->IsTest()) {
     TestResultScope* test_result = execution_result()->AsTest();
 
-    if (left->ToBooleanIsTrue() || right->ToBooleanIsTrue()) {
+    if (left->ToBooleanIsTrue()) {
       builder()->Jump(test_result->NewThenLabel());
     } else if (left->ToBooleanIsFalse() && right->ToBooleanIsFalse()) {
       builder()->Jump(test_result->NewElseLabel());
@@ -2856,7 +2870,7 @@
   if (execution_result()->IsTest()) {
     TestResultScope* test_result = execution_result()->AsTest();
 
-    if (left->ToBooleanIsFalse() || right->ToBooleanIsFalse()) {
+    if (left->ToBooleanIsFalse()) {
       builder()->Jump(test_result->NewElseLabel());
     } else if (left->ToBooleanIsTrue() && right->ToBooleanIsTrue()) {
       builder()->Jump(test_result->NewThenLabel());
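
These two hunks tighten the static short-circuit checks: `a || b` may jump straight to the then-label only when the *left* operand is statically true, and `a && b` straight to the else-label only when the left operand is statically false. A statically-known *right* operand alone is not enough, because the left operand must still be evaluated for its side effects. A minimal standalone C++ illustration (toy code, not V8) of the semantics at stake:

    #include <iostream>

    static int calls = 0;
    static bool left_operand() {  // has an observable side effect
      ++calls;
      return false;
    }

    int main() {
      // The right operand is statically true, but left_operand() must
      // still run before the result of the || is known to the program.
      bool result = left_operand() || true;
      std::cout << result << " " << calls << "\n";  // prints "1 1", not "1 0"
      return 0;
    }
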
@@ -3019,8 +3033,9 @@
           ? CreateArgumentsType::kUnmappedArguments
           : CreateArgumentsType::kMappedArguments;
   builder()->CreateArguments(type);
-  VisitVariableAssignment(variable, Token::ASSIGN,
-                          FeedbackVectorSlot::Invalid());
+  BuildVariableAssignment(variable, Token::ASSIGN,
+                          FeedbackVectorSlot::Invalid(),
+                          HoleCheckMode::kElided);
 }
 
 void BytecodeGenerator::VisitRestArgumentsArray(Variable* rest) {
@@ -3030,7 +3045,8 @@
   // variable.
   builder()->CreateArguments(CreateArgumentsType::kRestParameter);
   DCHECK(rest->IsContextSlot() || rest->IsStackAllocated());
-  VisitVariableAssignment(rest, Token::ASSIGN, FeedbackVectorSlot::Invalid());
+  BuildVariableAssignment(rest, Token::ASSIGN, FeedbackVectorSlot::Invalid(),
+                          HoleCheckMode::kElided);
 }
 
 void BytecodeGenerator::VisitThisFunctionVariable(Variable* variable) {
@@ -3038,7 +3054,8 @@
 
   // Store the closure we were called with in the given variable.
   builder()->LoadAccumulatorWithRegister(Register::function_closure());
-  VisitVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid());
+  BuildVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid(),
+                          HoleCheckMode::kElided);
 }
 
 void BytecodeGenerator::VisitNewTargetVariable(Variable* variable) {
@@ -3046,7 +3063,8 @@
 
   // Store the new target we were called with in the given variable.
   builder()->LoadAccumulatorWithRegister(Register::new_target());
-  VisitVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid());
+  BuildVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid(),
+                          HoleCheckMode::kElided);
 
   // TODO(mstarzinger): The <new.target> register is not set by the deoptimizer
   // and we need to make sure {BytecodeRegisterOptimizer} flushes its state
@@ -3120,6 +3138,28 @@
   builder()->StoreAccumulatorInRegister(destination);
 }
 
+// Visits the expression |expr| and pushes the result into a new register
+// added to the end of |reg_list|.
+void BytecodeGenerator::VisitAndPushIntoRegisterList(Expression* expr,
+                                                     RegisterList* reg_list) {
+  {
+    ValueResultScope register_scope(this);
+    Visit(expr);
+  }
+  // Grow the register list after visiting the expression to avoid reserving
+  // the register across the expression evaluation, which could cause memory
+  // leaks for deep expressions due to dead objects being kept alive by pointers
+  // in registers.
+  Register destination = register_allocator()->GrowRegisterList(reg_list);
+  builder()->StoreAccumulatorInRegister(destination);
+}
+
+void BytecodeGenerator::BuildPushUndefinedIntoRegisterList(
+    RegisterList* reg_list) {
+  Register reg = register_allocator()->GrowRegisterList(reg_list);
+  builder()->LoadUndefined().StoreAccumulatorInRegister(reg);
+}
+
 // Visits the expression |expr| for testing its boolean value and jumping to the
 // |then| or |else| label depending on the value and short-circuit semantics.
 void BytecodeGenerator::VisitForTest(Expression* expr,
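
The new VisitAndPushIntoRegisterList/BuildPushUndefinedIntoRegisterList helpers rely on the growable-register-list machinery added to the allocator further down in this patch. Growing the list only *after* the expression has been visited means no register is reserved across a (possibly deep) nested evaluation. A standalone sketch of the contract, using simplified stand-ins rather than the real Register/RegisterList/BytecodeRegisterAllocator types:

    #include <cassert>

    struct Register { int index; };

    struct RegisterList {
      int first_index;
      int count;
      Register last_register() const { return Register{first_index + count - 1}; }
    };

    class Allocator {
     public:
      RegisterList NewGrowableRegisterList() { return RegisterList{next_index_, 0}; }
      Register GrowRegisterList(RegisterList* list) {
        Register reg{next_index_++};
        list->count++;
        // Holds only if nothing else was allocated since the list was
        // created, i.e. the list stays contiguous -- this mirrors the
        // CHECK_EQ in the real GrowRegisterList.
        assert(reg.index == list->last_register().index);
        return reg;
      }
     private:
      int next_index_ = 0;
    };

    int main() {
      Allocator allocator;
      RegisterList args = allocator.NewGrowableRegisterList();
      for (int i = 0; i < 3; ++i) {
        // 1. Evaluate the argument first (result notionally in the
        //    accumulator). 2. Only then grow the list and store, so the
        //    register is not live across the argument's evaluation.
        Register dest = allocator.GrowRegisterList(&args);
        (void)dest;
      }
      assert(args.count == 3);
      return 0;
    }
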
diff --git a/src/interpreter/bytecode-generator.h b/src/interpreter/bytecode-generator.h
index 03067de..bcab997 100644
--- a/src/interpreter/bytecode-generator.h
+++ b/src/interpreter/bytecode-generator.h
@@ -32,7 +32,7 @@
 #undef DECLARE_VISIT
 
   // Visiting functions for declaration lists and statements are overridden.
-  void VisitDeclarations(ZoneList<Declaration*>* declarations);
+  void VisitDeclarations(Declaration::List* declarations);
   void VisitStatements(ZoneList<Statement*>* statements);
 
  private:
@@ -72,10 +72,9 @@
   // Used by flow control routines to evaluate loop condition.
   void VisitCondition(Expression* expr);
 
-  // Visit the arguments expressions in |args| and store them in |args_regs|
-  // starting at register |first_argument_register| in the list.
-  void VisitArguments(ZoneList<Expression*>* args, RegisterList arg_regs,
-                      size_t first_argument_register = 0);
+  // Visit the argument expressions in |args| and store them in |arg_regs|,
+  // growing |arg_regs| for each argument visited.
+  void VisitArguments(ZoneList<Expression*>* args, RegisterList* arg_regs);
 
   // Visit a keyed super property load. The optional
   // |opt_receiver_out| register will have the receiver stored to it
@@ -92,18 +91,19 @@
                                    Register opt_receiver_out);
 
   void VisitPropertyLoad(Register obj, Property* expr);
-  void VisitPropertyLoadForAccumulator(Register obj, Property* expr);
+  void VisitPropertyLoadForRegister(Register obj, Property* expr,
+                                    Register destination);
 
-  void VisitVariableLoad(Variable* variable, FeedbackVectorSlot slot,
+  void BuildVariableLoad(Variable* variable, FeedbackVectorSlot slot,
+                         HoleCheckMode hole_check_mode,
                          TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
-  void VisitVariableLoadForAccumulatorValue(
+  void BuildVariableLoadForAccumulatorValue(
       Variable* variable, FeedbackVectorSlot slot,
+      HoleCheckMode hole_check_mode,
       TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
-  MUST_USE_RESULT Register
-  VisitVariableLoadForRegisterValue(Variable* variable, FeedbackVectorSlot slot,
-                                    TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
-  void VisitVariableAssignment(Variable* variable, Token::Value op,
-                               FeedbackVectorSlot slot);
+  void BuildVariableAssignment(Variable* variable, Token::Value op,
+                               FeedbackVectorSlot slot,
+                               HoleCheckMode hole_check_mode);
 
   void BuildReturn();
   void BuildReThrow();
@@ -111,7 +111,6 @@
   void BuildThrowIfHole(Handle<String> name);
   void BuildThrowIfNotHole(Handle<String> name);
   void BuildThrowReferenceError(Handle<String> name);
-  void BuildHoleCheckForVariableLoad(Variable* variable);
   void BuildHoleCheckForVariableAssignment(Variable* variable, Token::Value op);
 
   // Build jump to targets[value], where
@@ -143,6 +142,7 @@
                                   ObjectLiteralProperty* property,
                                   Register value_out);
   void VisitForInAssignment(Expression* expr, FeedbackVectorSlot slot);
+  void VisitModuleNamespaceImports();
 
   // Visit the header/body of a loop iteration.
   void VisitIterationHeader(IterationStatement* stmt,
@@ -152,12 +152,15 @@
   // Visit a statement and switch scopes, the context is in the accumulator.
   void VisitInScope(Statement* stmt, Scope* scope);
 
+  void BuildPushUndefinedIntoRegisterList(RegisterList* reg_list);
+
   // Visitors for obtaining expression result in the accumulator, in a
   // register, or just getting the effect.
   void VisitForAccumulatorValue(Expression* expr);
   void VisitForAccumulatorValueOrTheHole(Expression* expr);
   MUST_USE_RESULT Register VisitForRegisterValue(Expression* expr);
   void VisitForRegisterValue(Expression* expr, Register destination);
+  void VisitAndPushIntoRegisterList(Expression* expr, RegisterList* reg_list);
   void VisitForEffect(Expression* expr);
   void VisitForTest(Expression* expr, BytecodeLabels* then_labels,
                     BytecodeLabels* else_labels, TestFallthrough fallthrough);
@@ -194,6 +197,7 @@
 
   Handle<Name> home_object_symbol() const { return home_object_symbol_; }
   Handle<Name> prototype_string() const { return prototype_string_; }
+  Handle<FixedArray> empty_fixed_array() const { return empty_fixed_array_; }
 
   Zone* zone_;
   BytecodeArrayBuilder* builder_;
@@ -216,6 +220,7 @@
 
   Handle<Name> home_object_symbol_;
   Handle<Name> prototype_string_;
+  Handle<FixedArray> empty_fixed_array_;
 };
 
 }  // namespace interpreter
diff --git a/src/interpreter/bytecode-operands.h b/src/interpreter/bytecode-operands.h
index b35c486..5548502 100644
--- a/src/interpreter/bytecode-operands.h
+++ b/src/interpreter/bytecode-operands.h
@@ -14,8 +14,8 @@
 #define INVALID_OPERAND_TYPE_LIST(V) V(None, OperandTypeInfo::kNone)
 
 #define REGISTER_INPUT_OPERAND_TYPE_LIST(V)        \
-  V(RegList, OperandTypeInfo::kScalableSignedByte) \
   V(Reg, OperandTypeInfo::kScalableSignedByte)     \
+  V(RegList, OperandTypeInfo::kScalableSignedByte) \
   V(RegPair, OperandTypeInfo::kScalableSignedByte)
 
 #define REGISTER_OUTPUT_OPERAND_TYPE_LIST(V)          \
@@ -23,22 +23,25 @@
   V(RegOutPair, OperandTypeInfo::kScalableSignedByte) \
   V(RegOutTriple, OperandTypeInfo::kScalableSignedByte)
 
-#define SCALAR_OPERAND_TYPE_LIST(V)                   \
+#define UNSIGNED_SCALAR_OPERAND_TYPE_LIST(V)          \
   V(Flag8, OperandTypeInfo::kFixedUnsignedByte)       \
   V(IntrinsicId, OperandTypeInfo::kFixedUnsignedByte) \
   V(Idx, OperandTypeInfo::kScalableUnsignedByte)      \
   V(UImm, OperandTypeInfo::kScalableUnsignedByte)     \
-  V(Imm, OperandTypeInfo::kScalableSignedByte)        \
   V(RegCount, OperandTypeInfo::kScalableUnsignedByte) \
   V(RuntimeId, OperandTypeInfo::kFixedUnsignedShort)
 
+#define SIGNED_SCALAR_OPERAND_TYPE_LIST(V) \
+  V(Imm, OperandTypeInfo::kScalableSignedByte)
+
 #define REGISTER_OPERAND_TYPE_LIST(V) \
   REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
   REGISTER_OUTPUT_OPERAND_TYPE_LIST(V)
 
 #define NON_REGISTER_OPERAND_TYPE_LIST(V) \
   INVALID_OPERAND_TYPE_LIST(V)            \
-  SCALAR_OPERAND_TYPE_LIST(V)
+  UNSIGNED_SCALAR_OPERAND_TYPE_LIST(V)    \
+  SIGNED_SCALAR_OPERAND_TYPE_LIST(V)
 
 // The list of operand types used by bytecodes.
 #define OPERAND_TYPE_LIST(V)        \
@@ -114,9 +117,12 @@
   return static_cast<AccumulatorUse>(result);
 }
 
-std::ostream& operator<<(std::ostream& os, const AccumulatorUse& use);
-std::ostream& operator<<(std::ostream& os, const OperandScale& operand_scale);
-std::ostream& operator<<(std::ostream& os, const OperandSize& operand_size);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+                                           const AccumulatorUse& use);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+                                           const OperandScale& operand_scale);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+                                           const OperandSize& operand_size);
 std::ostream& operator<<(std::ostream& os, const OperandType& operand_type);
 
 }  // namespace interpreter
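
The operand-type lists above are X-macros: splitting the old SCALAR_OPERAND_TYPE_LIST into unsigned and signed halves changes nothing for clients that expand the composed NON_REGISTER_OPERAND_TYPE_LIST. A small self-contained illustration of the composition pattern (toy list names, not the real ones):

    #include <iostream>

    #define UNSIGNED_LIST(V) V(Idx) V(UImm)
    #define SIGNED_LIST(V) V(Imm)
    #define COMPOSED_LIST(V) UNSIGNED_LIST(V) SIGNED_LIST(V)

    // Expand once into an enum...
    #define DECLARE(Name) k##Name,
    enum class OperandType { COMPOSED_LIST(DECLARE) };
    #undef DECLARE

    // ...and once into printable names, guaranteed to stay in sync.
    #define NAME(Name) #Name,
    static const char* kNames[] = {COMPOSED_LIST(NAME)};
    #undef NAME

    int main() {
      for (const char* name : kNames) std::cout << name << "\n";
      return 0;
    }
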
diff --git a/src/interpreter/bytecode-peephole-optimizer.cc b/src/interpreter/bytecode-peephole-optimizer.cc
index c87d31c..4055294 100644
--- a/src/interpreter/bytecode-peephole-optimizer.cc
+++ b/src/interpreter/bytecode-peephole-optimizer.cc
@@ -13,7 +13,7 @@
 
 BytecodePeepholeOptimizer::BytecodePeepholeOptimizer(
     BytecodePipelineStage* next_stage)
-    : next_stage_(next_stage), last_(Bytecode::kIllegal) {
+    : next_stage_(next_stage), last_(Bytecode::kIllegal, BytecodeSourceInfo()) {
   InvalidateLast();
 }
 
@@ -77,8 +77,7 @@
   // source position information. NOP without source information can
   // always be elided.
   DCHECK(node->bytecode() != Bytecode::kNop || node->source_info().is_valid());
-
-  last_.Clone(node);
+  last_ = *node;
 }
 
 bool BytecodePeepholeOptimizer::CanElideLastBasedOnSourcePosition(
@@ -117,24 +116,6 @@
 
 namespace {
 
-void TransformLdaStarToLdrLdar(Bytecode new_bytecode, BytecodeNode* const last,
-                               BytecodeNode* const current) {
-  DCHECK_EQ(current->bytecode(), Bytecode::kStar);
-
-  //
-  // An example transformation here would be:
-  //
-  //   LdaGlobal i0, i1  ____\  LdrGlobal i0, i1, R
-  //   Star R            ====/  Ldar R
-  //
-  // which loads a global value into both a register and the
-  // accumulator. However, in the second form the Ldar can often be
-  // peephole optimized away unlike the Star in the first form.
-  //
-  last->Transform(new_bytecode, current->operand(0));
-  current->set_bytecode(Bytecode::kLdar, current->operand(0));
-}
-
 void TransformLdaSmiBinaryOpToBinaryOpWithSmi(Bytecode new_bytecode,
                                               BytecodeNode* const last,
                                               BytecodeNode* const current) {
@@ -142,7 +123,7 @@
   current->set_bytecode(new_bytecode, last->operand(0), current->operand(0),
                         current->operand(1));
   if (last->source_info().is_valid()) {
-    current->source_info_ptr()->Clone(last->source_info());
+    current->set_source_info(last->source_info());
   }
 }
 
@@ -153,7 +134,7 @@
   current->set_bytecode(new_bytecode, 0, current->operand(0),
                         current->operand(1));
   if (last->source_info().is_valid()) {
-    current->source_info_ptr()->Clone(last->source_info());
+    current->set_source_info(last->source_info());
   }
 }
 
@@ -223,7 +204,7 @@
       // |node| can not have a valid source position if the source
       // position of last() is valid (per rules in
       // CanElideLastBasedOnSourcePosition()).
-      node->source_info_ptr()->Clone(last()->source_info());
+      node->set_source_info(last()->source_info());
     }
     SetLast(node);
   } else {
@@ -240,17 +221,6 @@
   DefaultAction(node);
 }
 
-void BytecodePeepholeOptimizer::TransformLdaStarToLdrLdarAction(
-    BytecodeNode* const node, const PeepholeActionAndData* action_data) {
-  DCHECK(LastIsValid());
-  DCHECK(!Bytecodes::IsJump(node->bytecode()));
-
-  if (!node->source_info().is_statement()) {
-    TransformLdaStarToLdrLdar(action_data->bytecode, last(), node);
-  }
-  DefaultAction(node);
-}
-
 void BytecodePeepholeOptimizer::TransformLdaSmiBinaryOpToBinaryOpWithSmiAction(
     BytecodeNode* const node, const PeepholeActionAndData* action_data) {
   DCHECK(LastIsValid());
@@ -314,7 +284,7 @@
   if (!CanElideLastBasedOnSourcePosition(node)) {
     next_stage()->Write(last());
   } else if (!node->source_info().is_valid()) {
-    node->source_info_ptr()->Clone(last()->source_info());
+    node->set_source_info(last()->source_info());
   }
   InvalidateLast();
 }
diff --git a/src/interpreter/bytecode-peephole-optimizer.h b/src/interpreter/bytecode-peephole-optimizer.h
index cedd742..7e7e02a 100644
--- a/src/interpreter/bytecode-peephole-optimizer.h
+++ b/src/interpreter/bytecode-peephole-optimizer.h
@@ -5,6 +5,8 @@
 #ifndef V8_INTERPRETER_BYTECODE_PEEPHOLE_OPTIMIZER_H_
 #define V8_INTERPRETER_BYTECODE_PEEPHOLE_OPTIMIZER_H_
 
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
 #include "src/interpreter/bytecode-peephole-table.h"
 #include "src/interpreter/bytecode-pipeline.h"
 
@@ -17,8 +19,9 @@
 // An optimization stage for performing peephole optimizations on
 // generated bytecode. The optimizer may buffer one bytecode
 // internally.
-class BytecodePeepholeOptimizer final : public BytecodePipelineStage,
-                                        public ZoneObject {
+class V8_EXPORT_PRIVATE BytecodePeepholeOptimizer final
+    : public NON_EXPORTED_BASE(BytecodePipelineStage),
+      public NON_EXPORTED_BASE(ZoneObject) {
  public:
   explicit BytecodePeepholeOptimizer(BytecodePipelineStage* next_stage);
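
Several classes in this change gain V8_EXPORT_PRIVATE with NON_EXPORTED_BASE around their bases: the class is exported for use by other components (e.g. tests), while MSVC's C4275 warning about deriving an exported class from a non-exported base is suppressed. A rough sketch of what such macros boil down to (the actual definitions live in src/base/compiler-specific.h and src/globals.h and differ in detail):

    #if defined(_MSC_VER)
    #define EXPORT_FOR_TESTS __declspec(dllexport)
    // Deriving an exported class from a non-exported base triggers C4275;
    // suppress it where the base is internal by design.
    #define NON_EXPORTED_BASE(code) __pragma(warning(suppress : 4275)) code
    #else
    #define EXPORT_FOR_TESTS __attribute__((visibility("default")))
    #define NON_EXPORTED_BASE(code) code
    #endif

    class PipelineStage {  // internal base, not exported itself
     public:
      virtual ~PipelineStage() = default;
    };

    class EXPORT_FOR_TESTS Optimizer final
        : public NON_EXPORTED_BASE(PipelineStage) {};

    int main() {
      Optimizer optimizer;
      (void)optimizer;
      return 0;
    }
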
 
diff --git a/src/interpreter/bytecode-peephole-table.h b/src/interpreter/bytecode-peephole-table.h
index e716aef..1790f5a 100644
--- a/src/interpreter/bytecode-peephole-table.h
+++ b/src/interpreter/bytecode-peephole-table.h
@@ -19,7 +19,6 @@
   V(ElideCurrentIfOperand0MatchesAction)            \
   V(ElideLastAction)                                \
   V(ChangeBytecodeAction)                           \
-  V(TransformLdaStarToLdrLdarAction)                \
   V(TransformLdaSmiBinaryOpToBinaryOpWithSmiAction) \
   V(TransformLdaZeroBinaryOpToBinaryOpWithZeroAction)
 
diff --git a/src/interpreter/bytecode-pipeline.cc b/src/interpreter/bytecode-pipeline.cc
index 6e6a6b6..06accd7 100644
--- a/src/interpreter/bytecode-pipeline.cc
+++ b/src/interpreter/bytecode-pipeline.cc
@@ -11,19 +11,6 @@
 namespace internal {
 namespace interpreter {
 
-BytecodeNode::BytecodeNode(const BytecodeNode& other) {
-  memcpy(this, &other, sizeof(other));
-}
-
-BytecodeNode& BytecodeNode::operator=(const BytecodeNode& other) {
-  memcpy(this, &other, sizeof(other));
-  return *this;
-}
-
-void BytecodeNode::Clone(const BytecodeNode* const other) {
-  memcpy(this, other, sizeof(*other));
-}
-
 void BytecodeNode::Print(std::ostream& os) const {
 #ifdef DEBUG
   std::ios saved_state(nullptr);
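
The hand-written memcpy-based copy constructor, copy assignment, and Clone are deleted because BytecodeNode's members (an enum, plain integers, and a small struct) make the compiler-generated copies both correct and cheap, whereas memcpy-ing a class object is only well-defined for trivially copyable types. A compact sketch of the idea (toy struct, not the real node):

    #include <cstdint>
    #include <type_traits>

    enum class Bytecode { kNop, kLdar, kStar };

    struct SourceInfo {
      int position = -1;
      bool is_statement = false;
    };

    struct Node {
      Bytecode bytecode = Bytecode::kNop;
      uint32_t operands[4] = {};
      int operand_count = 0;
      SourceInfo source_info;
    };

    // All members are trivially copyable, so `a = b` is as cheap as the old
    // memcpy -- without risking undefined behavior if a non-trivial member
    // is ever added.
    static_assert(std::is_trivially_copyable<Node>::value,
                  "default copy is safe and cheap");

    int main() {
      Node last;
      Node current;
      current.bytecode = Bytecode::kStar;
      last = current;  // replaces the old last_.Clone(&current)
      return last.operand_count;
    }
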
diff --git a/src/interpreter/bytecode-pipeline.h b/src/interpreter/bytecode-pipeline.h
index 0b1a1f1..d508def 100644
--- a/src/interpreter/bytecode-pipeline.h
+++ b/src/interpreter/bytecode-pipeline.h
@@ -5,6 +5,8 @@
 #ifndef V8_INTERPRETER_BYTECODE_PIPELINE_H_
 #define V8_INTERPRETER_BYTECODE_PIPELINE_H_
 
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
 #include "src/interpreter/bytecode-register-allocator.h"
 #include "src/interpreter/bytecode-register.h"
 #include "src/interpreter/bytecodes.h"
@@ -93,14 +95,6 @@
     source_position_ = source_position;
   }
 
-  // Clones a source position. The current instance is expected to be
-  // invalid.
-  void Clone(const BytecodeSourceInfo& other) {
-    DCHECK(!is_valid());
-    position_type_ = other.position_type_;
-    source_position_ = other.source_position_;
-  }
-
   int source_position() const {
     DCHECK(is_valid());
     return source_position_;
@@ -138,81 +132,79 @@
 
 // A container for a generated bytecode, its operands, and source information.
 // These must be allocated by a BytecodeNodeAllocator instance.
-class BytecodeNode final : ZoneObject {
+class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
  public:
-  INLINE(BytecodeNode(const Bytecode bytecode,
-                      BytecodeSourceInfo* source_info = nullptr))
+  INLINE(BytecodeNode(Bytecode bytecode,
+                      BytecodeSourceInfo source_info = BytecodeSourceInfo()))
       : bytecode_(bytecode),
         operand_count_(0),
-        operand_scale_(OperandScale::kSingle) {
+        operand_scale_(OperandScale::kSingle),
+        source_info_(source_info) {
     DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
-    AttachSourceInfo(source_info);
   }
 
-  INLINE(BytecodeNode(const Bytecode bytecode, uint32_t operand0,
-                      BytecodeSourceInfo* source_info = nullptr))
+  INLINE(BytecodeNode(Bytecode bytecode, uint32_t operand0,
+                      BytecodeSourceInfo source_info = BytecodeSourceInfo()))
       : bytecode_(bytecode),
         operand_count_(1),
-        operand_scale_(OperandScale::kSingle) {
+        operand_scale_(OperandScale::kSingle),
+        source_info_(source_info) {
     DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
     SetOperand(0, operand0);
-    AttachSourceInfo(source_info);
   }
 
-  INLINE(BytecodeNode(const Bytecode bytecode, uint32_t operand0,
-                      uint32_t operand1,
-                      BytecodeSourceInfo* source_info = nullptr))
+  INLINE(BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+                      BytecodeSourceInfo source_info = BytecodeSourceInfo()))
       : bytecode_(bytecode),
         operand_count_(2),
-        operand_scale_(OperandScale::kSingle) {
+        operand_scale_(OperandScale::kSingle),
+        source_info_(source_info) {
     DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
     SetOperand(0, operand0);
     SetOperand(1, operand1);
-    AttachSourceInfo(source_info);
   }
 
-  INLINE(BytecodeNode(const Bytecode bytecode, uint32_t operand0,
-                      uint32_t operand1, uint32_t operand2,
-                      BytecodeSourceInfo* source_info = nullptr))
+  INLINE(BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+                      uint32_t operand2,
+                      BytecodeSourceInfo source_info = BytecodeSourceInfo()))
       : bytecode_(bytecode),
         operand_count_(3),
-        operand_scale_(OperandScale::kSingle) {
+        operand_scale_(OperandScale::kSingle),
+        source_info_(source_info) {
     DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
     SetOperand(0, operand0);
     SetOperand(1, operand1);
     SetOperand(2, operand2);
-    AttachSourceInfo(source_info);
   }
 
-  INLINE(BytecodeNode(const Bytecode bytecode, uint32_t operand0,
-                      uint32_t operand1, uint32_t operand2, uint32_t operand3,
-                      BytecodeSourceInfo* source_info = nullptr))
+  INLINE(BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+                      uint32_t operand2, uint32_t operand3,
+                      BytecodeSourceInfo source_info = BytecodeSourceInfo()))
       : bytecode_(bytecode),
         operand_count_(4),
-        operand_scale_(OperandScale::kSingle) {
+        operand_scale_(OperandScale::kSingle),
+        source_info_(source_info) {
     DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
     SetOperand(0, operand0);
     SetOperand(1, operand1);
     SetOperand(2, operand2);
     SetOperand(3, operand3);
-    AttachSourceInfo(source_info);
   }
 
-  BytecodeNode(const BytecodeNode& other);
-  BytecodeNode& operator=(const BytecodeNode& other);
-
   // Replace the bytecode of this node with |bytecode| and keep the operands.
   void replace_bytecode(Bytecode bytecode) {
     DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode_),
               Bytecodes::NumberOfOperands(bytecode));
     bytecode_ = bytecode;
   }
+
   void set_bytecode(Bytecode bytecode) {
     DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
     bytecode_ = bytecode;
     operand_count_ = 0;
     operand_scale_ = OperandScale::kSingle;
   }
+
   void set_bytecode(Bytecode bytecode, uint32_t operand0) {
     DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
     bytecode_ = bytecode;
@@ -220,6 +212,7 @@
     operand_scale_ = OperandScale::kSingle;
     SetOperand(0, operand0);
   }
+
   void set_bytecode(Bytecode bytecode, uint32_t operand0, uint32_t operand1) {
     DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 2);
     bytecode_ = bytecode;
@@ -228,6 +221,7 @@
     SetOperand(0, operand0);
     SetOperand(1, operand1);
   }
+
   void set_bytecode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
                     uint32_t operand2) {
     DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 3);
@@ -239,9 +233,6 @@
     SetOperand(2, operand2);
   }
 
-  // Clone |other|.
-  void Clone(const BytecodeNode* const other);
-
   // Print to stream |os|.
   void Print(std::ostream& os) const;
 
@@ -266,18 +257,6 @@
     SetOperand(operand_count() - 1, extra_operand);
   }
 
-  // Updates the operand at |operand_index| to |operand|.
-  void UpdateOperand(int operand_index, uint32_t operand) {
-    DCHECK_LE(operand_index, Bytecodes::NumberOfOperands(bytecode()));
-    operands_[operand_index] = operand;
-    if ((Bytecodes::OperandIsScalableSignedByte(bytecode(), operand_index) &&
-         Bytecodes::ScaleForSignedOperand(operand) != operand_scale_) ||
-        (Bytecodes::OperandIsScalableUnsignedByte(bytecode(), operand_index) &&
-         Bytecodes::ScaleForUnsignedOperand(operand) != operand_scale_)) {
-      UpdateScale();
-    }
-  }
-
   Bytecode bytecode() const { return bytecode_; }
 
   uint32_t operand(int i) const {
@@ -290,27 +269,14 @@
   OperandScale operand_scale() const { return operand_scale_; }
 
   const BytecodeSourceInfo& source_info() const { return source_info_; }
-  BytecodeSourceInfo* source_info_ptr() { return &source_info_; }
+  void set_source_info(BytecodeSourceInfo source_info) {
+    source_info_ = source_info;
+  }
 
   bool operator==(const BytecodeNode& other) const;
   bool operator!=(const BytecodeNode& other) const { return !(*this == other); }
 
  private:
-  INLINE(void AttachSourceInfo(BytecodeSourceInfo* source_info)) {
-    if (source_info && source_info->is_valid()) {
-      // Statement positions need to be emitted immediately.  Expression
-      // positions can be pushed back until a bytecode is found that can
-      // throw (if expression position filtering is turned on). We only
-      // invalidate the existing source position information if it is used.
-      if (source_info->is_statement() ||
-          !FLAG_ignition_filter_expression_positions ||
-          !Bytecodes::IsWithoutExternalSideEffects(bytecode())) {
-        source_info_.Clone(*source_info);
-        source_info->set_invalid();
-      }
-    }
-  }
-
   INLINE(void UpdateScaleForOperand(int operand_index, uint32_t operand)) {
     if (Bytecodes::OperandIsScalableSignedByte(bytecode(), operand_index)) {
       operand_scale_ =
@@ -327,13 +293,6 @@
     UpdateScaleForOperand(operand_index, operand);
   }
 
-  void UpdateScale() {
-    operand_scale_ = OperandScale::kSingle;
-    for (int i = 0; i < operand_count(); i++) {
-      UpdateScaleForOperand(i, operands_[i]);
-    }
-  }
-
   Bytecode bytecode_;
   uint32_t operands_[Bytecodes::kMaxOperands];
   int operand_count_;
@@ -341,8 +300,10 @@
   BytecodeSourceInfo source_info_;
 };
 
-std::ostream& operator<<(std::ostream& os, const BytecodeSourceInfo& info);
-std::ostream& operator<<(std::ostream& os, const BytecodeNode& node);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+                                           const BytecodeSourceInfo& info);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+                                           const BytecodeNode& node);
 
 }  // namespace interpreter
 }  // namespace internal
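
Throughout this file, BytecodeSourceInfo is now passed and defaulted by value rather than through a nullable pointer: a default-constructed, invalid instance replaces nullptr, so neither callers nor callees branch on a pointer. A minimal sketch of the API shape (simplified; the real class also distinguishes statement from expression positions):

    #include <iostream>

    class SourceInfo {
     public:
      SourceInfo() = default;  // invalid by default
      explicit SourceInfo(int position) : position_(position) {}
      bool is_valid() const { return position_ != kInvalid; }
      int position() const { return position_; }
     private:
      static const int kInvalid = -1;
      int position_ = kInvalid;
    };

    // By-value default argument replaces `SourceInfo* info = nullptr`.
    void Emit(const char* what, SourceInfo info = SourceInfo()) {
      if (info.is_valid()) {
        std::cout << what << " @ " << info.position() << "\n";
      } else {
        std::cout << what << " (no position)\n";
      }
    }

    int main() {
      Emit("Nop");                   // no source info attached
      Emit("Star", SourceInfo(42));  // carries a position
      return 0;
    }
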
diff --git a/src/interpreter/bytecode-register-allocator.h b/src/interpreter/bytecode-register-allocator.h
index e9de466..72e0133 100644
--- a/src/interpreter/bytecode-register-allocator.h
+++ b/src/interpreter/bytecode-register-allocator.h
@@ -52,6 +52,27 @@
     return reg_list;
   }
 
+  // Returns a growable register list.
+  RegisterList NewGrowableRegisterList() {
+    RegisterList reg_list(next_register_index_, 0);
+    return reg_list;
+  }
+
+  // Appends a new register to |reg_list|, increasing its count by one, and
+  // returns the register added.
+  //
+  // Note: no other registers may have been allocated since |reg_list| was
+  // originally created.
+  Register GrowRegisterList(RegisterList* reg_list) {
+    Register reg(NewRegister());
+    reg_list->IncrementRegisterCount();
+    // If the following CHECK fails then a register was allocated (and not
+    // freed) between the creation of the RegisterList and this call to add a
+    // Register.
+    CHECK_EQ(reg.index(), reg_list->last_register().index());
+    return reg;
+  }
+
   // Release all registers above |register_index|.
   void ReleaseRegisters(int register_index) {
     if (observer_) {
diff --git a/src/interpreter/bytecode-register-optimizer.cc b/src/interpreter/bytecode-register-optimizer.cc
index acbe0ba..563956e 100644
--- a/src/interpreter/bytecode-register-optimizer.cc
+++ b/src/interpreter/bytecode-register-optimizer.cc
@@ -8,7 +8,7 @@
 namespace internal {
 namespace interpreter {
 
-const uint32_t BytecodeRegisterOptimizer::kInvalidEquivalenceId;
+const uint32_t BytecodeRegisterOptimizer::kInvalidEquivalenceId = kMaxUInt32;
 
 // A class for tracking the state of a register. This class tracks
 // which equivalence set a register is a member of and also whether a
@@ -230,81 +230,7 @@
   DCHECK(accumulator_info_->register_value() == accumulator_);
 }
 
-// override
-Handle<BytecodeArray> BytecodeRegisterOptimizer::ToBytecodeArray(
-    Isolate* isolate, int register_count, int parameter_count,
-    Handle<FixedArray> handler_table) {
-  FlushState();
-  return next_stage_->ToBytecodeArray(isolate, max_register_index_ + 1,
-                                      parameter_count, handler_table);
-}
-
-// override
-void BytecodeRegisterOptimizer::Write(BytecodeNode* node) {
-  // Jumps are handled by WriteJump.
-  DCHECK(!Bytecodes::IsJump(node->bytecode()));
-  //
-  // Transfers with observable registers as the destination will be
-  // immediately materialized so the source position information will
-  // be ordered correctly.
-  //
-  // Transfers without observable destination registers will initially
-  // be emitted as Nop's with the source position. They may, or may
-  // not, be materialized by the optimizer. However, the source
-  // position is not lost and being attached to a Nop is fine as the
-  // destination register is not observable in the debugger.
-  //
-  switch (node->bytecode()) {
-    case Bytecode::kLdar: {
-      DoLdar(node);
-      return;
-    }
-    case Bytecode::kStar: {
-      DoStar(node);
-      return;
-    }
-    case Bytecode::kMov: {
-      DoMov(node);
-      return;
-    }
-    default:
-      break;
-  }
-
-  if (node->bytecode() == Bytecode::kDebugger ||
-      node->bytecode() == Bytecode::kSuspendGenerator) {
-    // All state must be flushed before emitting
-    // - a call to the debugger (as it can manipulate locals and parameters),
-    // - a generator suspend (as this involves saving all registers).
-    FlushState();
-  }
-
-  PrepareOperands(node);
-  next_stage_->Write(node);
-}
-
-// override
-void BytecodeRegisterOptimizer::WriteJump(BytecodeNode* node,
-                                          BytecodeLabel* label) {
-  FlushState();
-  next_stage_->WriteJump(node, label);
-}
-
-// override
-void BytecodeRegisterOptimizer::BindLabel(BytecodeLabel* label) {
-  FlushState();
-  next_stage_->BindLabel(label);
-}
-
-// override
-void BytecodeRegisterOptimizer::BindLabel(const BytecodeLabel& target,
-                                          BytecodeLabel* label) {
-  // There is no need to flush here, it will have been flushed when |target|
-  // was bound.
-  next_stage_->BindLabel(target, label);
-}
-
-void BytecodeRegisterOptimizer::FlushState() {
+void BytecodeRegisterOptimizer::Flush() {
   if (!flush_required_) {
     return;
   }
@@ -332,7 +258,7 @@
 
 void BytecodeRegisterOptimizer::OutputRegisterTransfer(
     RegisterInfo* input_info, RegisterInfo* output_info,
-    BytecodeSourceInfo* source_info) {
+    BytecodeSourceInfo source_info) {
   Register input = input_info->register_value();
   Register output = output_info->register_value();
   DCHECK_NE(input.index(), output.index());
@@ -404,7 +330,7 @@
 
 void BytecodeRegisterOptimizer::RegisterTransfer(
     RegisterInfo* input_info, RegisterInfo* output_info,
-    BytecodeSourceInfo* source_info) {
+    BytecodeSourceInfo source_info) {
   // Materialize an alternate in the equivalence set that
   // |output_info| is leaving.
   if (output_info->materialized()) {
@@ -423,7 +349,7 @@
     output_info->set_materialized(false);
     RegisterInfo* materialized_info = input_info->GetMaterializedEquivalent();
     OutputRegisterTransfer(materialized_info, output_info, source_info);
-  } else if (source_info->is_valid()) {
+  } else if (source_info.is_valid()) {
     // Emit a placeholder nop to maintain source position info.
     EmitNopForSourceInfo(source_info);
   }
@@ -437,60 +363,32 @@
 }
 
 void BytecodeRegisterOptimizer::EmitNopForSourceInfo(
-    BytecodeSourceInfo* source_info) const {
-  DCHECK(source_info->is_valid());
+    BytecodeSourceInfo source_info) const {
+  DCHECK(source_info.is_valid());
   BytecodeNode nop(Bytecode::kNop, source_info);
   next_stage_->Write(&nop);
 }
 
-void BytecodeRegisterOptimizer::DoLdar(BytecodeNode* node) {
-  Register input = GetRegisterInputOperand(
-      0, node->bytecode(), node->operands(), node->operand_count());
-  RegisterInfo* input_info = GetRegisterInfo(input);
-  RegisterTransfer(input_info, accumulator_info_, node->source_info_ptr());
-}
-
-void BytecodeRegisterOptimizer::DoMov(BytecodeNode* node) {
-  Register input = GetRegisterInputOperand(
-      0, node->bytecode(), node->operands(), node->operand_count());
-  RegisterInfo* input_info = GetRegisterInfo(input);
-  Register output = GetRegisterOutputOperand(
-      1, node->bytecode(), node->operands(), node->operand_count());
-  RegisterInfo* output_info = GetRegisterInfo(output);
-  RegisterTransfer(input_info, output_info, node->source_info_ptr());
-}
-
-void BytecodeRegisterOptimizer::DoStar(BytecodeNode* node) {
-  Register output = GetRegisterOutputOperand(
-      0, node->bytecode(), node->operands(), node->operand_count());
-  RegisterInfo* output_info = GetRegisterInfo(output);
-  RegisterTransfer(accumulator_info_, output_info, node->source_info_ptr());
-}
-
-void BytecodeRegisterOptimizer::PrepareRegisterOutputOperand(
-    RegisterInfo* reg_info) {
+void BytecodeRegisterOptimizer::PrepareOutputRegister(Register reg) {
+  RegisterInfo* reg_info = GetRegisterInfo(reg);
   if (reg_info->materialized()) {
     CreateMaterializedEquivalent(reg_info);
   }
+  reg_info->MoveToNewEquivalenceSet(NextEquivalenceId(), true);
   max_register_index_ =
       std::max(max_register_index_, reg_info->register_value().index());
-  reg_info->MoveToNewEquivalenceSet(NextEquivalenceId(), true);
 }
 
-void BytecodeRegisterOptimizer::PrepareRegisterRangeOutputOperand(
-    Register start, int count) {
-  for (int i = 0; i < count; ++i) {
-    Register reg(start.index() + i);
-    RegisterInfo* reg_info = GetRegisterInfo(reg);
-    PrepareRegisterOutputOperand(reg_info);
+void BytecodeRegisterOptimizer::PrepareOutputRegisterList(
+    RegisterList reg_list) {
+  int start_index = reg_list.first_register().index();
+  for (int i = 0; i < reg_list.register_count(); ++i) {
+    Register current(start_index + i);
+    PrepareOutputRegister(current);
   }
 }
 
-Register BytecodeRegisterOptimizer::GetEquivalentRegisterForInputOperand(
-    Register reg) {
-  // For a temporary register, RegInfo state may need be created. For
-  // locals and parameters, the RegInfo state is created in the
-  // BytecodeRegisterOptimizer constructor.
+Register BytecodeRegisterOptimizer::GetInputRegister(Register reg) {
   RegisterInfo* reg_info = GetRegisterInfo(reg);
   if (reg_info->materialized()) {
     return reg;
@@ -501,124 +399,49 @@
   }
 }
 
-void BytecodeRegisterOptimizer::PrepareRegisterInputOperand(
-    BytecodeNode* const node, Register reg, int operand_index) {
-  Register equivalent = GetEquivalentRegisterForInputOperand(reg);
-  node->UpdateOperand(operand_index,
-                      static_cast<uint32_t>(equivalent.ToOperand()));
-}
-
-void BytecodeRegisterOptimizer::PrepareRegisterRangeInputOperand(Register start,
-                                                                 int count) {
-  for (int i = 0; i < count; ++i) {
-    Register current(start.index() + i);
-    RegisterInfo* input_info = GetRegisterInfo(current);
-    Materialize(input_info);
+RegisterList BytecodeRegisterOptimizer::GetInputRegisterList(
+    RegisterList reg_list) {
+  if (reg_list.register_count() == 1) {
+    // If there is only a single register, treat it as a normal input register.
+    Register reg(GetInputRegister(reg_list.first_register()));
+    return RegisterList(reg.index(), 1);
+  } else {
+    int start_index = reg_list.first_register().index();
+    for (int i = 0; i < reg_list.register_count(); ++i) {
+      Register current(start_index + i);
+      RegisterInfo* input_info = GetRegisterInfo(current);
+      Materialize(input_info);
+    }
+    return reg_list;
   }
 }
 
-void BytecodeRegisterOptimizer::PrepareRegisterOperands(
-    BytecodeNode* const node) {
-  //
-  // For each input operand, get a materialized equivalent if it is
-  // just a single register, otherwise materialize register range.
-  // Update operand_scale if necessary.
-  //
-  // For each output register about to be clobbered, materialize an
-  // equivalent if it exists. Put each register in it's own equivalence set.
-  //
-  const uint32_t* operands = node->operands();
-  int operand_count = node->operand_count();
-  const OperandType* operand_types =
-      Bytecodes::GetOperandTypes(node->bytecode());
-  for (int i = 0; i < operand_count; ++i) {
-    int count;
-    if (operand_types[i] == OperandType::kRegList) {
-      DCHECK_LT(i, operand_count - 1);
-      DCHECK(operand_types[i + 1] == OperandType::kRegCount);
-      count = static_cast<int>(operands[i + 1]);
-    } else {
-      count = Bytecodes::GetNumberOfRegistersRepresentedBy(operand_types[i]);
-    }
-
-    if (count == 0) {
-      continue;
-    }
-
-    Register reg = Register::FromOperand(static_cast<int32_t>(operands[i]));
-    if (Bytecodes::IsRegisterInputOperandType(operand_types[i])) {
-      if (count == 1) {
-        PrepareRegisterInputOperand(node, reg, i);
-      } else if (count > 1) {
-        PrepareRegisterRangeInputOperand(reg, count);
-      }
-    } else if (Bytecodes::IsRegisterOutputOperandType(operand_types[i])) {
-      PrepareRegisterRangeOutputOperand(reg, count);
-    }
+void BytecodeRegisterOptimizer::PrepareForBytecode(Bytecode bytecode) {
+  if (Bytecodes::IsJump(bytecode) || bytecode == Bytecode::kDebugger ||
+      bytecode == Bytecode::kSuspendGenerator) {
+    // All state must be flushed before emitting
+    // - a jump bytecode (as the register equivalents at the jump target aren't
+    //   known),
+    // - a call to the debugger (as it can manipulate locals and parameters),
+    // - a generator suspend (as this involves saving all registers).
+    Flush();
   }
-}
 
-void BytecodeRegisterOptimizer::PrepareAccumulator(BytecodeNode* const node) {
   // Materialize the accumulator if it is read by the bytecode. The
   // accumulator is special and no other register can be materialized
   // in its place.
-  if (Bytecodes::ReadsAccumulator(node->bytecode()) &&
+  if (Bytecodes::ReadsAccumulator(bytecode) &&
       !accumulator_info_->materialized()) {
     Materialize(accumulator_info_);
   }
 
   // Materialize an equivalent to the accumulator if it will be
   // clobbered when the bytecode is dispatched.
-  if (Bytecodes::WritesAccumulator(node->bytecode())) {
-    PrepareRegisterOutputOperand(accumulator_info_);
+  if (Bytecodes::WritesAccumulator(bytecode)) {
+    PrepareOutputRegister(accumulator_);
   }
 }
 
-void BytecodeRegisterOptimizer::PrepareOperands(BytecodeNode* const node) {
-  PrepareAccumulator(node);
-  PrepareRegisterOperands(node);
-}
-
-// static
-Register BytecodeRegisterOptimizer::GetRegisterInputOperand(
-    int index, Bytecode bytecode, const uint32_t* operands, int operand_count) {
-  DCHECK_LT(index, operand_count);
-  DCHECK(Bytecodes::IsRegisterInputOperandType(
-      Bytecodes::GetOperandType(bytecode, index)));
-  return OperandToRegister(operands[index]);
-}
-
-// static
-Register BytecodeRegisterOptimizer::GetRegisterOutputOperand(
-    int index, Bytecode bytecode, const uint32_t* operands, int operand_count) {
-  DCHECK_LT(index, operand_count);
-  DCHECK(Bytecodes::IsRegisterOutputOperandType(
-      Bytecodes::GetOperandType(bytecode, index)));
-  return OperandToRegister(operands[index]);
-}
-
-BytecodeRegisterOptimizer::RegisterInfo*
-BytecodeRegisterOptimizer::GetRegisterInfo(Register reg) {
-  size_t index = GetRegisterInfoTableIndex(reg);
-  DCHECK_LT(index, register_info_table_.size());
-  return register_info_table_[index];
-}
-
-BytecodeRegisterOptimizer::RegisterInfo*
-BytecodeRegisterOptimizer::GetOrCreateRegisterInfo(Register reg) {
-  size_t index = GetRegisterInfoTableIndex(reg);
-  return index < register_info_table_.size() ? register_info_table_[index]
-                                             : NewRegisterInfo(reg);
-}
-
-BytecodeRegisterOptimizer::RegisterInfo*
-BytecodeRegisterOptimizer::NewRegisterInfo(Register reg) {
-  size_t index = GetRegisterInfoTableIndex(reg);
-  DCHECK_GE(index, register_info_table_.size());
-  GrowRegisterMap(reg);
-  return register_info_table_[index];
-}
-
 void BytecodeRegisterOptimizer::GrowRegisterMap(Register reg) {
   DCHECK(RegisterIsTemporary(reg));
   size_t index = GetRegisterInfoTableIndex(reg);
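
With the BytecodePipelineStage interface gone, the bytecode array builder is expected to drive the optimizer explicitly: PrepareForBytecode before each bytecode (flushing on jumps, Debugger, and SuspendGenerator), DoLdar/DoStar/DoMov for register transfers, and GetInputRegister/PrepareOutputRegister for operands. A toy model of the bookkeeping that lets Star/Ldar pairs be elided (hypothetical ToyOptimizer; no equivalence ids or materialization-on-demand, unlike the real class):

    #include <iostream>
    #include <set>

    // Toy model: the accumulator may have pending, unmaterialized copies in
    // registers. Star only records the alias; Flush emits deferred stores.
    class ToyOptimizer {
     public:
      void DoStar(int reg) { pending_.insert(reg); }  // defer the store
      void DoLdar(int reg) {
        if (pending_.count(reg)) return;  // already holds the accumulator
        Flush();                          // store old value before clobbering
        std::cout << "Ldar r" << reg << "\n";
      }
      void PrepareForBytecode(bool is_jump) {
        if (is_jump) Flush();  // equivalents unknown at the jump target
      }
      void Flush() {
        for (int reg : pending_) std::cout << "Star r" << reg << "\n";
        pending_.clear();
      }
     private:
      std::set<int> pending_;
    };

    int main() {
      ToyOptimizer optimizer;
      optimizer.DoStar(0);  // nothing emitted yet
      optimizer.DoLdar(0);  // elided: r0 already equals the accumulator
      optimizer.PrepareForBytecode(/*is_jump=*/true);  // forces "Star r0"
      return 0;
    }
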
diff --git a/src/interpreter/bytecode-register-optimizer.h b/src/interpreter/bytecode-register-optimizer.h
index eda22e5..e2a02cf 100644
--- a/src/interpreter/bytecode-register-optimizer.h
+++ b/src/interpreter/bytecode-register-optimizer.h
@@ -5,6 +5,8 @@
 #ifndef V8_INTERPRETER_BYTECODE_REGISTER_OPTIMIZER_H_
 #define V8_INTERPRETER_BYTECODE_REGISTER_OPTIMIZER_H_
 
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
 #include "src/interpreter/bytecode-pipeline.h"
 
 namespace v8 {
@@ -15,10 +17,9 @@
 // registers. The bytecode generator uses temporary registers
 // liberally for correctness and convenience, and this stage removes
 // transfers that are not required while preserving correctness.
-class BytecodeRegisterOptimizer final
-    : public BytecodePipelineStage,
-      public BytecodeRegisterAllocator::Observer,
-      public ZoneObject {
+class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
+    : public NON_EXPORTED_BASE(BytecodeRegisterAllocator::Observer),
+      public NON_EXPORTED_BASE(ZoneObject) {
  public:
   BytecodeRegisterOptimizer(Zone* zone,
                             BytecodeRegisterAllocator* register_allocator,
@@ -26,17 +27,44 @@
                             BytecodePipelineStage* next_stage);
   virtual ~BytecodeRegisterOptimizer() {}
 
-  // BytecodePipelineStage interface.
-  void Write(BytecodeNode* node) override;
-  void WriteJump(BytecodeNode* node, BytecodeLabel* label) override;
-  void BindLabel(BytecodeLabel* label) override;
-  void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
-  Handle<BytecodeArray> ToBytecodeArray(
-      Isolate* isolate, int register_count, int parameter_count,
-      Handle<FixedArray> handler_table) override;
+  // Perform explicit register transfer operations.
+  void DoLdar(Register input, BytecodeSourceInfo source_info) {
+    RegisterInfo* input_info = GetRegisterInfo(input);
+    RegisterTransfer(input_info, accumulator_info_, source_info);
+  }
+  void DoStar(Register output, BytecodeSourceInfo source_info) {
+    RegisterInfo* output_info = GetRegisterInfo(output);
+    RegisterTransfer(accumulator_info_, output_info, source_info);
+  }
+  void DoMov(Register input, Register output, BytecodeSourceInfo source_info) {
+    RegisterInfo* input_info = GetRegisterInfo(input);
+    RegisterInfo* output_info = GetRegisterInfo(output);
+    RegisterTransfer(input_info, output_info, source_info);
+  }
+
+  // Materialize all live registers and flush equivalence sets.
+  void Flush();
+
+  // Prepares the register state for emitting |bytecode|, flushing if needed.
+  void PrepareForBytecode(Bytecode bytecode);
+
+  // Prepares |reg| for being used as an output operand.
+  void PrepareOutputRegister(Register reg);
+
+  // Prepares the registers in |reg_list| for being used as output operands.
+  void PrepareOutputRegisterList(RegisterList reg_list);
+
+  // Returns an equivalent register to |reg| to be used as an input operand.
+  Register GetInputRegister(Register reg);
+
+  // Returns an equivalent register list to |reg_list| to be used as an input
+  // operand.
+  RegisterList GetInputRegisterList(RegisterList reg_list);
+
+  int maximum_register_index() const { return max_register_index_; }
 
  private:
-  static const uint32_t kInvalidEquivalenceId = kMaxUInt32;
+  static const uint32_t kInvalidEquivalenceId;
 
   class RegisterInfo;
 
@@ -45,48 +73,20 @@
   void RegisterListAllocateEvent(RegisterList reg_list) override;
   void RegisterListFreeEvent(RegisterList reg) override;
 
-  // Helpers for BytecodePipelineStage interface.
-  void FlushState();
-
   // Update internal state for register transfer from |input| to
   // |output| using |source_info| as source position information if
   // any bytecodes are emitted due to transfer.
   void RegisterTransfer(RegisterInfo* input, RegisterInfo* output,
-                        BytecodeSourceInfo* source_info);
+                        BytecodeSourceInfo source_info);
 
   // Emit a register transfer bytecode from |input| to |output|.
-  void OutputRegisterTransfer(RegisterInfo* input, RegisterInfo* output,
-                              BytecodeSourceInfo* source_info = nullptr);
+  void OutputRegisterTransfer(
+      RegisterInfo* input, RegisterInfo* output,
+      BytecodeSourceInfo source_info = BytecodeSourceInfo());
 
   // Emits a Nop to preserve source position information in the
   // bytecode pipeline.
-  void EmitNopForSourceInfo(BytecodeSourceInfo* source_info) const;
-
-  // Handlers for bytecode nodes for register to register transfers.
-  void DoLdar(BytecodeNode* node);
-  void DoMov(BytecodeNode* node);
-  void DoStar(BytecodeNode* node);
-
-  // Operand processing methods for bytecodes other than those
-  // performing register to register transfers.
-  void PrepareOperands(BytecodeNode* const node);
-  void PrepareAccumulator(BytecodeNode* const node);
-  void PrepareRegisterOperands(BytecodeNode* const node);
-
-  void PrepareRegisterOutputOperand(RegisterInfo* reg_info);
-  void PrepareRegisterRangeOutputOperand(Register start, int count);
-  void PrepareRegisterInputOperand(BytecodeNode* const node, Register reg,
-                                   int operand_index);
-  void PrepareRegisterRangeInputOperand(Register start, int count);
-
-  Register GetEquivalentRegisterForInputOperand(Register reg);
-
-  static Register GetRegisterInputOperand(int index, Bytecode bytecode,
-                                          const uint32_t* operands,
-                                          int operand_count);
-  static Register GetRegisterOutputOperand(int index, Bytecode bytecode,
-                                           const uint32_t* operands,
-                                           int operand_count);
+  void EmitNopForSourceInfo(BytecodeSourceInfo source_info) const;
 
   void CreateMaterializedEquivalent(RegisterInfo* info);
   RegisterInfo* GetMaterializedEquivalent(RegisterInfo* info);
@@ -96,9 +96,23 @@
                            RegisterInfo* non_set_member);
 
   // Methods for finding and creating metadata for each register.
-  RegisterInfo* GetOrCreateRegisterInfo(Register reg);
-  RegisterInfo* GetRegisterInfo(Register reg);
-  RegisterInfo* NewRegisterInfo(Register reg);
+  RegisterInfo* GetRegisterInfo(Register reg) {
+    size_t index = GetRegisterInfoTableIndex(reg);
+    DCHECK_LT(index, register_info_table_.size());
+    return register_info_table_[index];
+  }
+  RegisterInfo* GetOrCreateRegisterInfo(Register reg) {
+    size_t index = GetRegisterInfoTableIndex(reg);
+    return index < register_info_table_.size() ? register_info_table_[index]
+                                               : NewRegisterInfo(reg);
+  }
+  RegisterInfo* NewRegisterInfo(Register reg) {
+    size_t index = GetRegisterInfoTableIndex(reg);
+    DCHECK_GE(index, register_info_table_.size());
+    GrowRegisterMap(reg);
+    return register_info_table_[index];
+  }
+
   void GrowRegisterMap(Register reg);
 
   bool RegisterIsTemporary(Register reg) const {
@@ -123,7 +137,8 @@
 
   uint32_t NextEquivalenceId() {
     equivalence_id_++;
-    CHECK_NE(equivalence_id_, kInvalidEquivalenceId);
+    // TODO(rmcilroy): use the same type for these and remove static_cast.
+    CHECK_NE(static_cast<size_t>(equivalence_id_), kInvalidEquivalenceId);
     return equivalence_id_;
   }
 
diff --git a/src/interpreter/bytecode-register.h b/src/interpreter/bytecode-register.h
index d698d40..554bc23 100644
--- a/src/interpreter/bytecode-register.h
+++ b/src/interpreter/bytecode-register.h
@@ -8,6 +8,7 @@
 #include "src/interpreter/bytecodes.h"
 
 #include "src/frames.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -15,7 +16,7 @@
 
 // An interpreter Register which is located in the function's Register file
 // in its stack frame. Registers hold parameters, this, and expression values.
-class Register final {
+class V8_EXPORT_PRIVATE Register final {
  public:
   explicit Register(int index = kInvalidIndex) : index_(index) {}
 
@@ -104,6 +105,9 @@
   RegisterList(int first_reg_index, int register_count)
       : first_reg_index_(first_reg_index), register_count_(register_count) {}
 
+  // Increases the size of the register list by one.
+  void IncrementRegisterCount() { register_count_++; }
+
   // Returns a new RegisterList which is a truncated version of this list, with
   // |count| registers.
   const RegisterList Truncate(int new_count) {
diff --git a/src/interpreter/bytecodes.cc b/src/interpreter/bytecodes.cc
index c58f468..15c4e98 100644
--- a/src/interpreter/bytecodes.cc
+++ b/src/interpreter/bytecodes.cc
@@ -211,6 +211,12 @@
       case Bytecode::kLdaNull:
       case Bytecode::kLdaTheHole:
       case Bytecode::kLdaConstant:
+      case Bytecode::kLdaUndefined:
+      case Bytecode::kLdaGlobal:
+      case Bytecode::kLdaNamedProperty:
+      case Bytecode::kLdaKeyedProperty:
+      case Bytecode::kLdaContextSlot:
+      case Bytecode::kLdaCurrentContextSlot:
       case Bytecode::kAdd:
       case Bytecode::kSub:
       case Bytecode::kMul:
@@ -220,6 +226,7 @@
       case Bytecode::kDec:
       case Bytecode::kTypeOf:
       case Bytecode::kCall:
+      case Bytecode::kCallProperty:
       case Bytecode::kNew:
         return true;
       default:
diff --git a/src/interpreter/bytecodes.h b/src/interpreter/bytecodes.h
index 6232966..23d77f0 100644
--- a/src/interpreter/bytecodes.h
+++ b/src/interpreter/bytecodes.h
@@ -37,12 +37,8 @@
   V(LdaFalse, AccumulatorUse::kWrite)                                          \
   V(LdaConstant, AccumulatorUse::kWrite, OperandType::kIdx)                    \
                                                                                \
-  /* Loading registers */                                                      \
-  V(LdrUndefined, AccumulatorUse::kNone, OperandType::kRegOut)                 \
-                                                                               \
   /* Globals */                                                                \
   V(LdaGlobal, AccumulatorUse::kWrite, OperandType::kIdx)                      \
-  V(LdrGlobal, AccumulatorUse::kNone, OperandType::kIdx, OperandType::kRegOut) \
   V(LdaGlobalInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx)          \
   V(StaGlobalSloppy, AccumulatorUse::kRead, OperandType::kIdx,                 \
     OperandType::kIdx)                                                         \
@@ -54,10 +50,10 @@
   V(PopContext, AccumulatorUse::kNone, OperandType::kReg)                      \
   V(LdaContextSlot, AccumulatorUse::kWrite, OperandType::kReg,                 \
     OperandType::kIdx, OperandType::kUImm)                                     \
-  V(LdrContextSlot, AccumulatorUse::kNone, OperandType::kReg,                  \
-    OperandType::kIdx, OperandType::kUImm, OperandType::kRegOut)               \
+  V(LdaCurrentContextSlot, AccumulatorUse::kWrite, OperandType::kIdx)          \
   V(StaContextSlot, AccumulatorUse::kRead, OperandType::kReg,                  \
     OperandType::kIdx, OperandType::kUImm)                                     \
+  V(StaCurrentContextSlot, AccumulatorUse::kRead, OperandType::kIdx)           \
                                                                                \
   /* Load-Store lookup slots */                                                \
   V(LdaLookupSlot, AccumulatorUse::kWrite, OperandType::kIdx)                  \
@@ -83,12 +79,14 @@
   /* Property loads (LoadIC) operations */                                     \
   V(LdaNamedProperty, AccumulatorUse::kWrite, OperandType::kReg,               \
     OperandType::kIdx, OperandType::kIdx)                                      \
-  V(LdrNamedProperty, AccumulatorUse::kNone, OperandType::kReg,                \
-    OperandType::kIdx, OperandType::kIdx, OperandType::kRegOut)                \
   V(LdaKeyedProperty, AccumulatorUse::kReadWrite, OperandType::kReg,           \
     OperandType::kIdx)                                                         \
-  V(LdrKeyedProperty, AccumulatorUse::kRead, OperandType::kReg,                \
-    OperandType::kIdx, OperandType::kRegOut)                                   \
+                                                                               \
+  /* Operations on module variables */                                         \
+  V(LdaModuleVariable, AccumulatorUse::kWrite, OperandType::kImm,              \
+    OperandType::kUImm)                                                        \
+  V(StaModuleVariable, AccumulatorUse::kRead, OperandType::kImm,               \
+    OperandType::kUImm)                                                        \
                                                                                \
   /* Property stores (StoreIC) operations */                                   \
   V(StaNamedPropertySloppy, AccumulatorUse::kRead, OperandType::kReg,          \
@@ -145,6 +143,8 @@
   /* Call operations */                                                        \
   V(Call, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kRegList,    \
     OperandType::kRegCount, OperandType::kIdx)                                 \
+  V(CallProperty, AccumulatorUse::kWrite, OperandType::kReg,                   \
+    OperandType::kRegList, OperandType::kRegCount, OperandType::kIdx)          \
   V(TailCall, AccumulatorUse::kWrite, OperandType::kReg,                       \
     OperandType::kRegList, OperandType::kRegCount, OperandType::kIdx)          \
   V(CallRuntime, AccumulatorUse::kWrite, OperandType::kRuntimeId,              \
@@ -314,7 +314,7 @@
 #define CONSTEXPR constexpr
 #endif
 
-class Bytecodes final {
+class V8_EXPORT_PRIVATE Bytecodes final {
  public:
   //  The maximum number of operands a bytecode may have.
   static const int kMaxOperands = 4;
@@ -422,15 +422,16 @@
            bytecode == Bytecode::kLdaTrue || bytecode == Bytecode::kLdaFalse ||
            bytecode == Bytecode::kLdaUndefined ||
            bytecode == Bytecode::kLdaTheHole ||
-           bytecode == Bytecode::kLdaConstant;
+           bytecode == Bytecode::kLdaConstant ||
+           bytecode == Bytecode::kLdaContextSlot ||
+           bytecode == Bytecode::kLdaCurrentContextSlot;
   }
 
   // Return true if |bytecode| is a register load without effects,
-  // e.g. Mov, Star, LdrUndefined.
+  // e.g. Mov, Star.
   static CONSTEXPR bool IsRegisterLoadWithoutEffects(Bytecode bytecode) {
     return bytecode == Bytecode::kMov || bytecode == Bytecode::kPopContext ||
-           bytecode == Bytecode::kPushContext || bytecode == Bytecode::kStar ||
-           bytecode == Bytecode::kLdrUndefined;
+           bytecode == Bytecode::kPushContext || bytecode == Bytecode::kStar;
   }
 
   // Returns true if the bytecode is a conditional jump taking
@@ -525,8 +526,8 @@
 
   // Returns true if the bytecode is a call or a constructor call.
   static CONSTEXPR bool IsCallOrNew(Bytecode bytecode) {
-    return bytecode == Bytecode::kCall || bytecode == Bytecode::kTailCall ||
-           bytecode == Bytecode::kNew;
+    return bytecode == Bytecode::kCall || bytecode == Bytecode::kCallProperty ||
+           bytecode == Bytecode::kTailCall || bytecode == Bytecode::kNew;
   }
 
   // Returns true if the bytecode is a call to the runtime.
@@ -733,7 +734,8 @@
 // See crbug.com/603131.
 #undef CONSTEXPR
 
-std::ostream& operator<<(std::ostream& os, const Bytecode& bytecode);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+                                           const Bytecode& bytecode);
 
 }  // namespace interpreter
 }  // namespace internal
diff --git a/src/interpreter/constant-array-builder.h b/src/interpreter/constant-array-builder.h
index 78d36f5..8e95913 100644
--- a/src/interpreter/constant-array-builder.h
+++ b/src/interpreter/constant-array-builder.h
@@ -5,6 +5,7 @@
 #ifndef V8_INTERPRETER_CONSTANT_ARRAY_BUILDER_H_
 #define V8_INTERPRETER_CONSTANT_ARRAY_BUILDER_H_
 
+#include "src/globals.h"
 #include "src/identity-map.h"
 #include "src/interpreter/bytecodes.h"
 #include "src/zone/zone-containers.h"
@@ -20,7 +21,7 @@
 // interpreter. Each instance of this class is intended to be used to
 // generate exactly one FixedArray of constants via the ToFixedArray
 // method.
-class ConstantArrayBuilder final BASE_EMBEDDED {
+class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
  public:
   // Capacity of the 8-bit operand slice.
   static const size_t k8BitCapacity = 1u << kBitsPerByte;
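
The V8_EXPORT_PRIVATE annotations added across this patch expose these internal interpreter classes past the shared-library boundary, e.g. so component builds and unittests can link against them. A hedged sketch of the usual shape of such a macro; the names and conditions below are illustrative assumptions, not V8's actual definition:

    // Illustrative only; V8 defines its macro centrally, per toolchain.
    #if defined(DEMO_BUILDING_SHARED) && defined(_MSC_VER)
    #define DEMO_EXPORT_PRIVATE __declspec(dllexport)
    #elif defined(DEMO_BUILDING_SHARED) && defined(__GNUC__)
    #define DEMO_EXPORT_PRIVATE __attribute__((visibility("default")))
    #else
    #define DEMO_EXPORT_PRIVATE
    #endif

    class DEMO_EXPORT_PRIVATE ConstantArrayBuilder { /* ... */ };

    int main() {}
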
diff --git a/src/interpreter/interpreter-assembler.cc b/src/interpreter/interpreter-assembler.cc
index 5767ffa..c8ce553 100644
--- a/src/interpreter/interpreter-assembler.cc
+++ b/src/interpreter/interpreter-assembler.cc
@@ -97,17 +97,17 @@
   Label context_search(this, 2, context_search_loop_variables);
 
   // Fast path if the depth is 0.
-  BranchIfWord32Equal(depth, Int32Constant(0), &context_found, &context_search);
+  Branch(Word32Equal(depth, Int32Constant(0)), &context_found, &context_search);
 
   // Loop until the depth is 0.
   Bind(&context_search);
   {
     cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
     cur_context.Bind(
-        LoadContextSlot(cur_context.value(), Context::PREVIOUS_INDEX));
+        LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
 
-    BranchIfWord32Equal(cur_depth.value(), Int32Constant(0), &context_found,
-                        &context_search);
+    Branch(Word32Equal(cur_depth.value(), Int32Constant(0)), &context_found,
+           &context_search);
   }
 
   Bind(&context_found);
@@ -135,14 +135,14 @@
     // contexts actually need to be checked.
 
     Node* extension_slot =
-        LoadContextSlot(cur_context.value(), Context::EXTENSION_INDEX);
+        LoadContextElement(cur_context.value(), Context::EXTENSION_INDEX);
 
     // Jump to the target if the extension slot is not a hole.
     GotoIf(WordNotEqual(extension_slot, TheHoleConstant()), target);
 
     cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
     cur_context.Bind(
-        LoadContextSlot(cur_context.value(), Context::PREVIOUS_INDEX));
+        LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
 
     GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)),
            &context_search);
@@ -485,26 +485,6 @@
   }
 }
 
-Node* InterpreterAssembler::LoadContextSlot(Node* context, int slot_index) {
-  return Load(MachineType::AnyTagged(), context,
-              IntPtrConstant(Context::SlotOffset(slot_index)));
-}
-
-Node* InterpreterAssembler::LoadContextSlot(Node* context, Node* slot_index) {
-  Node* offset =
-      IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
-                IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
-  return Load(MachineType::AnyTagged(), context, offset);
-}
-
-Node* InterpreterAssembler::StoreContextSlot(Node* context, Node* slot_index,
-                                             Node* value) {
-  Node* offset =
-      IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
-                IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
-  return Store(MachineRepresentation::kTagged, context, offset, value);
-}
-
 Node* InterpreterAssembler::LoadTypeFeedbackVector() {
   Node* function = LoadRegister(Register::function_closure());
   Node* literals = LoadObjectField(function, JSFunction::kLiteralsOffset);
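
The LoadContextSlot/StoreContextSlot helpers deleted above duplicated what CodeStubAssembler's LoadContextElement/StoreContextElement already provide; both compute the same untagged byte offset into a Context object. A worked standalone example of that offset arithmetic, assuming a 64-bit build (kPointerSizeLog2 == 3, kHeapObjectTag == 1) and a hypothetical 16-byte context header:

    #include <cstdint>
    #include <iostream>

    int main() {
      const intptr_t kPointerSizeLog2 = 3;  // 8-byte slots on 64-bit
      const intptr_t kHeapObjectTag = 1;    // tagged pointer points 1 past start
      const intptr_t kHeaderSize = 16;      // assumed header size, illustrative

      intptr_t slot_index = 5;
      intptr_t offset =
          (slot_index << kPointerSizeLog2) + kHeaderSize - kHeapObjectTag;
      // Load(MachineType::AnyTagged(), context, offset) then reads that slot.
      std::cout << "byte offset for slot 5: " << offset << "\n";  // 55
    }
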
@@ -566,28 +546,22 @@
                 WeakCell::kValueOffset == Symbol::kHashFieldSlot);
 
   Variable return_value(this, MachineRepresentation::kTagged);
-  Label handle_monomorphic(this), extra_checks(this), end(this), call(this),
-      call_function(this), call_without_feedback(this);
-
-  // Slot id of 0 is used to indicate no typefeedback is available. Call using
-  // call builtin.
-  STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
-  Node* is_feedback_unavailable = Word32Equal(slot_id, Int32Constant(0));
-  GotoIf(is_feedback_unavailable, &call_without_feedback);
+  Label call_function(this), extra_checks(this, Label::kDeferred), call(this),
+      end(this);
 
   // The checks. First, does function match the recorded monomorphic target?
   Node* feedback_element = LoadFixedArrayElement(type_feedback_vector, slot_id);
-  Node* feedback_value = LoadWeakCellValue(feedback_element);
+  Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
   Node* is_monomorphic = WordEqual(function, feedback_value);
-  BranchIf(is_monomorphic, &handle_monomorphic, &extra_checks);
+  GotoUnless(is_monomorphic, &extra_checks);
 
-  Bind(&handle_monomorphic);
+  // The compare above could have been a SMI/SMI comparison. Guard against
+  // this convincing us that we have a monomorphic JSFunction.
+  Node* is_smi = TaggedIsSmi(function);
+  Branch(is_smi, &extra_checks, &call_function);
+
+  Bind(&call_function);
   {
-    // The compare above could have been a SMI/SMI comparison. Guard against
-    // this convincing us that we have a monomorphic JSFunction.
-    Node* is_smi = WordIsSmi(function);
-    GotoIf(is_smi, &extra_checks);
-
     // Increment the call count.
     IncrementCallCount(type_feedback_vector, slot_id);
 
@@ -603,56 +577,56 @@
 
   Bind(&extra_checks);
   {
-    Label check_initialized(this, Label::kDeferred), mark_megamorphic(this),
-        check_allocation_site(this),
-        create_allocation_site(this, Label::kDeferred);
-    // Check if it is a megamorphic target
+    Label check_initialized(this), mark_megamorphic(this),
+        create_allocation_site(this);
+
+    Comment("check if megamorphic");
+    // Check if it is a megamorphic target.
     Node* is_megamorphic = WordEqual(
         feedback_element,
         HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())));
-    BranchIf(is_megamorphic, &call, &check_allocation_site);
+    GotoIf(is_megamorphic, &call);
 
-    Bind(&check_allocation_site);
-    {
-      Node* is_allocation_site =
-          WordEqual(LoadMap(feedback_element),
-                    LoadRoot(Heap::kAllocationSiteMapRootIndex));
-      GotoUnless(is_allocation_site, &check_initialized);
+    Comment("check if it is an allocation site");
+    Node* is_allocation_site = WordEqual(
+        LoadMap(feedback_element), LoadRoot(Heap::kAllocationSiteMapRootIndex));
+    GotoUnless(is_allocation_site, &check_initialized);
 
-      // If it is not the Array() function, mark megamorphic.
-      Node* context_slot =
-          LoadFixedArrayElement(LoadNativeContext(context),
-                                Int32Constant(Context::ARRAY_FUNCTION_INDEX));
-      Node* is_array_function = WordEqual(context_slot, function);
-      GotoUnless(is_array_function, &mark_megamorphic);
+    // If it is not the Array() function, mark megamorphic.
+    Node* context_slot =
+        LoadFixedArrayElement(LoadNativeContext(context),
+                              Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+    Node* is_array_function = WordEqual(context_slot, function);
+    GotoUnless(is_array_function, &mark_megamorphic);
 
-      // It is a monomorphic Array function. Increment the call count.
-      IncrementCallCount(type_feedback_vector, slot_id);
+    // It is a monomorphic Array function. Increment the call count.
+    IncrementCallCount(type_feedback_vector, slot_id);
 
-      // Call ArrayConstructorStub.
-      Callable callable_call =
-          CodeFactory::InterpreterPushArgsAndConstructArray(isolate());
-      Node* code_target_call = HeapConstant(callable_call.code());
-      Node* ret_value =
-          CallStub(callable_call.descriptor(), code_target_call, context,
-                   arg_count, function, feedback_element, first_arg);
-      return_value.Bind(ret_value);
-      Goto(&end);
-    }
+    // Call ArrayConstructorStub.
+    Callable callable_call =
+        CodeFactory::InterpreterPushArgsAndConstructArray(isolate());
+    Node* code_target_call = HeapConstant(callable_call.code());
+    Node* ret_value =
+        CallStub(callable_call.descriptor(), code_target_call, context,
+                 arg_count, function, feedback_element, first_arg);
+    return_value.Bind(ret_value);
+    Goto(&end);
 
     Bind(&check_initialized);
     {
-      Label possibly_monomorphic(this);
-      // Check if it is uninitialized.
+      Comment("check if uninitialized");
+      // Check if it is an uninitialized target first.
       Node* is_uninitialized = WordEqual(
           feedback_element,
           HeapConstant(TypeFeedbackVector::UninitializedSentinel(isolate())));
       GotoUnless(is_uninitialized, &mark_megamorphic);
 
-      Node* is_smi = WordIsSmi(function);
+      Comment("handle_unitinitialized");
+      // If it is not a JSFunction mark it as megamorphic.
+      Node* is_smi = TaggedIsSmi(function);
       GotoIf(is_smi, &mark_megamorphic);
 
-      // Check if function is an object of JSFunction type
+      // Check if function is an object of JSFunction type.
       Node* instance_type = LoadInstanceType(function);
       Node* is_js_function =
           WordEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE));
@@ -665,7 +639,7 @@
       Node* is_array_function = WordEqual(context_slot, function);
       GotoIf(is_array_function, &create_allocation_site);
 
-      // Check if the function belongs to the same native context
+      // Check if the function belongs to the same native context.
       Node* native_context = LoadNativeContext(
           LoadObjectField(function, JSFunction::kContextOffset));
       Node* is_same_native_context =
@@ -704,22 +678,9 @@
     }
   }
 
-  Bind(&call_function);
-  {
-    // Increment the call count.
-    IncrementCallCount(type_feedback_vector, slot_id);
-
-    Callable callable_call = CodeFactory::InterpreterPushArgsAndCall(
-        isolate(), tail_call_mode, CallableType::kJSFunction);
-    Node* code_target_call = HeapConstant(callable_call.code());
-    Node* ret_value = CallStub(callable_call.descriptor(), code_target_call,
-                               context, arg_count, first_arg, function);
-    return_value.Bind(ret_value);
-    Goto(&end);
-  }
-
   Bind(&call);
   {
+    Comment("Increment call count and call using Call builtin");
     // Increment the call count.
     IncrementCallCount(type_feedback_vector, slot_id);
 
@@ -733,18 +694,6 @@
     Goto(&end);
   }
 
-  Bind(&call_without_feedback);
-  {
-    // Call using call builtin.
-    Callable callable_call = CodeFactory::InterpreterPushArgsAndCall(
-        isolate(), tail_call_mode, CallableType::kAny);
-    Node* code_target_call = HeapConstant(callable_call.code());
-    Node* ret_value = CallStub(callable_call.descriptor(), code_target_call,
-                               context, arg_count, first_arg, function);
-    return_value.Bind(ret_value);
-    Goto(&end);
-  }
-
   Bind(&end);
   return return_value.value();
 }
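
The rewritten CallJSWithFeedback above walks a per-call-site state machine kept in the feedback vector: uninitialized, then monomorphic (a WeakCell holding the target, or an AllocationSite for the Array() function), then megamorphic once a second distinct target is seen. A minimal sketch of those transitions in plain C++; the real code stores sentinel heap objects, not an enum:

    #include <iostream>

    enum class CallFeedback { kUninitialized, kMonomorphic, kMegamorphic };

    // One observed call; same_target means the callee matches the recorded one.
    CallFeedback Transition(CallFeedback state, bool same_target) {
      switch (state) {
        case CallFeedback::kUninitialized:
          return CallFeedback::kMonomorphic;  // record the first target
        case CallFeedback::kMonomorphic:
          return same_target ? CallFeedback::kMonomorphic
                             : CallFeedback::kMegamorphic;  // second target seen
        case CallFeedback::kMegamorphic:
          return CallFeedback::kMegamorphic;  // terminal state
      }
      return state;
    }

    int main() {
      auto s = Transition(CallFeedback::kUninitialized, false);
      s = Transition(s, /*same_target=*/false);
      std::cout << static_cast<int>(s) << "\n";  // 2 (megamorphic)
    }
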
@@ -763,10 +712,10 @@
                                           Node* new_target, Node* first_arg,
                                           Node* arg_count, Node* slot_id,
                                           Node* type_feedback_vector) {
-  Label call_construct(this), js_function(this), end(this);
   Variable return_value(this, MachineRepresentation::kTagged);
   Variable allocation_feedback(this, MachineRepresentation::kTagged);
-  allocation_feedback.Bind(UndefinedConstant());
+  Label call_construct_function(this, &allocation_feedback),
+      extra_checks(this, Label::kDeferred), call_construct(this), end(this);
 
   // Slot id of 0 is used to indicate no type feedback is available.
   STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
@@ -774,139 +723,125 @@
   GotoIf(is_feedback_unavailable, &call_construct);
 
   // Check that the constructor is not a smi.
-  Node* is_smi = WordIsSmi(constructor);
+  Node* is_smi = TaggedIsSmi(constructor);
   GotoIf(is_smi, &call_construct);
 
   // Check that constructor is a JSFunction.
   Node* instance_type = LoadInstanceType(constructor);
   Node* is_js_function =
       WordEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE));
-  BranchIf(is_js_function, &js_function, &call_construct);
+  GotoUnless(is_js_function, &call_construct);
 
-  Bind(&js_function);
+  // Check if it is a monomorphic constructor.
+  Node* feedback_element = LoadFixedArrayElement(type_feedback_vector, slot_id);
+  Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
+  Node* is_monomorphic = WordEqual(constructor, feedback_value);
+  allocation_feedback.Bind(UndefinedConstant());
+  Branch(is_monomorphic, &call_construct_function, &extra_checks);
+
+  Bind(&call_construct_function);
   {
-    // Cache the called function in a feedback vector slot. Cache states
-    // are uninitialized, monomorphic (indicated by a JSFunction), and
-    // megamorphic.
-    // TODO(mythria/v8:5210): Check if it is better to mark extra_checks as a
-    // deferred block so that call_construct_function will be scheduled.
-    Label extra_checks(this), call_construct_function(this);
+    Comment("call using callConstructFunction");
+    IncrementCallCount(type_feedback_vector, slot_id);
+    Callable callable_function = CodeFactory::InterpreterPushArgsAndConstruct(
+        isolate(), CallableType::kJSFunction);
+    return_value.Bind(CallStub(callable_function.descriptor(),
+                               HeapConstant(callable_function.code()), context,
+                               arg_count, new_target, constructor,
+                               allocation_feedback.value(), first_arg));
+    Goto(&end);
+  }
 
-    Node* feedback_element =
-        LoadFixedArrayElement(type_feedback_vector, slot_id);
-    Node* feedback_value = LoadWeakCellValue(feedback_element);
-    Node* is_monomorphic = WordEqual(constructor, feedback_value);
-    BranchIf(is_monomorphic, &call_construct_function, &extra_checks);
+  Bind(&extra_checks);
+  {
+    Label check_allocation_site(this), check_initialized(this),
+        initialize(this), mark_megamorphic(this);
 
-    Bind(&extra_checks);
+    // Check if it is a megamorphic target.
+    Comment("check if megamorphic");
+    Node* is_megamorphic = WordEqual(
+        feedback_element,
+        HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+    GotoIf(is_megamorphic, &call_construct_function);
+
+    Comment("check if weak cell");
+    Node* is_weak_cell = WordEqual(LoadMap(feedback_element),
+                                   LoadRoot(Heap::kWeakCellMapRootIndex));
+    GotoUnless(is_weak_cell, &check_allocation_site);
+
+    // If the weak cell is cleared, we have a new chance to become
+    // monomorphic.
+    Comment("check if weak cell is cleared");
+    Node* is_smi = TaggedIsSmi(feedback_value);
+    Branch(is_smi, &initialize, &mark_megamorphic);
+
+    Bind(&check_allocation_site);
     {
-      Label mark_megamorphic(this), initialize(this),
-          check_allocation_site(this), check_initialized(this),
-          set_alloc_feedback_and_call(this);
+      Comment("check if it is an allocation site");
+      Node* is_allocation_site =
+          WordEqual(LoadObjectField(feedback_element, 0),
+                    LoadRoot(Heap::kAllocationSiteMapRootIndex));
+      GotoUnless(is_allocation_site, &check_initialized);
+
+      // Make sure the function is the Array() function.
+      Node* context_slot =
+          LoadFixedArrayElement(LoadNativeContext(context),
+                                Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+      Node* is_array_function = WordEqual(context_slot, constructor);
+      GotoUnless(is_array_function, &mark_megamorphic);
+
+      allocation_feedback.Bind(feedback_element);
+      Goto(&call_construct_function);
+    }
+
+    Bind(&check_initialized);
+    {
+      // Check if it is uninitialized.
+      Comment("check if uninitialized");
+      Node* is_uninitialized = WordEqual(
+          feedback_element, LoadRoot(Heap::kuninitialized_symbolRootIndex));
+      Branch(is_uninitialized, &initialize, &mark_megamorphic);
+    }
+
+    Bind(&initialize);
+    {
+      Label create_allocation_site(this), create_weak_cell(this);
+      Comment("initialize the feedback element");
+      // Create an allocation site if the function is an array function,
+      // otherwise create a weak cell.
+      Node* context_slot =
+          LoadFixedArrayElement(LoadNativeContext(context),
+                                Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+      Node* is_array_function = WordEqual(context_slot, constructor);
+      Branch(is_array_function, &create_allocation_site, &create_weak_cell);
+
+      Bind(&create_allocation_site);
       {
-        // Check if it is a megamorphic target
-        Comment("check if megamorphic");
-        Node* is_megamorphic = WordEqual(
-            feedback_element,
-            HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())));
-        GotoIf(is_megamorphic, &call_construct_function);
-
-        Comment("check if weak cell");
-        Node* is_weak_cell = WordEqual(LoadMap(feedback_element),
-                                       LoadRoot(Heap::kWeakCellMapRootIndex));
-        GotoUnless(is_weak_cell, &check_allocation_site);
-        // If the weak cell is cleared, we have a new chance to become
-        // monomorphic.
-        Comment("check if weak cell is cleared");
-        Node* is_smi = WordIsSmi(feedback_value);
-        BranchIf(is_smi, &initialize, &mark_megamorphic);
-      }
-
-      Bind(&check_allocation_site);
-      {
-        Comment("check if it is an allocation site");
-        Node* is_allocation_site =
-            WordEqual(LoadObjectField(feedback_element, 0),
-                      LoadRoot(Heap::kAllocationSiteMapRootIndex));
-        GotoUnless(is_allocation_site, &check_initialized);
-
-        // Make sure the function is the Array() function
-        Node* context_slot =
-            LoadFixedArrayElement(LoadNativeContext(context),
-                                  Int32Constant(Context::ARRAY_FUNCTION_INDEX));
-        Node* is_array_function = WordEqual(context_slot, constructor);
-        BranchIf(is_array_function, &set_alloc_feedback_and_call,
-                 &mark_megamorphic);
-      }
-
-      Bind(&set_alloc_feedback_and_call);
-      {
-        allocation_feedback.Bind(feedback_element);
+        Node* site = CreateAllocationSiteInFeedbackVector(type_feedback_vector,
+                                                          SmiTag(slot_id));
+        allocation_feedback.Bind(site);
         Goto(&call_construct_function);
       }
 
-      Bind(&check_initialized);
+      Bind(&create_weak_cell);
       {
-        // Check if it is uninitialized.
-        Comment("check if uninitialized");
-        Node* is_uninitialized = WordEqual(
-            feedback_element, LoadRoot(Heap::kuninitialized_symbolRootIndex));
-        BranchIf(is_uninitialized, &initialize, &mark_megamorphic);
-      }
-
-      Bind(&initialize);
-      {
-        Label create_weak_cell(this), create_allocation_site(this);
-        Comment("initialize the feedback element");
-        // Check that it is the Array() function.
-        Node* context_slot =
-            LoadFixedArrayElement(LoadNativeContext(context),
-                                  Int32Constant(Context::ARRAY_FUNCTION_INDEX));
-        Node* is_array_function = WordEqual(context_slot, constructor);
-        BranchIf(is_array_function, &create_allocation_site, &create_weak_cell);
-
-        Bind(&create_allocation_site);
-        {
-          Node* site = CreateAllocationSiteInFeedbackVector(
-              type_feedback_vector, SmiTag(slot_id));
-          allocation_feedback.Bind(site);
-          Goto(&call_construct_function);
-        }
-
-        Bind(&create_weak_cell);
-        {
-          CreateWeakCellInFeedbackVector(type_feedback_vector, SmiTag(slot_id),
-                                         constructor);
-          Goto(&call_construct_function);
-        }
-      }
-
-      Bind(&mark_megamorphic);
-      {
-        // MegamorphicSentinel is an immortal immovable object so
-        // write-barrier is not needed.
-        Comment("transition to megamorphic");
-        DCHECK(
-            Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
-        StoreFixedArrayElement(
-            type_feedback_vector, slot_id,
-            HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())),
-            SKIP_WRITE_BARRIER);
+        CreateWeakCellInFeedbackVector(type_feedback_vector, SmiTag(slot_id),
+                                       constructor);
         Goto(&call_construct_function);
       }
     }
 
-    Bind(&call_construct_function);
+    Bind(&mark_megamorphic);
     {
-      Comment("call using callConstructFunction");
-      IncrementCallCount(type_feedback_vector, slot_id);
-      Callable callable_function = CodeFactory::InterpreterPushArgsAndConstruct(
-          isolate(), CallableType::kJSFunction);
-      return_value.Bind(CallStub(callable_function.descriptor(),
-                                 HeapConstant(callable_function.code()),
-                                 context, arg_count, new_target, constructor,
-                                 allocation_feedback.value(), first_arg));
-      Goto(&end);
+      // MegamorphicSentinel is an immortal immovable object so
+      // write-barrier is not needed.
+      Comment("transition to megamorphic");
+      DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
+      StoreFixedArrayElement(
+          type_feedback_vector, slot_id,
+          HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())),
+          SKIP_WRITE_BARRIER);
+      Goto(&call_construct_function);
     }
   }
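
Monomorphic construct feedback lives in a WeakCell so the feedback vector does not keep the constructor alive; as the code above notes, a cleared cell gives the site a fresh chance to re-initialize rather than going megamorphic. A rough standalone analogy using std::weak_ptr (V8 uses GC-managed WeakCell objects, not weak_ptr):

    #include <iostream>
    #include <memory>

    struct Ctor { int id; };

    int main() {
      std::weak_ptr<Ctor> cell;  // the feedback slot, initially empty
      {
        auto f = std::make_shared<Ctor>(Ctor{1});
        cell = f;                              // become monomorphic on f
        std::cout << !cell.expired() << "\n";  // 1: target still recorded
      }  // f dies; the "GC" clears the weak reference
      // A cleared cell is not megamorphic: the site may record the next
      // constructor it sees, mirroring Branch(is_smi, &initialize, ...).
      std::cout << cell.expired() << "\n";  // 1: cleared, new chance
    }
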
 
@@ -1007,7 +942,7 @@
 void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
   Label match(this), no_match(this);
 
-  BranchIf(condition, &match, &no_match);
+  Branch(condition, &match, &no_match);
   Bind(&match);
   Jump(delta);
   Bind(&no_match);
@@ -1035,12 +970,12 @@
 Node* InterpreterAssembler::StarDispatchLookahead(Node* target_bytecode) {
   Label do_inline_star(this), done(this);
 
-  Variable var_bytecode(this, MachineRepresentation::kWord8);
+  Variable var_bytecode(this, MachineType::PointerRepresentation());
   var_bytecode.Bind(target_bytecode);
 
   Node* star_bytecode = IntPtrConstant(static_cast<int>(Bytecode::kStar));
   Node* is_star = WordEqual(target_bytecode, star_bytecode);
-  BranchIf(is_star, &do_inline_star, &done);
+  Branch(is_star, &do_inline_star, &done);
 
   Bind(&do_inline_star);
   {
@@ -1161,7 +1096,7 @@
 
     // Check if the {value} is a Smi or a HeapObject.
     Label if_valueissmi(this), if_valueisnotsmi(this);
-    Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+    Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
 
     Bind(&if_valueissmi);
     {
@@ -1178,7 +1113,8 @@
       // Check if {value} is a HeapNumber.
       Label if_valueisheapnumber(this),
           if_valueisnotheapnumber(this, Label::kDeferred);
-      Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
+      Node* value_map = LoadMap(value);
+      Branch(WordEqual(value_map, HeapNumberMapConstant()),
              &if_valueisheapnumber, &if_valueisnotheapnumber);
 
       Bind(&if_valueisheapnumber);
@@ -1193,11 +1129,36 @@
 
       Bind(&if_valueisnotheapnumber);
       {
-        // Convert the {value} to a Number first.
-        Callable callable = CodeFactory::NonNumberToNumber(isolate());
-        var_value.Bind(CallStub(callable, context, value));
-        var_type_feedback->Bind(Int32Constant(BinaryOperationFeedback::kAny));
-        Goto(&loop);
+        // We do not require an Or with earlier feedback here because once we
+        // convert the value to a number, we cannot reach this path. We can
+        // only reach this path on the first pass when the feedback is kNone.
+        CSA_ASSERT(this,
+                   Word32Equal(var_type_feedback->value(),
+                               Int32Constant(BinaryOperationFeedback::kNone)));
+
+        Label if_valueisoddball(this),
+            if_valueisnotoddball(this, Label::kDeferred);
+        Node* is_oddball = Word32Equal(LoadMapInstanceType(value_map),
+                                       Int32Constant(ODDBALL_TYPE));
+        Branch(is_oddball, &if_valueisoddball, &if_valueisnotoddball);
+
+        Bind(&if_valueisoddball);
+        {
+          // Convert Oddball to a Number and perform checks again.
+          var_value.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
+          var_type_feedback->Bind(
+              Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+          Goto(&loop);
+        }
+
+        Bind(&if_valueisnotoddball);
+        {
+          // Convert the {value} to a Number first.
+          Callable callable = CodeFactory::NonNumberToNumber(isolate());
+          var_value.Bind(CallStub(callable, context, value));
+          var_type_feedback->Bind(Int32Constant(BinaryOperationFeedback::kAny));
+          Goto(&loop);
+        }
       }
     }
   }
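
The CSA_ASSERT and the kNone/kNumberOrOddball/kAny constants above rely on binary-operation feedback forming a lattice that is combined with Word32Or, so repeated observations only ever move feedback upward. A small sketch with an assumed bit encoding (V8's actual constant values may differ):

    #include <cstdint>
    #include <iostream>

    // Assumed encoding: each level includes the bits of the levels below it,
    // so OR-ing observations climbs the lattice and never descends.
    enum Feedback : uint32_t {
      kNone = 0,
      kSignedSmall = 1 << 0,
      kNumber = kSignedSmall | 1 << 1,
      kNumberOrOddball = kNumber | 1 << 2,
      kAny = kNumberOrOddball | 1 << 3,
    };

    int main() {
      uint32_t slot = kNone;
      slot |= kSignedSmall;      // saw 1 + 2
      slot |= kNumberOrOddball;  // saw undefined + 1 (oddball converted)
      std::cout << (slot == kNumberOrOddball) << "\n";  // 1: still not kAny
    }
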
@@ -1241,7 +1202,7 @@
 void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
                                                BailoutReason bailout_reason) {
   Label ok(this), abort(this, Label::kDeferred);
-  BranchIfWordEqual(lhs, rhs, &ok, &abort);
+  Branch(WordEqual(lhs, rhs), &ok, &abort);
 
   Bind(&abort);
   Abort(bailout_reason);
@@ -1271,7 +1232,7 @@
 
   Node* counter_reached_max = WordEqual(
       old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max()));
-  BranchIf(counter_reached_max, &counter_saturated, &counter_ok);
+  Branch(counter_reached_max, &counter_saturated, &counter_ok);
 
   Bind(&counter_ok);
   {
diff --git a/src/interpreter/interpreter-assembler.h b/src/interpreter/interpreter-assembler.h
index 9dda20a..aefd2bc 100644
--- a/src/interpreter/interpreter-assembler.h
+++ b/src/interpreter/interpreter-assembler.h
@@ -9,6 +9,7 @@
 #include "src/builtins/builtins.h"
 #include "src/code-stub-assembler.h"
 #include "src/frames.h"
+#include "src/globals.h"
 #include "src/interpreter/bytecode-register.h"
 #include "src/interpreter/bytecodes.h"
 #include "src/runtime/runtime.h"
@@ -17,7 +18,7 @@
 namespace internal {
 namespace interpreter {
 
-class InterpreterAssembler : public CodeStubAssembler {
+class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
  public:
   InterpreterAssembler(Isolate* isolate, Zone* zone, Bytecode bytecode,
                        OperandScale operand_scale);
@@ -92,15 +93,6 @@
   // Load and untag constant at |index| in the constant pool.
   compiler::Node* LoadAndUntagConstantPoolEntry(compiler::Node* index);
 
-  // Load |slot_index| from |context|.
-  compiler::Node* LoadContextSlot(compiler::Node* context, int slot_index);
-  compiler::Node* LoadContextSlot(compiler::Node* context,
-                                  compiler::Node* slot_index);
-  // Stores |value| into |slot_index| of |context|.
-  compiler::Node* StoreContextSlot(compiler::Node* context,
-                                   compiler::Node* slot_index,
-                                   compiler::Node* value);
-
   // Load the TypeFeedbackVector for the current function.
   compiler::Node* LoadTypeFeedbackVector();
 
diff --git a/src/interpreter/interpreter-intrinsics.cc b/src/interpreter/interpreter-intrinsics.cc
index 600b9c0..b46ca87 100644
--- a/src/interpreter/interpreter-intrinsics.cc
+++ b/src/interpreter/interpreter-intrinsics.cc
@@ -125,7 +125,7 @@
   InterpreterAssembler::Label if_not_smi(assembler_), return_true(assembler_),
       return_false(assembler_), end(assembler_);
   Node* arg = __ LoadRegister(input);
-  __ GotoIf(__ WordIsSmi(arg), &return_false);
+  __ GotoIf(__ TaggedIsSmi(arg), &return_false);
 
   Node* condition = CompareInstanceType(arg, type, kInstanceTypeEqual);
   __ Branch(condition, &return_true, &return_false);
@@ -154,7 +154,7 @@
       end(assembler_);
 
   Node* arg = __ LoadRegister(input);
-  __ GotoIf(__ WordIsSmi(arg), &return_false);
+  __ GotoIf(__ TaggedIsSmi(arg), &return_false);
 
   STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
   Node* condition = CompareInstanceType(arg, FIRST_JS_RECEIVER_TYPE,
@@ -202,7 +202,7 @@
 
   Node* arg = __ LoadRegister(input);
 
-  __ Branch(__ WordIsSmi(arg), &if_smi, &if_not_smi);
+  __ Branch(__ TaggedIsSmi(arg), &if_smi, &if_not_smi);
   __ Bind(&if_smi);
   {
     return_value.Bind(__ BooleanConstant(true));
@@ -249,12 +249,6 @@
                              CodeFactory::NumberToString(isolate()));
 }
 
-Node* IntrinsicsHelper::RegExpConstructResult(Node* input, Node* arg_count,
-                                              Node* context) {
-  return IntrinsicAsStubCall(input, context,
-                             CodeFactory::RegExpConstructResult(isolate()));
-}
-
 Node* IntrinsicsHelper::RegExpExec(Node* input, Node* arg_count,
                                    Node* context) {
   return IntrinsicAsStubCall(input, context,
@@ -321,7 +315,7 @@
   return_value.Bind(object);
 
   // If the object is a smi return the object.
-  __ GotoIf(__ WordIsSmi(object), &done);
+  __ GotoIf(__ TaggedIsSmi(object), &done);
 
   // If the object is not a value type, return the object.
   Node* condition =
@@ -346,7 +340,7 @@
   Node* object = __ LoadRegister(args_reg);
 
   // If the object is not a JSReceiver, we return null.
-  __ GotoIf(__ WordIsSmi(object), &null);
+  __ GotoIf(__ TaggedIsSmi(object), &null);
   STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
   Node* is_js_receiver = CompareInstanceType(object, FIRST_JS_RECEIVER_TYPE,
                                              kInstanceTypeGreaterThanOrEqual);
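
The mechanical WordIsSmi -> TaggedIsSmi renames in this file all test the same invariant: a tagged value is a Smi iff its low tag bit is clear. A standalone sketch of the check under V8's standard tagging (kSmiTag == 0, kSmiTagMask == 1); the 32-bit-style value << 1 encoding below is assumed for illustration:

    #include <cstdint>
    #include <iostream>

    constexpr intptr_t kSmiTagMask = 1;  // heap objects have the low bit set
    constexpr intptr_t kSmiTag = 0;      // Smis have it clear

    bool TaggedIsSmi(intptr_t tagged) {
      return (tagged & kSmiTagMask) == kSmiTag;
    }

    int main() {
      intptr_t smi_42 = 42 << 1;   // Smi payload lives above the tag bit
      intptr_t heap_obj = 0x1001;  // some aligned address with the tag bit set
      std::cout << TaggedIsSmi(smi_42) << TaggedIsSmi(heap_obj) << "\n";  // 10
    }
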
diff --git a/src/interpreter/interpreter-intrinsics.h b/src/interpreter/interpreter-intrinsics.h
index 11fe4a0..70ff291 100644
--- a/src/interpreter/interpreter-intrinsics.h
+++ b/src/interpreter/interpreter-intrinsics.h
@@ -35,7 +35,6 @@
   V(IsTypedArray, is_typed_array, 1)                    \
   V(NewObject, new_object, 2)                           \
   V(NumberToString, number_to_string, 1)                \
-  V(RegExpConstructResult, reg_exp_construct_result, 3) \
   V(RegExpExec, reg_exp_exec, 4)                        \
   V(SubString, sub_string, 3)                           \
   V(ToString, to_string, 1)                             \
diff --git a/src/interpreter/interpreter.cc b/src/interpreter/interpreter.cc
index 4100302..81aecaf 100644
--- a/src/interpreter/interpreter.cc
+++ b/src/interpreter/interpreter.cc
@@ -53,8 +53,8 @@
 }
 
 void Interpreter::Initialize() {
-  if (IsDispatchTableInitialized()) return;
-  Zone zone(isolate_->allocator());
+  if (!ShouldInitializeDispatchTable()) return;
+  Zone zone(isolate_->allocator(), ZONE_NAME);
   HandleScope scope(isolate_);
 
   if (FLAG_trace_ignition_dispatches) {
@@ -103,6 +103,9 @@
       dispatch_table_[index] = dispatch_table_[illegal_index];
     }
   }
+
+  // Initialization should have been successful.
+  DCHECK(IsDispatchTableInitialized());
 }
 
 Code* Interpreter::GetBytecodeHandler(Bytecode bytecode,
@@ -197,6 +200,8 @@
     return FAILED;
   }
 
+  CodeGenerator::MakeCodePrologue(info(), "interpreter");
+
   if (FLAG_print_bytecode) {
     OFStream os(stdout);
     bytecodes->Print(os);
@@ -213,13 +218,17 @@
 }
 
 bool Interpreter::IsDispatchTableInitialized() {
+  return dispatch_table_[0] != nullptr;
+}
+
+bool Interpreter::ShouldInitializeDispatchTable() {
   if (FLAG_trace_ignition || FLAG_trace_ignition_codegen ||
       FLAG_trace_ignition_dispatches) {
     // Regenerate table to add bytecode tracing operations, print the assembly
     // code generated by TurboFan or instrument handlers with dispatch counters.
-    return false;
+    return true;
   }
-  return dispatch_table_[0] != nullptr;
+  return !IsDispatchTableInitialized();
 }
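
This refactor separates the pure state query (IsDispatchTableInitialized) from the regeneration policy (ShouldInitializeDispatchTable), which is what lets Initialize() assert its own postcondition through the DCHECK added above. A compact sketch of the same shape, with a hypothetical flag standing in for the --trace-ignition* options:

    #include <cassert>

    struct Interp {
      void* table[256] = {nullptr};
      bool trace = false;  // stands in for the tracing flags

      bool IsDispatchTableInitialized() const { return table[0] != nullptr; }
      bool ShouldInitializeDispatchTable() const {
        // Tracing forces regeneration even if the table is already filled.
        return trace || !IsDispatchTableInitialized();
      }
      void Initialize() {
        if (!ShouldInitializeDispatchTable()) return;
        for (auto& e : table) e = reinterpret_cast<void*>(1);  // fill handlers
        assert(IsDispatchTableInitialized());  // mirrors the added DCHECK
      }
    };

    int main() { Interp i; i.Initialize(); }
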
 
 void Interpreter::TraceCodegen(Handle<Code> code) {
@@ -343,17 +352,6 @@
   __ Dispatch();
 }
 
-// LdrUndefined <reg>
-//
-// Loads undefined into the accumulator and |reg|.
-void Interpreter::DoLdrUndefined(InterpreterAssembler* assembler) {
-  Node* undefined_value =
-      __ HeapConstant(isolate_->factory()->undefined_value());
-  Node* destination = __ BytecodeOperandReg(0);
-  __ StoreRegister(undefined_value, destination);
-  __ Dispatch();
-}
-
 // LdaNull
 //
 // Load Null into the accumulator.
@@ -451,23 +449,6 @@
   __ Dispatch();
 }
 
-// LdrGlobal <slot> <reg>
-//
-// Load the global with name in constant pool entry <name_index> into
-// register <reg> using FeedBackVector slot <slot> outside of a typeof.
-void Interpreter::DoLdrGlobal(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::LoadGlobalICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF);
-
-  Node* context = __ GetContext();
-
-  Node* raw_slot = __ BytecodeOperandIdx(0);
-  Node* result = BuildLoadGlobal(ic, context, raw_slot, assembler);
-  Node* destination = __ BytecodeOperandReg(1);
-  __ StoreRegister(result, destination);
-  __ Dispatch();
-}
-
 // LdaGlobalInsideTypeof <slot>
 //
 // Load the global with name in constant pool entry <name_index> into the
@@ -488,9 +469,9 @@
   typedef StoreWithVectorDescriptor Descriptor;
   // Get the global object.
   Node* context = __ GetContext();
-  Node* native_context =
-      __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
-  Node* global = __ LoadContextSlot(native_context, Context::EXTENSION_INDEX);
+  Node* native_context = __ LoadNativeContext(context);
+  Node* global =
+      __ LoadContextElement(native_context, Context::EXTENSION_INDEX);
 
   // Store the global via the StoreIC.
   Node* code_target = __ HeapConstant(ic.code());
@@ -525,34 +506,29 @@
   DoStaGlobal(ic, assembler);
 }
 
-compiler::Node* Interpreter::BuildLoadContextSlot(
-    InterpreterAssembler* assembler) {
-  Node* reg_index = __ BytecodeOperandReg(0);
-  Node* context = __ LoadRegister(reg_index);
-  Node* slot_index = __ BytecodeOperandIdx(1);
-  Node* depth = __ BytecodeOperandUImm(2);
-  Node* slot_context = __ GetContextAtDepth(context, depth);
-  return __ LoadContextSlot(slot_context, slot_index);
-}
-
 // LdaContextSlot <context> <slot_index> <depth>
 //
 // Load the object in |slot_index| of the context at |depth| in the context
 // chain starting at |context| into the accumulator.
 void Interpreter::DoLdaContextSlot(InterpreterAssembler* assembler) {
-  Node* result = BuildLoadContextSlot(assembler);
+  Node* reg_index = __ BytecodeOperandReg(0);
+  Node* context = __ LoadRegister(reg_index);
+  Node* slot_index = __ BytecodeOperandIdx(1);
+  Node* depth = __ BytecodeOperandUImm(2);
+  Node* slot_context = __ GetContextAtDepth(context, depth);
+  Node* result = __ LoadContextElement(slot_context, slot_index);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
-// LdrContextSlot <context> <slot_index> <depth> <reg>
+// LdaCurrentContextSlot <slot_index>
 //
-// Load the object in |slot_index| of the context at |depth| in the context
-// chain of |context| into register |reg|.
-void Interpreter::DoLdrContextSlot(InterpreterAssembler* assembler) {
-  Node* result = BuildLoadContextSlot(assembler);
-  Node* destination = __ BytecodeOperandReg(3);
-  __ StoreRegister(result, destination);
+// Load the object in |slot_index| of the current context into the accumulator.
+void Interpreter::DoLdaCurrentContextSlot(InterpreterAssembler* assembler) {
+  Node* slot_index = __ BytecodeOperandIdx(0);
+  Node* slot_context = __ GetContext();
+  Node* result = __ LoadContextElement(slot_context, slot_index);
+  __ SetAccumulator(result);
   __ Dispatch();
 }
 
@@ -567,7 +543,19 @@
   Node* slot_index = __ BytecodeOperandIdx(1);
   Node* depth = __ BytecodeOperandUImm(2);
   Node* slot_context = __ GetContextAtDepth(context, depth);
-  __ StoreContextSlot(slot_context, slot_index, value);
+  __ StoreContextElement(slot_context, slot_index, value);
+  __ Dispatch();
+}
+
+// StaCurrentContextSlot <slot_index>
+//
+// Stores the object in the accumulator into |slot_index| of the current
+// context.
+void Interpreter::DoStaCurrentContextSlot(InterpreterAssembler* assembler) {
+  Node* value = __ GetAccumulator();
+  Node* slot_index = __ BytecodeOperandIdx(0);
+  Node* slot_context = __ GetContext();
+  __ StoreContextElement(slot_context, slot_index, value);
   __ Dispatch();
 }
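
LdaCurrentContextSlot and StaCurrentContextSlot specialize the common case of the three-operand context bytecodes: when the depth is zero and the context is the current one, only the slot index needs encoding and the GetContextAtDepth walk disappears. A rough size comparison, assuming one byte per opcode and per operand purely for illustration (real operand widths are scalable):

    #include <iostream>

    int main() {
      // LdaContextSlot <context> <slot_index> <depth>
      int general = 1 /*opcode*/ + 1 /*context reg*/ + 1 /*slot*/ + 1 /*depth*/;
      // LdaCurrentContextSlot <slot_index>
      int current = 1 /*opcode*/ + 1 /*slot*/;
      std::cout << general << " vs " << current << " bytes\n";  // 4 vs 2
    }
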
 
@@ -612,7 +600,7 @@
   // Fast path does a normal load context.
   {
     Node* slot_context = __ GetContextAtDepth(context, depth);
-    Node* result = __ LoadContextSlot(slot_context, slot_index);
+    Node* result = __ LoadContextElement(slot_context, slot_index);
     __ SetAccumulator(result);
     __ Dispatch();
   }
@@ -724,9 +712,13 @@
   DoStaLookupSlot(LanguageMode::STRICT, assembler);
 }
 
-Node* Interpreter::BuildLoadNamedProperty(Callable ic,
-                                          InterpreterAssembler* assembler) {
+// LdaNamedProperty <object> <name_index> <slot>
+//
+// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
+// constant pool entry <name_index>.
+void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) {
   typedef LoadWithVectorDescriptor Descriptor;
+  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
   Node* code_target = __ HeapConstant(ic.code());
   Node* register_index = __ BytecodeOperandReg(0);
   Node* object = __ LoadRegister(register_index);
@@ -736,38 +728,21 @@
   Node* smi_slot = __ SmiTag(raw_slot);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
   Node* context = __ GetContext();
-  return __ CallStub(
+  Node* result = __ CallStub(
       ic.descriptor(), code_target, context, Arg(Descriptor::kReceiver, object),
       Arg(Descriptor::kName, name), Arg(Descriptor::kSlot, smi_slot),
       Arg(Descriptor::kVector, type_feedback_vector));
-}
-
-// LdaNamedProperty <object> <name_index> <slot>
-//
-// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
-// constant pool entry <name_index>.
-void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
-  Node* result = BuildLoadNamedProperty(ic, assembler);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
-// LdrNamedProperty <object> <name_index> <slot> <reg>
+// LdaKeyedProperty <object> <slot>
 //
-// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
-// constant pool entry <name_index> and puts the result into register <reg>.
-void Interpreter::DoLdrNamedProperty(InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
-  Node* result = BuildLoadNamedProperty(ic, assembler);
-  Node* destination = __ BytecodeOperandReg(3);
-  __ StoreRegister(result, destination);
-  __ Dispatch();
-}
-
-Node* Interpreter::BuildLoadKeyedProperty(Callable ic,
-                                          InterpreterAssembler* assembler) {
+// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
+// in the accumulator.
+void Interpreter::DoLdaKeyedProperty(InterpreterAssembler* assembler) {
   typedef LoadWithVectorDescriptor Descriptor;
+  Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
   Node* code_target = __ HeapConstant(ic.code());
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* object = __ LoadRegister(reg_index);
@@ -776,35 +751,14 @@
   Node* smi_slot = __ SmiTag(raw_slot);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
   Node* context = __ GetContext();
-  return __ CallStub(
+  Node* result = __ CallStub(
       ic.descriptor(), code_target, context, Arg(Descriptor::kReceiver, object),
       Arg(Descriptor::kName, name), Arg(Descriptor::kSlot, smi_slot),
       Arg(Descriptor::kVector, type_feedback_vector));
-}
-
-// KeyedLoadIC <object> <slot>
-//
-// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
-// in the accumulator.
-void Interpreter::DoLdaKeyedProperty(InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
-  Node* result = BuildLoadKeyedProperty(ic, assembler);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
-// LdrKeyedProperty <object> <slot> <reg>
-//
-// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
-// in the accumulator and puts the result in register <reg>.
-void Interpreter::DoLdrKeyedProperty(InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
-  Node* result = BuildLoadKeyedProperty(ic, assembler);
-  Node* destination = __ BytecodeOperandReg(2);
-  __ StoreRegister(result, destination);
-  __ Dispatch();
-}
-
 void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
   typedef StoreWithVectorDescriptor Descriptor;
   Node* code_target = __ HeapConstant(ic.code());
@@ -881,6 +835,88 @@
   DoKeyedStoreIC(ic, assembler);
 }
 
+// LdaModuleVariable <cell_index> <depth>
+//
+// Load the contents of a module variable into the accumulator.  The variable is
+// identified by <cell_index>.  <depth> is the depth of the current context
+// relative to the module context.
+void Interpreter::DoLdaModuleVariable(InterpreterAssembler* assembler) {
+  Node* cell_index = __ BytecodeOperandImm(0);
+  Node* depth = __ BytecodeOperandUImm(1);
+
+  Node* module_context = __ GetContextAtDepth(__ GetContext(), depth);
+  Node* module =
+      __ LoadContextElement(module_context, Context::EXTENSION_INDEX);
+
+  Label if_export(assembler), if_import(assembler), end(assembler);
+  __ Branch(__ IntPtrGreaterThan(cell_index, __ IntPtrConstant(0)), &if_export,
+            &if_import);
+
+  __ Bind(&if_export);
+  {
+    Node* regular_exports =
+        __ LoadObjectField(module, Module::kRegularExportsOffset);
+    // The actual array index is (cell_index - 1).
+    Node* export_index = __ IntPtrSub(cell_index, __ IntPtrConstant(1));
+    Node* cell = __ LoadFixedArrayElement(regular_exports, export_index);
+    __ SetAccumulator(__ LoadObjectField(cell, Cell::kValueOffset));
+    __ Goto(&end);
+  }
+
+  __ Bind(&if_import);
+  {
+    Node* regular_imports =
+        __ LoadObjectField(module, Module::kRegularImportsOffset);
+    // The actual array index is (-cell_index - 1).
+    Node* import_index = __ IntPtrSub(__ IntPtrConstant(-1), cell_index);
+    Node* cell = __ LoadFixedArrayElement(regular_imports, import_index);
+    __ SetAccumulator(__ LoadObjectField(cell, Cell::kValueOffset));
+    __ Goto(&end);
+  }
+
+  __ Bind(&end);
+  __ Dispatch();
+}
+
+// StaModuleVariable <cell_index> <depth>
+//
+// Store accumulator to the module variable identified by <cell_index>.
+// <depth> is the depth of the current context relative to the module context.
+void Interpreter::DoStaModuleVariable(InterpreterAssembler* assembler) {
+  Node* value = __ GetAccumulator();
+  Node* cell_index = __ BytecodeOperandImm(0);
+  Node* depth = __ BytecodeOperandUImm(1);
+
+  Node* module_context = __ GetContextAtDepth(__ GetContext(), depth);
+  Node* module =
+      __ LoadContextElement(module_context, Context::EXTENSION_INDEX);
+
+  Label if_export(assembler), if_import(assembler), end(assembler);
+  __ Branch(__ IntPtrGreaterThan(cell_index, __ IntPtrConstant(0)), &if_export,
+            &if_import);
+
+  __ Bind(&if_export);
+  {
+    Node* regular_exports =
+        __ LoadObjectField(module, Module::kRegularExportsOffset);
+    // The actual array index is (cell_index - 1).
+    Node* export_index = __ IntPtrSub(cell_index, __ IntPtrConstant(1));
+    Node* cell = __ LoadFixedArrayElement(regular_exports, export_index);
+    __ StoreObjectField(cell, Cell::kValueOffset, value);
+    __ Goto(&end);
+  }
+
+  __ Bind(&if_import);
+  {
+    // Not supported (probably never).
+    __ Abort(kUnsupportedModuleOperation);
+    __ Goto(&end);
+  }
+
+  __ Bind(&end);
+  __ Dispatch();
+}
+
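
The comments in both handlers encode the module cell-index convention: positive indices address regular_exports at (cell_index - 1), negative ones address regular_imports at (-cell_index - 1), and zero is unused. A tiny standalone example of that mapping:

    #include <iostream>

    int ArrayIndex(int cell_index, bool* is_export) {
      *is_export = cell_index > 0;
      return *is_export ? cell_index - 1 : -cell_index - 1;
    }

    int main() {
      bool is_export;
      std::cout << ArrayIndex(3, &is_export) << "\n";   // 2: regular_exports[2]
      std::cout << ArrayIndex(-1, &is_export) << "\n";  // 0: regular_imports[0]
    }
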
 // PushContext <context>
 //
 // Saves the current context in <context>, and pushes the accumulator as the
@@ -904,14 +940,24 @@
   __ Dispatch();
 }
 
-// TODO(mythria): Remove this function once all BinaryOps record type feedback.
-template <class Generator>
-void Interpreter::DoBinaryOp(InterpreterAssembler* assembler) {
+// TODO(mythria): Remove this function once all CompareOps record type feedback.
+void Interpreter::DoCompareOp(Token::Value compare_op,
+                              InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* lhs = __ LoadRegister(reg_index);
   Node* rhs = __ GetAccumulator();
   Node* context = __ GetContext();
-  Node* result = Generator::Generate(assembler, lhs, rhs, context);
+  Node* result;
+  switch (compare_op) {
+    case Token::IN:
+      result = assembler->HasProperty(rhs, lhs, context);
+      break;
+    case Token::INSTANCEOF:
+      result = assembler->InstanceOf(lhs, rhs, context);
+      break;
+    default:
+      UNREACHABLE();
+  }
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -930,8 +976,8 @@
   __ Dispatch();
 }
 
-template <class Generator>
-void Interpreter::DoCompareOpWithFeedback(InterpreterAssembler* assembler) {
+void Interpreter::DoCompareOpWithFeedback(Token::Value compare_op,
+                                          InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* lhs = __ LoadRegister(reg_index);
   Node* rhs = __ GetAccumulator();
@@ -950,7 +996,7 @@
   Variable var_type_feedback(assembler, MachineRepresentation::kWord32);
   Label lhs_is_smi(assembler), lhs_is_not_smi(assembler),
       gather_rhs_type(assembler), do_compare(assembler);
-  __ Branch(__ WordIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
+  __ Branch(__ TaggedIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
 
   __ Bind(&lhs_is_smi);
   var_type_feedback.Bind(
@@ -976,7 +1022,7 @@
   __ Bind(&gather_rhs_type);
   {
     Label rhs_is_smi(assembler);
-    __ GotoIf(__ WordIsSmi(rhs), &rhs_is_smi);
+    __ GotoIf(__ TaggedIsSmi(rhs), &rhs_is_smi);
 
     Node* rhs_map = __ LoadMap(rhs);
     Node* rhs_type =
@@ -999,7 +1045,39 @@
   __ Goto(&skip_feedback_update);
 
   __ Bind(&skip_feedback_update);
-  Node* result = Generator::Generate(assembler, lhs, rhs, context);
+  Node* result;
+  switch (compare_op) {
+    case Token::EQ:
+      result = assembler->Equal(CodeStubAssembler::kDontNegateResult, lhs, rhs,
+                                context);
+      break;
+    case Token::NE:
+      result =
+          assembler->Equal(CodeStubAssembler::kNegateResult, lhs, rhs, context);
+      break;
+    case Token::EQ_STRICT:
+      result = assembler->StrictEqual(CodeStubAssembler::kDontNegateResult, lhs,
+                                      rhs, context);
+      break;
+    case Token::LT:
+      result = assembler->RelationalComparison(CodeStubAssembler::kLessThan,
+                                               lhs, rhs, context);
+      break;
+    case Token::GT:
+      result = assembler->RelationalComparison(CodeStubAssembler::kGreaterThan,
+                                               lhs, rhs, context);
+      break;
+    case Token::LTE:
+      result = assembler->RelationalComparison(
+          CodeStubAssembler::kLessThanOrEqual, lhs, rhs, context);
+      break;
+    case Token::GTE:
+      result = assembler->RelationalComparison(
+          CodeStubAssembler::kGreaterThanOrEqual, lhs, rhs, context);
+      break;
+    default:
+      UNREACHABLE();
+  }
   __ SetAccumulator(result);
   __ Dispatch();
 }
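
DoCompareOpWithFeedback now selects the comparison with a Token::Value switch instead of being instantiated once per stub template. The switch runs while the bytecode handler is being generated at interpreter boot, not on every executed compare, so the cost is paid once per handler. A minimal sketch of that generator-time dispatch pattern:

    #include <functional>
    #include <iostream>
    #include <stdexcept>

    enum class Token { LT, GT, EQ };

    // Pick the comparison once while building a handler (the analogue of the
    // CallStub selection above), then reuse the built handler many times.
    std::function<bool(int, int)> MakeCompareHandler(Token op) {
      switch (op) {
        case Token::LT: return [](int a, int b) { return a < b; };
        case Token::GT: return [](int a, int b) { return a > b; };
        case Token::EQ: return [](int a, int b) { return a == b; };
      }
      throw std::logic_error("unreachable");
    }

    int main() {
      auto less_than = MakeCompareHandler(Token::LT);  // built once
      std::cout << less_than(1, 2) << less_than(3, 2) << "\n";  // 10
    }
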
@@ -1089,13 +1167,13 @@
   }
 
   Node* result_type =
-      __ Select(__ WordIsSmi(result),
+      __ Select(__ TaggedIsSmi(result),
                 __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
                 __ Int32Constant(BinaryOperationFeedback::kNumber));
 
   if (FLAG_debug_code) {
     Label ok(assembler);
-    __ GotoIf(__ WordIsSmi(result), &ok);
+    __ GotoIf(__ TaggedIsSmi(result), &ok);
     Node* result_map = __ LoadMap(result);
     __ AbortIfWordNotEqual(result_map, __ HeapNumberMapConstant(),
                            kExpectedHeapNumber);
@@ -1180,21 +1258,22 @@
 
   // {right} is known to be a Smi.
   // Check if the {left} is a Smi take the fast path.
-  __ BranchIf(__ WordIsSmi(left), &fastpath, &slowpath);
+  __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath);
   __ Bind(&fastpath);
   {
     // Try fast Smi addition first.
-    Node* pair = __ SmiAddWithOverflow(left, right);
+    Node* pair = __ IntPtrAddWithOverflow(__ BitcastTaggedToWord(left),
+                                          __ BitcastTaggedToWord(right));
     Node* overflow = __ Projection(1, pair);
 
     // Check if the Smi addition overflowed.
     Label if_notoverflow(assembler);
-    __ BranchIf(overflow, &slowpath, &if_notoverflow);
+    __ Branch(overflow, &slowpath, &if_notoverflow);
     __ Bind(&if_notoverflow);
     {
       __ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
                         type_feedback_vector, slot_index);
-      var_result.Bind(__ Projection(0, pair));
+      var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair)));
       __ Goto(&end);
     }
   }
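
The new fast path adds the two tagged words directly: Smi tag bits are zero, so the word-level sum of two Smis is already a correctly tagged Smi, and the machine add's overflow flag fires exactly when the result leaves Smi range. A standalone sketch assuming the 32-bit value << 1 encoding and the GCC/Clang __builtin_add_overflow intrinsic:

    #include <cstdint>
    #include <iostream>

    int32_t SmiTag(int32_t v) { return v << 1; }   // assumed 32-bit encoding
    int32_t SmiUntag(int32_t t) { return t >> 1; }

    int main() {
      int32_t left = SmiTag(20), right = SmiTag(22);
      int32_t sum;
      // Analogue of IntPtrAddWithOverflow on the bitcast tagged words.
      bool overflow = __builtin_add_overflow(left, right, &sum);
      if (!overflow) {
        // Both tag bits were zero, so sum is already a valid tagged Smi.
        std::cout << SmiUntag(sum) << "\n";  // 42
      }
    }
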
@@ -1233,21 +1312,22 @@
 
   // {right} is known to be a Smi.
   // Check if the {left} is a Smi take the fast path.
-  __ BranchIf(__ WordIsSmi(left), &fastpath, &slowpath);
+  __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath);
   __ Bind(&fastpath);
   {
     // Try fast Smi subtraction first.
-    Node* pair = __ SmiSubWithOverflow(left, right);
+    Node* pair = __ IntPtrSubWithOverflow(__ BitcastTaggedToWord(left),
+                                          __ BitcastTaggedToWord(right));
     Node* overflow = __ Projection(1, pair);
 
     // Check if the Smi subtraction overflowed.
     Label if_notoverflow(assembler);
-    __ BranchIf(overflow, &slowpath, &if_notoverflow);
+    __ Branch(overflow, &slowpath, &if_notoverflow);
     __ Bind(&if_notoverflow);
     {
       __ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
                         type_feedback_vector, slot_index);
-      var_result.Bind(__ Projection(0, pair));
+      var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair)));
       __ Goto(&end);
     }
   }
@@ -1287,7 +1367,7 @@
   Node* value = __ Word32Or(lhs_value, rhs_value);
   Node* result = __ ChangeInt32ToTagged(value);
   Node* result_type =
-      __ Select(__ WordIsSmi(result),
+      __ Select(__ TaggedIsSmi(result),
                 __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
                 __ Int32Constant(BinaryOperationFeedback::kNumber));
   __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
@@ -1315,7 +1395,7 @@
   Node* value = __ Word32And(lhs_value, rhs_value);
   Node* result = __ ChangeInt32ToTagged(value);
   Node* result_type =
-      __ Select(__ WordIsSmi(result),
+      __ Select(__ TaggedIsSmi(result),
                 __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
                 __ Int32Constant(BinaryOperationFeedback::kNumber));
   __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
@@ -1345,7 +1425,7 @@
   Node* value = __ Word32Shl(lhs_value, shift_count);
   Node* result = __ ChangeInt32ToTagged(value);
   Node* result_type =
-      __ Select(__ WordIsSmi(result),
+      __ Select(__ TaggedIsSmi(result),
                 __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
                 __ Int32Constant(BinaryOperationFeedback::kNumber));
   __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
@@ -1375,7 +1455,7 @@
   Node* value = __ Word32Sar(lhs_value, shift_count);
   Node* result = __ ChangeInt32ToTagged(value);
   Node* result_type =
-      __ Select(__ WordIsSmi(result),
+      __ Select(__ TaggedIsSmi(result),
                 __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
                 __ Int32Constant(BinaryOperationFeedback::kNumber));
   __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
@@ -1393,15 +1473,6 @@
 }
 
 template <class Generator>
-void Interpreter::DoUnaryOp(InterpreterAssembler* assembler) {
-  Node* value = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* result = Generator::Generate(assembler, value, context);
-  __ SetAccumulator(result);
-  __ Dispatch();
-}
-
-template <class Generator>
 void Interpreter::DoUnaryOpWithFeedback(InterpreterAssembler* assembler) {
   Node* value = __ GetAccumulator();
   Node* context = __ GetContext();
@@ -1495,7 +1566,7 @@
   Label if_true(assembler), if_false(assembler), end(assembler);
   Node* true_value = __ BooleanConstant(true);
   Node* false_value = __ BooleanConstant(false);
-  __ BranchIfWordEqual(value, true_value, &if_true, &if_false);
+  __ Branch(__ WordEqual(value, true_value), &if_true, &if_false);
   __ Bind(&if_true);
   {
     result.Bind(false_value);
@@ -1520,7 +1591,11 @@
 // Load the accumulator with the string representing the type of the
 // object in the accumulator.
 void Interpreter::DoTypeOf(InterpreterAssembler* assembler) {
-  DoUnaryOp<TypeofStub>(assembler);
+  Node* value = __ GetAccumulator();
+  Node* context = __ GetContext();
+  Node* result = assembler->Typeof(value, context);
+  __ SetAccumulator(result);
+  __ Dispatch();
 }
 
 void Interpreter::DoDelete(Runtime::FunctionId function_id,
@@ -1578,6 +1653,17 @@
   DoJSCall(assembler, TailCallMode::kDisallow);
 }
 
+// CallProperty <callable> <receiver> <arg_count> <feedback_slot_id>
+//
+// Call a JSFunction or Callable in |callable| with the |receiver| and
+// |arg_count| arguments in subsequent registers. Collect type feedback into
+// |feedback_slot_id|. The callable is known to be a property of the receiver.
+void Interpreter::DoCallProperty(InterpreterAssembler* assembler) {
+  // TODO(leszeks): Look into making the interpreter use the fact that the
+  // receiver is non-null.
+  DoJSCall(assembler, TailCallMode::kDisallow);
+}
+
 // TailCall <callable> <receiver> <arg_count> <feedback_slot_id>
 //
 // Tail call a JSFunction or Callable in |callable| with the |receiver| and
@@ -1660,9 +1746,8 @@
 
   // Get the function to call from the native context.
   Node* context = __ GetContext();
-  Node* native_context =
-      __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
-  Node* function = __ LoadContextSlot(native_context, context_index);
+  Node* native_context = __ LoadNativeContext(context);
+  Node* function = __ LoadContextElement(native_context, context_index);
 
   // Call the function.
   Node* result = __ CallJS(function, context, first_arg, args_count,
@@ -1698,35 +1783,35 @@
 //
 // Test if the value in the <src> register equals the accumulator.
 void Interpreter::DoTestEqual(InterpreterAssembler* assembler) {
-  DoCompareOpWithFeedback<EqualStub>(assembler);
+  DoCompareOpWithFeedback(Token::Value::EQ, assembler);
 }
 
 // TestNotEqual <src>
 //
 // Test if the value in the <src> register is not equal to the accumulator.
 void Interpreter::DoTestNotEqual(InterpreterAssembler* assembler) {
-  DoCompareOpWithFeedback<NotEqualStub>(assembler);
+  DoCompareOpWithFeedback(Token::Value::NE, assembler);
 }
 
 // TestEqualStrict <src>
 //
 // Test if the value in the <src> register is strictly equal to the accumulator.
 void Interpreter::DoTestEqualStrict(InterpreterAssembler* assembler) {
-  DoCompareOpWithFeedback<StrictEqualStub>(assembler);
+  DoCompareOpWithFeedback(Token::Value::EQ_STRICT, assembler);
 }
 
 // TestLessThan <src>
 //
 // Test if the value in the <src> register is less than the accumulator.
 void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) {
-  DoCompareOpWithFeedback<LessThanStub>(assembler);
+  DoCompareOpWithFeedback(Token::Value::LT, assembler);
 }
 
 // TestGreaterThan <src>
 //
 // Test if the value in the <src> register is greater than the accumulator.
 void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
-  DoCompareOpWithFeedback<GreaterThanStub>(assembler);
+  DoCompareOpWithFeedback(Token::Value::GT, assembler);
 }
 
 // TestLessThanOrEqual <src>
@@ -1734,7 +1819,7 @@
 // Test if the value in the <src> register is less than or equal to the
 // accumulator.
 void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
-  DoCompareOpWithFeedback<LessThanOrEqualStub>(assembler);
+  DoCompareOpWithFeedback(Token::Value::LTE, assembler);
 }
 
 // TestGreaterThanOrEqual <src>
@@ -1742,7 +1827,7 @@
 // Test if the value in the <src> register is greater than or equal to the
 // accumulator.
 void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) {
-  DoCompareOpWithFeedback<GreaterThanOrEqualStub>(assembler);
+  DoCompareOpWithFeedback(Token::Value::GTE, assembler);
 }
 
 // TestIn <src>
@@ -1750,7 +1835,7 @@
 // Test if the object referenced by the register operand is a property of the
 // object referenced by the accumulator.
 void Interpreter::DoTestIn(InterpreterAssembler* assembler) {
-  DoBinaryOp<HasPropertyStub>(assembler);
+  DoCompareOp(Token::IN, assembler);
 }
 
 // TestInstanceOf <src>
@@ -1758,7 +1843,7 @@
 // Test if the object referenced by the <src> register is an instance of the type
 // referenced by the accumulator.
 void Interpreter::DoTestInstanceOf(InterpreterAssembler* assembler) {
-  DoBinaryOp<InstanceOfStub>(assembler);
+  DoCompareOp(Token::INSTANCEOF, assembler);
 }
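The comparison handlers above now take a Token::Value instead of instantiating
one stub template per operator. Illustrated with plain C++ (a pattern sketch,
not V8's code), the refactoring replaces per-type instantiation with enum
dispatch:

    enum class CompareOp { kEq, kLt, kGt };

    // One function, selected at runtime, instead of one template
    // instantiation (and one generated stub) per operator.
    bool Compare(CompareOp op, int a, int b) {
      switch (op) {
        case CompareOp::kEq: return a == b;
        case CompareOp::kLt: return a < b;
        case CompareOp::kGt: return a > b;
      }
      return false;  // unreachable
    }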
 
 // Jump <imm>
@@ -2025,7 +2110,7 @@
   Node* use_fast_shallow_clone = __ Word32And(
       bytecode_flags,
       __ Int32Constant(CreateArrayLiteralFlags::FastShallowCloneBit::kMask));
-  __ BranchIf(use_fast_shallow_clone, &fast_shallow_clone, &call_runtime);
+  __ Branch(use_fast_shallow_clone, &fast_shallow_clone, &call_runtime);
 
   __ Bind(&fast_shallow_clone);
   {
@@ -2068,9 +2153,9 @@
   Label if_fast_clone(assembler),
       if_not_fast_clone(assembler, Label::kDeferred);
   Node* fast_clone_properties_count =
-      __ BitFieldDecode<CreateObjectLiteralFlags::FastClonePropertiesCountBits>(
+      __ DecodeWord32<CreateObjectLiteralFlags::FastClonePropertiesCountBits>(
           bytecode_flags);
-  __ BranchIf(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone);
+  __ Branch(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone);
 
   __ Bind(&if_fast_clone);
   {
@@ -2217,7 +2302,7 @@
   Node* duplicate_parameters_bit = __ Int32Constant(
       1 << SharedFunctionInfo::kHasDuplicateParametersBitWithinByte);
   Node* compare = __ Word32And(compiler_hints, duplicate_parameters_bit);
-  __ BranchIf(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);
+  __ Branch(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);
 
   __ Bind(&if_not_duplicate_parameters);
   {
@@ -2273,7 +2358,7 @@
   Label ok(assembler), stack_check_interrupt(assembler, Label::kDeferred);
 
   Node* interrupt = __ StackCheckTriggeredInterrupt();
-  __ BranchIf(interrupt, &stack_check_interrupt, &ok);
+  __ Branch(interrupt, &stack_check_interrupt, &ok);
 
   __ Bind(&ok);
   __ Dispatch();
@@ -2363,7 +2448,7 @@
   Node* object_reg = __ BytecodeOperandReg(0);
   Node* receiver = __ LoadRegister(object_reg);
   Node* context = __ GetContext();
-  Node* const zero_smi = __ SmiConstant(Smi::FromInt(0));
+  Node* const zero_smi = __ SmiConstant(Smi::kZero);
 
   Label nothing_to_iterate(assembler, Label::kDeferred),
       use_enum_cache(assembler), use_runtime(assembler, Label::kDeferred);
@@ -2446,7 +2531,7 @@
   // Check if we can use the for-in fast path potentially using the enum cache.
   Label if_fast(assembler), if_slow(assembler, Label::kDeferred);
   Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset);
-  __ BranchIfWordEqual(receiver_map, cache_type, &if_fast, &if_slow);
+  __ Branch(__ WordEqual(receiver_map, cache_type), &if_fast, &if_slow);
   __ Bind(&if_fast);
   {
     // Enum cache in use for {receiver}, the {key} is definitely valid.
@@ -2483,7 +2568,7 @@
 
   // Check if {index} is at {cache_length} already.
   Label if_true(assembler), if_false(assembler), end(assembler);
-  __ BranchIfWordEqual(index, cache_length, &if_true, &if_false);
+  __ Branch(__ WordEqual(index, cache_length), &if_true, &if_false);
   __ Bind(&if_true);
   {
     __ SetAccumulator(__ BooleanConstant(false));
@@ -2554,7 +2639,7 @@
   STATIC_ASSERT(StepFrame > StepNext);
   STATIC_ASSERT(LastStepAction == StepFrame);
   Node* step_next = __ Int32Constant(StepNext);
-  __ BranchIfInt32LessThanOrEqual(step_next, step_action, &if_stepping, &ok);
+  __ Branch(__ Int32LessThanOrEqual(step_next, step_action), &if_stepping, &ok);
   __ Bind(&ok);
 
   Node* array =
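The remaining interpreter.cc hunks above are one mechanical rewrite: the fused
branch helpers are gone, and the condition is materialized explicitly before a
single Branch call, e.g.:

    // Before: __ BranchIfWordEqual(lhs, rhs, &if_true, &if_false);
    // After:  __ Branch(__ WordEqual(lhs, rhs), &if_true, &if_false);

Where the condition was already a value (BranchIf), only the name changed.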
diff --git a/src/interpreter/interpreter.h b/src/interpreter/interpreter.h
index b646bf8..b10ae2e 100644
--- a/src/interpreter/interpreter.h
+++ b/src/interpreter/interpreter.h
@@ -78,16 +78,12 @@
 
   // Generates code to perform the binary operation via |Generator|.
   template <class Generator>
-  void DoBinaryOp(InterpreterAssembler* assembler);
-
-  // Generates code to perform the binary operation via |Generator|.
-  template <class Generator>
   void DoBinaryOpWithFeedback(InterpreterAssembler* assembler);
 
  // Generates code to perform the comparison for |compare_op| while gathering
   // type feedback.
-  template <class Generator>
-  void DoCompareOpWithFeedback(InterpreterAssembler* assembler);
+  void DoCompareOpWithFeedback(Token::Value compare_op,
+                               InterpreterAssembler* assembler);
 
   // Generates code to perform the bitwise binary operation corresponding to
   // |bitwise_op| while gathering type feedback.
@@ -99,10 +95,6 @@
   template <class Generator>
   void DoBinaryOpWithImmediate(InterpreterAssembler* assembler);
 
-  // Generates code to perform the unary operation via |Generator|.
-  template <class Generator>
-  void DoUnaryOp(InterpreterAssembler* assembler);
-
   // Generates code to perform the unary operation via |Generator| while
   // gathering type feedback.
   template <class Generator>
@@ -147,22 +139,11 @@
   void DoStaLookupSlot(LanguageMode language_mode,
                        InterpreterAssembler* assembler);
 
-  // Generates code to load a context slot.
-  compiler::Node* BuildLoadContextSlot(InterpreterAssembler* assembler);
-
   // Generates code to load a global.
   compiler::Node* BuildLoadGlobal(Callable ic, compiler::Node* context,
                                   compiler::Node* feedback_slot,
                                   InterpreterAssembler* assembler);
 
-  // Generates code to load a named property.
-  compiler::Node* BuildLoadNamedProperty(Callable ic,
-                                         InterpreterAssembler* assembler);
-
-  // Generates code to load a keyed property.
-  compiler::Node* BuildLoadKeyedProperty(Callable ic,
-                                         InterpreterAssembler* assembler);
-
   // Generates code to prepare the result for ForInPrepare. Cache data
   // are placed into the consecutive series of registers starting at
   // |output_register|.
@@ -183,6 +164,7 @@
                                       OperandScale operand_scale);
 
   bool IsDispatchTableInitialized();
+  bool ShouldInitializeDispatchTable();
 
   static const int kNumberOfWideVariants = 3;
   static const int kDispatchTableSize = kNumberOfWideVariants * (kMaxUInt8 + 1);
diff --git a/src/interpreter/mkpeephole.cc b/src/interpreter/mkpeephole.cc
index 270fe83..62d3a77 100644
--- a/src/interpreter/mkpeephole.cc
+++ b/src/interpreter/mkpeephole.cc
@@ -79,33 +79,6 @@
 // static
 PeepholeActionAndData PeepholeActionTableWriter::LookupActionAndData(
     Bytecode last, Bytecode current) {
-  // Optimize various accumulator loads followed by store accumulator
-  // to an equivalent register load and loading the accumulator with
-  // the register. The latter accumulator load can often be elided as
-  // it is side-effect free and often followed by another accumulator
-  // load so can be elided.
-  if (current == Bytecode::kStar) {
-    switch (last) {
-      case Bytecode::kLdaNamedProperty:
-        return {PeepholeAction::kTransformLdaStarToLdrLdarAction,
-                Bytecode::kLdrNamedProperty};
-      case Bytecode::kLdaKeyedProperty:
-        return {PeepholeAction::kTransformLdaStarToLdrLdarAction,
-                Bytecode::kLdrKeyedProperty};
-      case Bytecode::kLdaGlobal:
-        return {PeepholeAction::kTransformLdaStarToLdrLdarAction,
-                Bytecode::kLdrGlobal};
-      case Bytecode::kLdaContextSlot:
-        return {PeepholeAction::kTransformLdaStarToLdrLdarAction,
-                Bytecode::kLdrContextSlot};
-      case Bytecode::kLdaUndefined:
-        return {PeepholeAction::kTransformLdaStarToLdrLdarAction,
-                Bytecode::kLdrUndefined};
-      default:
-        break;
-    }
-  }
-
   // ToName bytecodes can be replaced by Star with the same output register if
   // the value in the accumulator is already a name.
   if (current == Bytecode::kToName && Bytecodes::PutsNameInAccumulator(last)) {
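For context, the deleted peephole rewrite turned an accumulator load followed
by Star into the fused Ldr form, schematically (reconstructed from the removed
comment and action names):

    //   LdaGlobal [slot] ; Star r   ==>   LdrGlobal [slot], r ; Ldar r
    // The trailing Ldar is side-effect free, so a later peephole step could
    // often elide it.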
diff --git a/src/isolate-inl.h b/src/isolate-inl.h
index 34c98bb..a148968 100644
--- a/src/isolate-inl.h
+++ b/src/isolate-inl.h
@@ -130,17 +130,27 @@
 
   Cell* species_cell = heap()->species_protector();
   return species_cell->value()->IsSmi() &&
-         Smi::cast(species_cell->value())->value() == kArrayProtectorValid;
+         Smi::cast(species_cell->value())->value() == kProtectorValid;
 }
 
 bool Isolate::IsHasInstanceLookupChainIntact() {
   PropertyCell* has_instance_cell = heap()->has_instance_protector();
-  return has_instance_cell->value() == Smi::FromInt(kArrayProtectorValid);
+  return has_instance_cell->value() == Smi::FromInt(kProtectorValid);
 }
 
 bool Isolate::IsStringLengthOverflowIntact() {
   PropertyCell* has_instance_cell = heap()->string_length_protector();
-  return has_instance_cell->value() == Smi::FromInt(kArrayProtectorValid);
+  return has_instance_cell->value() == Smi::FromInt(kProtectorValid);
+}
+
+bool Isolate::IsFastArrayIterationIntact() {
+  Cell* fast_iteration = heap()->fast_array_iteration_protector();
+  return fast_iteration->value() == Smi::FromInt(kProtectorValid);
+}
+
+bool Isolate::IsArrayIteratorLookupChainIntact() {
+  Cell* array_iterator_cell = heap()->array_iterator_protector();
+  return array_iterator_cell->value() == Smi::FromInt(kProtectorValid);
 }
 
 }  // namespace internal
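With the rename to kProtectorValid/kProtectorInvalid, the protector cells all
follow one pattern. A minimal toy model of the check/invalidate pair, in plain
C++ rather than V8's Cell/Smi types:

    constexpr int kProtectorValid = 1;
    constexpr int kProtectorInvalid = 0;

    struct ProtectorCell { int value = kProtectorValid; };

    // Fast paths stay usable only while the cell is intact.
    bool IsIntact(const ProtectorCell& cell) {
      return cell.value == kProtectorValid;
    }
    void Invalidate(ProtectorCell* cell) { cell->value = kProtectorInvalid; }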
diff --git a/src/isolate.cc b/src/isolate.cc
index 63c927b..0eab398 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -20,12 +20,15 @@
 #include "src/codegen.h"
 #include "src/compilation-cache.h"
 #include "src/compilation-statistics.h"
+#include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
 #include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
 #include "src/crankshaft/hydrogen.h"
 #include "src/debug/debug.h"
 #include "src/deoptimizer.h"
+#include "src/elements.h"
 #include "src/external-reference-table.h"
 #include "src/frames-inl.h"
+#include "src/ic/access-compiler-data.h"
 #include "src/ic/stub-cache.h"
 #include "src/interface-descriptors.h"
 #include "src/interpreter/interpreter.h"
@@ -39,6 +42,7 @@
 #include "src/runtime-profiler.h"
 #include "src/simulator.h"
 #include "src/snapshot/deserializer.h"
+#include "src/tracing/tracing-category-observer.h"
 #include "src/v8.h"
 #include "src/version.h"
 #include "src/vm-state-inl.h"
@@ -507,7 +511,7 @@
 
       case StackFrame::WASM: {
         WasmFrame* wasm_frame = WasmFrame::cast(frame);
-        Handle<Object> wasm_object(wasm_frame->wasm_obj(), this);
+        Handle<Object> instance(wasm_frame->wasm_instance(), this);
         const int wasm_function_index = wasm_frame->function_index();
         Code* code = wasm_frame->unchecked_code();
         Handle<AbstractCode> abstract_code(AbstractCode::cast(code), this);
@@ -516,12 +520,15 @@
 
         // TODO(wasm): The wasm object returned by the WasmFrame should always
         //             be a wasm object.
-        DCHECK(wasm::IsWasmObject(*wasm_object) ||
-               wasm_object->IsUndefined(this));
+        DCHECK(wasm::IsWasmInstance(*instance) || instance->IsUndefined(this));
 
-        elements = FrameArray::AppendWasmFrame(
-            elements, wasm_object, wasm_function_index, abstract_code, offset,
-            FrameArray::kIsWasmFrame);
+        int flags = wasm::WasmIsAsmJs(*instance, this)
+                        ? FrameArray::kIsAsmJsWasmFrame
+                        : FrameArray::kIsWasmFrame;
+
+        elements =
+            FrameArray::AppendWasmFrame(elements, instance, wasm_function_index,
+                                        abstract_code, offset, flags);
       } break;
 
       default:
@@ -627,7 +634,7 @@
     if (!line_key_.is_null()) {
       Script::PositionInfo info;
       bool valid_pos =
-          script->GetPositionInfo(position, &info, Script::WITH_OFFSET);
+          Script::GetPositionInfo(script, position, &info, Script::WITH_OFFSET);
 
       if (!column_key_.is_null() && valid_pos) {
         JSObject::AddProperty(stack_frame, column_key_,
@@ -695,17 +702,18 @@
 
     if (!function_key_.is_null()) {
       Handle<String> name = wasm::GetWasmFunctionName(
-          isolate_, handle(frame->wasm_obj(), isolate_),
+          isolate_, handle(frame->wasm_instance(), isolate_),
           frame->function_index());
       JSObject::AddProperty(stack_frame, function_key_, name, NONE);
     }
-    // Encode the function index as line number.
+    // Encode the function index as line number (1-based).
     if (!line_key_.is_null()) {
       JSObject::AddProperty(
           stack_frame, line_key_,
-          isolate_->factory()->NewNumberFromInt(frame->function_index()), NONE);
+          isolate_->factory()->NewNumberFromInt(frame->function_index() + 1),
+          NONE);
     }
-    // Encode the byte offset as column.
+    // Encode the byte offset as column (1-based).
     if (!column_key_.is_null()) {
       Code* code = frame->LookupCode();
       int offset = static_cast<int>(frame->pc() - code->instruction_start());
@@ -1063,6 +1071,39 @@
   HandleScope scope(this);
   Handle<Object> exception_handle(exception, this);
 
+  if (FLAG_print_all_exceptions) {
+    printf("=========================================================\n");
+    printf("Exception thrown:\n");
+    if (location) {
+      Handle<Script> script = location->script();
+      Handle<Object> name = Script::GetNameOrSourceURL(script);
+      printf("at ");
+      if (name->IsString() && String::cast(*name)->length() > 0)
+        String::cast(*name)->PrintOn(stdout);
+      else
+        printf("<anonymous>");
+// Script::GetLineNumber and Script::GetColumnNumber can allocate on the heap to
+// initialize the line_ends array, so be careful when calling them.
+#ifdef DEBUG
+      if (AllowHeapAllocation::IsAllowed()) {
+#else
+      if (false) {
+#endif
+        printf(", %d:%d - %d:%d\n",
+               Script::GetLineNumber(script, location->start_pos()) + 1,
+               Script::GetColumnNumber(script, location->start_pos()),
+               Script::GetLineNumber(script, location->end_pos()) + 1,
+               Script::GetColumnNumber(script, location->end_pos()));
+      } else {
+        printf(", line %d\n", script->GetLineNumber(location->start_pos()) + 1);
+      }
+    }
+    exception->Print();
+    printf("Stack Trace:\n");
+    PrintStack(stdout);
+    printf("=========================================================\n");
+  }
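With --print-all-exceptions enabled, the block above produces output along
these lines (script name, positions, and payload are illustrative):

    =========================================================
    Exception thrown:
    at example.js, 3:10 - 3:14
    <exception printed via Print()>
    Stack Trace:
    <frames printed via PrintStack(stdout)>
    =========================================================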
+
   // Determine whether a message needs to be created for the given exception
   // depending on the following criteria:
   // 1) External v8::TryCatch missing: Always create a message because any
@@ -1228,9 +1269,19 @@
     // For interpreted frame we perform a range lookup in the handler table.
     if (frame->is_interpreted() && catchable_by_js) {
       InterpretedFrame* js_frame = static_cast<InterpretedFrame*>(frame);
+      int register_slots = js_frame->GetBytecodeArray()->register_count();
       int context_reg = 0;  // Will contain register index holding context.
       offset = js_frame->LookupExceptionHandlerInTable(&context_reg, nullptr);
       if (offset >= 0) {
+        // Compute the stack pointer from the frame pointer. This ensures that
+        // argument slots on the stack are dropped as returning would.
+        // Note: This is only needed for interpreted frames that have been
+        //       materialized by the deoptimizer. If there is a handler frame
+        //       in between then {frame->sp()} would already be correct.
+        Address return_sp = frame->fp() -
+                            InterpreterFrameConstants::kFixedFrameSizeFromFp -
+                            register_slots * kPointerSize;
+
         // Patch the bytecode offset in the interpreted frame to reflect the
         // position of the exception handler. The special builtin below will
         // take care of continuing to dispatch at that position. Also restore
@@ -1241,7 +1292,7 @@
 
         // Gather information from the frame.
         code = *builtins()->InterpreterEnterBytecodeDispatch();
-        handler_sp = frame->sp();
+        handler_sp = return_sp;
         handler_fp = frame->fp();
         break;
       }
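The handler stack pointer is recomputed from fp alone. A hedged worked example
of the arithmetic; the value used for kFixedFrameSizeFromFp here is an
assumption for illustration, the real constant comes from the frame layout:

    #include <cstdint>

    constexpr intptr_t kPointerSize = 8;  // 64-bit target
    // Assumption for illustration only; see InterpreterFrameConstants.
    constexpr intptr_t kFixedFrameSizeFromFp = 4 * kPointerSize;

    intptr_t HandlerSp(intptr_t fp, int register_slots) {
      // Drop the fixed frame area plus one slot per interpreter register.
      return fp - kFixedFrameSizeFromFp -
             static_cast<intptr_t>(register_slots) * kPointerSize;
    }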
@@ -1933,48 +1984,102 @@
 
 class VerboseAccountingAllocator : public AccountingAllocator {
  public:
-  VerboseAccountingAllocator(Heap* heap, size_t sample_bytes)
-      : heap_(heap), last_memory_usage_(0), sample_bytes_(sample_bytes) {}
+  VerboseAccountingAllocator(Heap* heap, size_t allocation_sample_bytes,
+                             size_t pool_sample_bytes)
+      : heap_(heap),
+        last_memory_usage_(0),
+        last_pool_size_(0),
+        nesting_deepth_(0),
+        allocation_sample_bytes_(allocation_sample_bytes),
+        pool_sample_bytes_(pool_sample_bytes) {}
 
-  v8::internal::Segment* AllocateSegment(size_t size) override {
-    v8::internal::Segment* memory = AccountingAllocator::AllocateSegment(size);
+  v8::internal::Segment* GetSegment(size_t size) override {
+    v8::internal::Segment* memory = AccountingAllocator::GetSegment(size);
     if (memory) {
-      size_t current = GetCurrentMemoryUsage();
-      if (last_memory_usage_.Value() + sample_bytes_ < current) {
-        PrintJSON(current);
-        last_memory_usage_.SetValue(current);
+      size_t malloced_current = GetCurrentMemoryUsage();
+      size_t pooled_current = GetCurrentPoolSize();
+
+      if (last_memory_usage_.Value() + allocation_sample_bytes_ <
+              malloced_current ||
+          last_pool_size_.Value() + pool_sample_bytes_ < pooled_current) {
+        PrintMemoryJSON(malloced_current, pooled_current);
+        last_memory_usage_.SetValue(malloced_current);
+        last_pool_size_.SetValue(pooled_current);
       }
     }
     return memory;
   }
 
-  void FreeSegment(v8::internal::Segment* memory) override {
-    AccountingAllocator::FreeSegment(memory);
-    size_t current = GetCurrentMemoryUsage();
-    if (current + sample_bytes_ < last_memory_usage_.Value()) {
-      PrintJSON(current);
-      last_memory_usage_.SetValue(current);
+  void ReturnSegment(v8::internal::Segment* memory) override {
+    AccountingAllocator::ReturnSegment(memory);
+    size_t malloced_current = GetCurrentMemoryUsage();
+    size_t pooled_current = GetCurrentPoolSize();
+
+    if (malloced_current + allocation_sample_bytes_ <
+            last_memory_usage_.Value() ||
+        pooled_current + pool_sample_bytes_ < last_pool_size_.Value()) {
+      PrintMemoryJSON(malloced_current, pooled_current);
+      last_memory_usage_.SetValue(malloced_current);
+      last_pool_size_.SetValue(pooled_current);
     }
   }
 
+  void ZoneCreation(const Zone* zone) override {
+    double time = heap_->isolate()->time_millis_since_init();
+    PrintF(
+        "{"
+        "\"type\": \"zonecreation\", "
+        "\"isolate\": \"%p\", "
+        "\"time\": %f, "
+        "\"ptr\": \"%p\", "
+        "\"name\": \"%s\","
+        "\"nesting\": %zu"
+        "}\n",
+        reinterpret_cast<void*>(heap_->isolate()), time,
+        reinterpret_cast<const void*>(zone), zone->name(),
+        nesting_deepth_.Value());
+    nesting_deepth_.Increment(1);
+  }
+
+  void ZoneDestruction(const Zone* zone) override {
+    nesting_deepth_.Decrement(1);
+    double time = heap_->isolate()->time_millis_since_init();
+    PrintF(
+        "{"
+        "\"type\": \"zonedestruction\", "
+        "\"isolate\": \"%p\", "
+        "\"time\": %f, "
+        "\"ptr\": \"%p\", "
+        "\"name\": \"%s\", "
+        "\"size\": %zu,"
+        "\"nesting\": %zu"
+        "}\n",
+        reinterpret_cast<void*>(heap_->isolate()), time,
+        reinterpret_cast<const void*>(zone), zone->name(),
+        zone->allocation_size(), nesting_deepth_.Value());
+  }
+
  private:
-  void PrintJSON(size_t sample) {
+  void PrintMemoryJSON(size_t malloced, size_t pooled) {
     // Note: Neither the isolate nor the heap is locked, so be careful with
     // accesses, as the allocator is potentially used on a concurrent thread.
     double time = heap_->isolate()->time_millis_since_init();
     PrintF(
         "{"
-        "\"type\": \"malloced\", "
+        "\"type\": \"zone\", "
         "\"isolate\": \"%p\", "
         "\"time\": %f, "
-        "\"value\": %zu"
+        "\"allocated\": %zu,"
+        "\"pooled\": %zu"
         "}\n",
-        reinterpret_cast<void*>(heap_->isolate()), time, sample);
+        reinterpret_cast<void*>(heap_->isolate()), time, malloced, pooled);
   }
 
   Heap* heap_;
   base::AtomicNumber<size_t> last_memory_usage_;
-  size_t sample_bytes_;
+  base::AtomicNumber<size_t> last_pool_size_;
+  base::AtomicNumber<size_t> nesting_deepth_;
+  size_t allocation_sample_bytes_, pool_sample_bytes_;
 };
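For reference, the JSON events these hooks emit look like the following, one
object per line (pointers, times, and sizes illustrative; the missing spaces
after some commas match the format strings above):

    {"type": "zonecreation", "isolate": "0x55d1c0", "time": 1041.3, "ptr": "0x55d9e0", "name": "CompilerZone","nesting": 0}
    {"type": "zone", "isolate": "0x55d1c0", "time": 1041.4, "allocated": 524288,"pooled": 131072}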
 
 Isolate::Isolate(bool enable_serializer)
@@ -1997,15 +2102,13 @@
       capture_stack_trace_for_uncaught_exceptions_(false),
       stack_trace_for_uncaught_exceptions_frame_limit_(0),
       stack_trace_for_uncaught_exceptions_options_(StackTrace::kOverview),
-      keyed_lookup_cache_(NULL),
       context_slot_cache_(NULL),
       descriptor_lookup_cache_(NULL),
       handle_scope_implementer_(NULL),
       unicode_cache_(NULL),
-      allocator_(FLAG_trace_gc_object_stats
-                     ? new VerboseAccountingAllocator(&heap_, 256 * KB)
-                     : new AccountingAllocator()),
-      runtime_zone_(new Zone(allocator_)),
+      allocator_(FLAG_trace_gc_object_stats ? new VerboseAccountingAllocator(
+                                                  &heap_, 256 * KB, 128 * KB)
+                                            : new AccountingAllocator()),
       inner_pointer_to_code_cache_(NULL),
       global_handles_(NULL),
       eternal_handles_(NULL),
@@ -2031,7 +2134,6 @@
       optimizing_compile_dispatcher_(NULL),
       stress_deopt_count_(0),
       next_optimization_id_(0),
-      js_calls_from_api_counter_(0),
 #if TRACE_MAPS
       next_unique_sfi_id_(0),
 #endif
@@ -2170,13 +2272,16 @@
   delete heap_profiler_;
   heap_profiler_ = NULL;
 
+  cancelable_task_manager()->CancelAndWait();
+
   heap_.TearDown();
   logger_->TearDown();
 
   delete interpreter_;
   interpreter_ = NULL;
 
-  cancelable_task_manager()->CancelAndWait();
+  delete compiler_dispatcher_tracer_;
+  compiler_dispatcher_tracer_ = nullptr;
 
   delete cpu_profiler_;
   cpu_profiler_ = NULL;
@@ -2200,9 +2305,6 @@
 Isolate::~Isolate() {
   TRACE_ISOLATE(destructor);
 
-  // Has to be called while counters_ are still alive
-  runtime_zone_->DeleteKeptSegment();
-
   // The entry stack must be empty when we get here.
   DCHECK(entry_stack_ == NULL || entry_stack_->previous_item == NULL);
 
@@ -2218,6 +2320,9 @@
   delete[] call_descriptor_data_;
   call_descriptor_data_ = NULL;
 
+  delete access_compiler_data_;
+  access_compiler_data_ = NULL;
+
   delete regexp_stack_;
   regexp_stack_ = NULL;
 
@@ -2225,8 +2330,6 @@
   descriptor_lookup_cache_ = NULL;
   delete context_slot_cache_;
   context_slot_cache_ = NULL;
-  delete keyed_lookup_cache_;
-  keyed_lookup_cache_ = NULL;
 
   delete load_stub_cache_;
   load_stub_cache_ = NULL;
@@ -2279,9 +2382,6 @@
   delete cancelable_task_manager_;
   cancelable_task_manager_ = nullptr;
 
-  delete runtime_zone_;
-  runtime_zone_ = nullptr;
-
   delete allocator_;
   allocator_ = nullptr;
 
@@ -2371,7 +2471,6 @@
 #undef ASSIGN_ELEMENT
 
   compilation_cache_ = new CompilationCache(this);
-  keyed_lookup_cache_ = new KeyedLookupCache();
   context_slot_cache_ = new ContextSlotCache();
   descriptor_lookup_cache_ = new DescriptorLookupCache();
   unicode_cache_ = new UnicodeCache();
@@ -2388,9 +2487,11 @@
   date_cache_ = new DateCache();
   call_descriptor_data_ =
       new CallInterfaceDescriptorData[CallDescriptors::NUMBER_OF_DESCRIPTORS];
+  access_compiler_data_ = new AccessCompilerData();
   cpu_profiler_ = new CpuProfiler(this);
   heap_profiler_ = new HeapProfiler(heap());
   interpreter_ = new interpreter::Interpreter(this);
+  compiler_dispatcher_tracer_ = new CompilerDispatcherTracer(this);
 
   // Enable logging before setting up the heap
   logger_->SetUp(this);
@@ -2471,9 +2572,7 @@
     }
     load_stub_cache_->Initialize();
     store_stub_cache_->Initialize();
-    if (FLAG_ignition || serializer_enabled()) {
-      interpreter_->Initialize();
-    }
+    interpreter_->Initialize();
 
     heap_.NotifyDeserializationComplete();
   }
@@ -2651,8 +2750,8 @@
   turbo_statistics_ = nullptr;
   delete hstatistics_;
   hstatistics_ = nullptr;
-  if (FLAG_runtime_call_stats &&
-      !TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED()) {
+  if (V8_UNLIKELY(FLAG_runtime_stats ==
+                  v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE)) {
     OFStream os(stdout);
     counters()->runtime_call_stats()->Print(os);
     counters()->runtime_call_stats()->Reset();
@@ -2733,7 +2832,7 @@
   PropertyCell* no_elements_cell = heap()->array_protector();
   bool cell_reports_intact =
       no_elements_cell->value()->IsSmi() &&
-      Smi::cast(no_elements_cell->value())->value() == kArrayProtectorValid;
+      Smi::cast(no_elements_cell->value())->value() == kProtectorValid;
 
 #ifdef DEBUG
   Map* root_array_map =
@@ -2792,7 +2891,7 @@
   Cell* is_concat_spreadable_cell = heap()->is_concat_spreadable_protector();
   bool is_is_concat_spreadable_set =
       Smi::cast(is_concat_spreadable_cell->value())->value() ==
-      kArrayProtectorInvalid;
+      kProtectorInvalid;
 #ifdef DEBUG
   Map* root_array_map = get_initial_js_array_map(GetInitialFastElementsKind());
   if (root_array_map == NULL) {
@@ -2827,7 +2926,7 @@
   if (!IsArrayOrObjectPrototype(*object)) return;
   PropertyCell::SetValueWithInvalidation(
       factory()->array_protector(),
-      handle(Smi::FromInt(kArrayProtectorInvalid), this));
+      handle(Smi::FromInt(kProtectorInvalid), this));
 }
 
 void Isolate::InvalidateHasInstanceProtector() {
@@ -2835,7 +2934,7 @@
   DCHECK(IsHasInstanceLookupChainIntact());
   PropertyCell::SetValueWithInvalidation(
       factory()->has_instance_protector(),
-      handle(Smi::FromInt(kArrayProtectorInvalid), this));
+      handle(Smi::FromInt(kProtectorInvalid), this));
   DCHECK(!IsHasInstanceLookupChainIntact());
 }
 
@@ -2843,15 +2942,14 @@
   DCHECK(factory()->is_concat_spreadable_protector()->value()->IsSmi());
   DCHECK(IsIsConcatSpreadableLookupChainIntact());
   factory()->is_concat_spreadable_protector()->set_value(
-      Smi::FromInt(kArrayProtectorInvalid));
+      Smi::FromInt(kProtectorInvalid));
   DCHECK(!IsIsConcatSpreadableLookupChainIntact());
 }
 
 void Isolate::InvalidateArraySpeciesProtector() {
   DCHECK(factory()->species_protector()->value()->IsSmi());
   DCHECK(IsArraySpeciesLookupChainIntact());
-  factory()->species_protector()->set_value(
-      Smi::FromInt(kArrayProtectorInvalid));
+  factory()->species_protector()->set_value(Smi::FromInt(kProtectorInvalid));
   DCHECK(!IsArraySpeciesLookupChainIntact());
 }
 
@@ -2860,10 +2958,18 @@
   DCHECK(IsStringLengthOverflowIntact());
   PropertyCell::SetValueWithInvalidation(
       factory()->string_length_protector(),
-      handle(Smi::FromInt(kArrayProtectorInvalid), this));
+      handle(Smi::FromInt(kProtectorInvalid), this));
   DCHECK(!IsStringLengthOverflowIntact());
 }
 
+void Isolate::InvalidateArrayIteratorProtector() {
+  DCHECK(factory()->array_iterator_protector()->value()->IsSmi());
+  DCHECK(IsArrayIteratorLookupChainIntact());
+  factory()->array_iterator_protector()->set_value(
+      Smi::FromInt(kProtectorInvalid));
+  DCHECK(!IsArrayIteratorLookupChainIntact());
+}
+
 bool Isolate::IsAnyInitialArrayPrototype(Handle<JSArray> array) {
   DisallowHeapAllocation no_gc;
   return IsInAnyContext(*array, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
@@ -2888,6 +2994,14 @@
   return random_number_generator_;
 }
 
+int Isolate::GenerateIdentityHash(uint32_t mask) {
+  int hash;
+  int attempts = 0;
+  do {
+    hash = random_number_generator()->NextInt() & mask;
+  } while (hash == 0 && attempts++ < 30);
+  return hash != 0 ? hash : 1;
+}
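The retry loop above exists because a zero hash is reserved, presumably as the
"no hash computed yet" sentinel. A standalone sketch of the same strategy,
with a stand-in RNG instead of the isolate's:

    #include <cstdint>
    #include <random>

    // Returns a value that is non-zero under |mask|; after 30 failed draws
    // it falls back to 1, mirroring the bound above.
    int GenerateIdentityHashSketch(uint32_t mask) {
      static std::mt19937 rng{std::random_device{}()};
      int hash;
      int attempts = 0;
      do {
        hash = static_cast<int>(rng() & mask);
      } while (hash == 0 && attempts++ < 30);
      return hash != 0 ? hash : 1;
    }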
 
 Object* Isolate::FindCodeObject(Address a) {
   return inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(a);
@@ -3005,20 +3119,88 @@
       v8::Utils::StackTraceToLocal(stack_trace)));
 }
 
-void Isolate::PromiseResolveThenableJob(Handle<PromiseContainer> container,
-                                        MaybeHandle<Object>* result,
-                                        MaybeHandle<Object>* maybe_exception) {
-  if (debug()->is_active()) {
-    Handle<Object> before_debug_event(container->before_debug_event(), this);
-    if (before_debug_event->IsJSObject()) {
-      debug()->OnAsyncTaskEvent(Handle<JSObject>::cast(before_debug_event));
+namespace {
+class PromiseDebugEventScope {
+ public:
+  PromiseDebugEventScope(Isolate* isolate, Object* id, Object* name)
+      : isolate_(isolate),
+        id_(id, isolate_),
+        name_(name, isolate_),
+        is_debug_active_(isolate_->debug()->is_active() && id_->IsNumber() &&
+                         name_->IsString()) {
+    if (is_debug_active_) {
+      isolate_->debug()->OnAsyncTaskEvent(
+          isolate_->factory()->will_handle_string(), id_,
+          Handle<String>::cast(name_));
     }
   }
 
-  Handle<JSReceiver> thenable(container->thenable(), this);
-  Handle<JSFunction> resolve(container->resolve(), this);
-  Handle<JSFunction> reject(container->reject(), this);
-  Handle<JSReceiver> then(container->then(), this);
+  ~PromiseDebugEventScope() {
+    if (is_debug_active_) {
+      isolate_->debug()->OnAsyncTaskEvent(
+          isolate_->factory()->did_handle_string(), id_,
+          Handle<String>::cast(name_));
+    }
+  }
+
+ private:
+  Isolate* isolate_;
+  Handle<Object> id_;
+  Handle<Object> name_;
+  bool is_debug_active_;
+};
+}  // namespace
+
+void Isolate::PromiseReactionJob(Handle<PromiseReactionJobInfo> info,
+                                 MaybeHandle<Object>* result,
+                                 MaybeHandle<Object>* maybe_exception) {
+  PromiseDebugEventScope helper(this, info->debug_id(), info->debug_name());
+
+  Handle<Object> value(info->value(), this);
+  Handle<Object> tasks(info->tasks(), this);
+  Handle<JSFunction> promise_handle_fn = promise_handle();
+  Handle<Object> undefined = factory()->undefined_value();
+
+  // If tasks is an array, we have multiple onFulfilled/onRejected callbacks
+  // associated with the promise. The deferred object for each callback
+  // is attached to this array as well.
+  // Otherwise, there is a single callback and the deferred object is attached
+  // directly to PromiseReactionJobInfo.
+  if (tasks->IsJSArray()) {
+    Handle<JSArray> array = Handle<JSArray>::cast(tasks);
+    DCHECK(array->length()->IsSmi());
+    int length = Smi::cast(array->length())->value();
+    ElementsAccessor* accessor = array->GetElementsAccessor();
+    DCHECK(length % 2 == 0);
+    for (int i = 0; i < length; i += 2) {
+      DCHECK(accessor->HasElement(array, i));
+      DCHECK(accessor->HasElement(array, i + 1));
+      Handle<Object> argv[] = {value, accessor->Get(array, i),
+                               accessor->Get(array, i + 1)};
+      *result = Execution::TryCall(this, promise_handle_fn, undefined,
+                                   arraysize(argv), argv, maybe_exception);
+      // If execution is terminating, just bail out.
+      if (result->is_null() && maybe_exception->is_null()) {
+        return;
+      }
+    }
+  } else {
+    Handle<Object> deferred(info->deferred(), this);
+    Handle<Object> argv[] = {value, tasks, deferred};
+    *result = Execution::TryCall(this, promise_handle_fn, undefined,
+                                 arraysize(argv), argv, maybe_exception);
+  }
+}
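The DCHECKs above encode the invariant that |tasks| alternates callbacks and
their deferred objects:

    //   tasks = [ callback_0, deferred_0, callback_1, deferred_1, ... ]
    //   each pair is dispatched as promise_handle(value, tasks[i], tasks[i+1])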
+
+void Isolate::PromiseResolveThenableJob(
+    Handle<PromiseResolveThenableJobInfo> info, MaybeHandle<Object>* result,
+    MaybeHandle<Object>* maybe_exception) {
+  PromiseDebugEventScope helper(this, info->debug_id(), info->debug_name());
+
+  Handle<JSReceiver> thenable(info->thenable(), this);
+  Handle<JSFunction> resolve(info->resolve(), this);
+  Handle<JSFunction> reject(info->reject(), this);
+  Handle<JSReceiver> then(info->then(), this);
   Handle<Object> argv[] = {resolve, reject};
   *result = Execution::TryCall(this, then, thenable, arraysize(argv), argv,
                                maybe_exception);
@@ -3031,18 +3213,12 @@
         Execution::TryCall(this, reject, factory()->undefined_value(),
                            arraysize(reason_arg), reason_arg, maybe_exception);
   }
-
-  if (debug()->is_active()) {
-    Handle<Object> after_debug_event(container->after_debug_event(), this);
-    if (after_debug_event->IsJSObject()) {
-      debug()->OnAsyncTaskEvent(Handle<JSObject>::cast(after_debug_event));
-    }
-  }
 }
 
 void Isolate::EnqueueMicrotask(Handle<Object> microtask) {
   DCHECK(microtask->IsJSFunction() || microtask->IsCallHandlerInfo() ||
-         microtask->IsPromiseContainer());
+         microtask->IsPromiseResolveThenableJobInfo() ||
+         microtask->IsPromiseReactionJobInfo());
   Handle<FixedArray> queue(heap()->microtask_queue(), this);
   int num_tasks = pending_microtask_count();
   DCHECK(num_tasks <= queue->length());
@@ -3094,11 +3270,16 @@
         callback(data);
       } else {
         SaveContext save(this);
-        Context* context = microtask->IsJSFunction()
-                               ? Handle<JSFunction>::cast(microtask)->context()
-                               : Handle<PromiseContainer>::cast(microtask)
-                                     ->resolve()
-                                     ->context();
+        Context* context;
+        if (microtask->IsJSFunction()) {
+          context = Handle<JSFunction>::cast(microtask)->context();
+        } else if (microtask->IsPromiseResolveThenableJobInfo()) {
+          context =
+              Handle<PromiseResolveThenableJobInfo>::cast(microtask)->context();
+        } else {
+          context = Handle<PromiseReactionJobInfo>::cast(microtask)->context();
+        }
+
         set_context(context->native_context());
         handle_scope_implementer_->EnterMicrotaskContext(
             Handle<Context>(context, this));
@@ -3112,9 +3293,13 @@
           result = Execution::TryCall(this, microtask_function,
                                       factory()->undefined_value(), 0, NULL,
                                       &maybe_exception);
+        } else if (microtask->IsPromiseResolveThenableJobInfo()) {
+          PromiseResolveThenableJob(
+              Handle<PromiseResolveThenableJobInfo>::cast(microtask), &result,
+              &maybe_exception);
         } else {
-          PromiseResolveThenableJob(Handle<PromiseContainer>::cast(microtask),
-                                    &result, &maybe_exception);
+          PromiseReactionJob(Handle<PromiseReactionJobInfo>::cast(microtask),
+                             &result, &maybe_exception);
         }
 
         handle_scope_implementer_->LeaveMicrotaskContext();
@@ -3213,7 +3398,7 @@
   Handle<FixedArray> detached_contexts = factory()->detached_contexts();
   int length = detached_contexts->length();
   detached_contexts = factory()->CopyFixedArrayAndGrow(detached_contexts, 2);
-  detached_contexts->set(length, Smi::FromInt(0));
+  detached_contexts->set(length, Smi::kZero);
   detached_contexts->set(length + 1, *cell);
   heap()->set_detached_contexts(*detached_contexts);
 }
diff --git a/src/isolate.h b/src/isolate.h
index 8d0d3b4..87bc45b 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -33,6 +33,8 @@
 
 namespace internal {
 
+class AccessCompilerData;
+class AddressToIndexHashMap;
 class BasicBlockProfiler;
 class Bootstrapper;
 class CancelableTaskManager;
@@ -44,6 +46,7 @@
 class CodeStubDescriptor;
 class CodeTracer;
 class CompilationCache;
+class CompilerDispatcherTracer;
 class CompilationStatistics;
 class ContextSlotCache;
 class Counters;
@@ -57,12 +60,12 @@
 class ExternalReferenceTable;
 class Factory;
 class HandleScopeImplementer;
+class HeapObjectToIndexHashMap;
 class HeapProfiler;
 class HStatistics;
 class HTracer;
 class InlineRuntimeFunctionsTable;
 class InnerPointerToCodeCache;
-class KeyedLookupCache;
 class Logger;
 class MaterializedObjectStore;
 class OptimizingCompileDispatcher;
@@ -116,16 +119,6 @@
 #define RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, T) \
   RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, MaybeHandle<T>())
 
-#define RETURN_RESULT(isolate, call, T)           \
-  do {                                            \
-    Handle<T> __result__;                         \
-    if (!(call).ToHandle(&__result__)) {          \
-      DCHECK((isolate)->has_pending_exception()); \
-      return MaybeHandle<T>();                    \
-    }                                             \
-    return __result__;                            \
-  } while (false)
-
 #define RETURN_RESULT_OR_FAILURE(isolate, call)     \
   do {                                              \
     Handle<Object> __result__;                      \
@@ -409,9 +402,12 @@
   V(Object*, string_stream_current_security_token, nullptr)                   \
   V(ExternalReferenceTable*, external_reference_table, nullptr)               \
   V(intptr_t*, api_external_references, nullptr)                              \
-  V(base::HashMap*, external_reference_map, nullptr)                          \
-  V(base::HashMap*, root_index_map, nullptr)                                  \
+  V(AddressToIndexHashMap*, external_reference_map, nullptr)                  \
+  V(HeapObjectToIndexHashMap*, root_index_map, nullptr)                       \
+  V(v8::DeserializeInternalFieldsCallback,                                    \
+    deserialize_internal_fields_callback, nullptr)                            \
   V(int, pending_microtask_count, 0)                                          \
+  V(int, debug_microtask_count, 0)                                            \
   V(HStatistics*, hstatistics, nullptr)                                       \
   V(CompilationStatistics*, turbo_statistics, nullptr)                        \
   V(HTracer*, htracer, nullptr)                                               \
@@ -723,7 +719,7 @@
   void ReportFailedAccessCheck(Handle<JSObject> receiver);
 
   // Exception throwing support. The caller should use the result
-  // of Throw() as its return vaue.
+  // of Throw() as its return value.
   Object* Throw(Object* exception, MessageLocation* location = NULL);
   Object* ThrowIllegalOperation();
 
@@ -868,10 +864,6 @@
     return materialized_object_store_;
   }
 
-  KeyedLookupCache* keyed_lookup_cache() {
-    return keyed_lookup_cache_;
-  }
-
   ContextSlotCache* context_slot_cache() {
     return context_slot_cache_;
   }
@@ -886,7 +878,6 @@
     DCHECK(handle_scope_implementer_);
     return handle_scope_implementer_;
   }
-  Zone* runtime_zone() { return runtime_zone_; }
 
   UnicodeCache* unicode_cache() {
     return unicode_cache_;
@@ -927,6 +918,8 @@
 
   RegExpStack* regexp_stack() { return regexp_stack_; }
 
+  List<int>* regexp_indices() { return &regexp_indices_; }
+
   unibrow::Mapping<unibrow::Ecma262Canonicalize>*
       interp_canonicalize_mapping() {
     return &regexp_macro_assembler_canonicalize_;
@@ -994,8 +987,8 @@
 
   Map* get_initial_js_array_map(ElementsKind kind);
 
-  static const int kArrayProtectorValid = 1;
-  static const int kArrayProtectorInvalid = 0;
+  static const int kProtectorValid = 1;
+  static const int kProtectorInvalid = 0;
 
   bool IsFastArrayConstructorPrototypeChainIntact();
   inline bool IsArraySpeciesLookupChainIntact();
@@ -1003,6 +996,10 @@
   bool IsIsConcatSpreadableLookupChainIntact();
   bool IsIsConcatSpreadableLookupChainIntact(JSReceiver* receiver);
   inline bool IsStringLengthOverflowIntact();
+  inline bool IsArrayIteratorLookupChainIntact();
+
+  // Avoid deopt loops if fast Array Iterators migrate to slow Array Iterators.
+  inline bool IsFastArrayIterationIntact();
 
   // On intent to set an element in object, make sure that appropriate
   // notifications occur if the set is on the elements of the array or
@@ -1022,12 +1019,15 @@
   void InvalidateHasInstanceProtector();
   void InvalidateIsConcatSpreadableProtector();
   void InvalidateStringLengthOverflowProtector();
+  void InvalidateArrayIteratorProtector();
 
   // Returns true if array is the initial array prototype in any native context.
   bool IsAnyInitialArrayPrototype(Handle<JSArray> array);
 
   CallInterfaceDescriptorData* call_descriptor_data(int index);
 
+  AccessCompilerData* access_compiler_data() { return access_compiler_data_; }
+
   void IterateDeferredHandles(ObjectVisitor* visitor);
   void LinkDeferredHandles(DeferredHandles* deferred_handles);
   void UnlinkDeferredHandles(DeferredHandles* deferred_handles);
@@ -1063,7 +1063,11 @@
 
   void* stress_deopt_count_address() { return &stress_deopt_count_; }
 
-  base::RandomNumberGenerator* random_number_generator();
+  V8_EXPORT_PRIVATE base::RandomNumberGenerator* random_number_generator();
+
+  // Generates a random number that is non-zero when masked
+  // with the provided mask.
+  int GenerateIdentityHash(uint32_t mask);
 
   // Given an address occupied by a live code object, return that object.
   Object* FindCodeObject(Address a);
@@ -1076,12 +1080,6 @@
     return id;
   }
 
-  void IncrementJsCallsFromApiCounter() { ++js_calls_from_api_counter_; }
-
-  unsigned int js_calls_from_api_counter() {
-    return js_calls_from_api_counter_;
-  }
-
   // Get (and lazily initialize) the registry for per-isolate symbols.
   Handle<JSObject> GetSymbolRegistry();
 
@@ -1101,12 +1099,16 @@
   void ReportPromiseReject(Handle<JSObject> promise, Handle<Object> value,
                            v8::PromiseRejectEvent event);
 
-  void PromiseResolveThenableJob(Handle<PromiseContainer> container,
+  void PromiseReactionJob(Handle<PromiseReactionJobInfo> info,
+                          MaybeHandle<Object>* result,
+                          MaybeHandle<Object>* maybe_exception);
+  void PromiseResolveThenableJob(Handle<PromiseResolveThenableJobInfo> info,
                                  MaybeHandle<Object>* result,
                                  MaybeHandle<Object>* maybe_exception);
   void EnqueueMicrotask(Handle<Object> microtask);
   void RunMicrotasks();
   bool IsRunningMicrotasks() const { return is_running_microtasks_; }
+  int GetNextDebugMicrotaskId() { return debug_microtask_count_++; }
 
   void SetUseCounterCallback(v8::Isolate::UseCounterCallback callback);
   void CountUsage(v8::Isolate::UseCounterFeature feature);
@@ -1151,6 +1153,10 @@
 
   AccountingAllocator* allocator() { return allocator_; }
 
+  CompilerDispatcherTracer* compiler_dispatcher_tracer() const {
+    return compiler_dispatcher_tracer_;
+  }
+
   bool IsInAnyContext(Object* object, uint32_t index);
 
   void SetRAILMode(RAILMode rail_mode);
@@ -1319,14 +1325,12 @@
   bool capture_stack_trace_for_uncaught_exceptions_;
   int stack_trace_for_uncaught_exceptions_frame_limit_;
   StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_;
-  KeyedLookupCache* keyed_lookup_cache_;
   ContextSlotCache* context_slot_cache_;
   DescriptorLookupCache* descriptor_lookup_cache_;
   HandleScopeData handle_scope_data_;
   HandleScopeImplementer* handle_scope_implementer_;
   UnicodeCache* unicode_cache_;
   AccountingAllocator* allocator_;
-  Zone* runtime_zone_;
   InnerPointerToCodeCache* inner_pointer_to_code_cache_;
   GlobalHandles* global_handles_;
   EternalHandles* eternal_handles_;
@@ -1339,8 +1343,10 @@
   unibrow::Mapping<unibrow::Ecma262Canonicalize>
       regexp_macro_assembler_canonicalize_;
   RegExpStack* regexp_stack_;
+  List<int> regexp_indices_;
   DateCache* date_cache_;
   CallInterfaceDescriptorData* call_descriptor_data_;
+  AccessCompilerData* access_compiler_data_;
   base::RandomNumberGenerator* random_number_generator_;
   base::AtomicValue<RAILMode> rail_mode_;
 
@@ -1377,6 +1383,8 @@
 
   interpreter::Interpreter* interpreter_;
 
+  CompilerDispatcherTracer* compiler_dispatcher_tracer_;
+
   typedef std::pair<InterruptCallback, void*> InterruptEntry;
   std::queue<InterruptEntry> api_interrupts_queue_;
 
@@ -1409,9 +1417,6 @@
 
   int next_optimization_id_;
 
-  // Counts javascript calls from the API. Wraps around on overflow.
-  unsigned int js_calls_from_api_counter_;
-
 #if TRACE_MAPS
   int next_unique_sfi_id_;
 #endif
diff --git a/src/js/array-iterator.js b/src/js/array-iterator.js
deleted file mode 100644
index 227f733..0000000
--- a/src/js/array-iterator.js
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -----------------------------------------------------------------------
-// Imports
-
-var arrayIterationKindSymbol =
-    utils.ImportNow("array_iteration_kind_symbol");
-var arrayIteratorNextIndexSymbol =
-    utils.ImportNow("array_iterator_next_symbol");
-var arrayIteratorObjectSymbol =
-    utils.ImportNow("array_iterator_object_symbol");
-var GlobalArray = global.Array;
-var IteratorPrototype = utils.ImportNow("IteratorPrototype");
-var iteratorSymbol = utils.ImportNow("iterator_symbol");
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-var GlobalTypedArray = %object_get_prototype_of(global.Uint8Array);
-
-// -----------------------------------------------------------------------
-
-function ArrayIterator() {}
-
-
-// TODO(wingo): Update section numbers when ES6 has stabilized.  The
-// section numbers below are already out of date as of the May 2014
-// draft.
-
-
-// 15.4.5.1 CreateArrayIterator Abstract Operation
-function CreateArrayIterator(array, kind) {
-  var object = TO_OBJECT(array);
-  var iterator = new ArrayIterator;
-  SET_PRIVATE(iterator, arrayIteratorObjectSymbol, object);
-  SET_PRIVATE(iterator, arrayIteratorNextIndexSymbol, 0);
-  SET_PRIVATE(iterator, arrayIterationKindSymbol, kind);
-  return iterator;
-}
-
-
-// 22.1.5.2.2 %ArrayIteratorPrototype%[@@iterator]
-function ArrayIteratorIterator() {
-    return this;
-}
-
-
-// ES6 section 22.1.5.2.1 %ArrayIteratorPrototype%.next( )
-function ArrayIteratorNext() {
-  var iterator = this;
-  var value = UNDEFINED;
-  var done = true;
-
-  if (!IS_RECEIVER(iterator) ||
-      !HAS_DEFINED_PRIVATE(iterator, arrayIteratorNextIndexSymbol)) {
-    throw %make_type_error(kIncompatibleMethodReceiver,
-                        'Array Iterator.prototype.next', this);
-  }
-
-  var array = GET_PRIVATE(iterator, arrayIteratorObjectSymbol);
-  if (!IS_UNDEFINED(array)) {
-    var index = GET_PRIVATE(iterator, arrayIteratorNextIndexSymbol);
-    var itemKind = GET_PRIVATE(iterator, arrayIterationKindSymbol);
-    var length = TO_UINT32(array.length);
-
-    // "sparse" is never used.
-
-    if (index >= length) {
-      SET_PRIVATE(iterator, arrayIteratorObjectSymbol, UNDEFINED);
-    } else {
-      SET_PRIVATE(iterator, arrayIteratorNextIndexSymbol, index + 1);
-
-      if (itemKind == ITERATOR_KIND_VALUES) {
-        value = array[index];
-      } else if (itemKind == ITERATOR_KIND_ENTRIES) {
-        value = [index, array[index]];
-      } else {
-        value = index;
-      }
-      done = false;
-    }
-  }
-
-  return %_CreateIterResultObject(value, done);
-}
-
-
-function ArrayEntries() {
-  return CreateArrayIterator(this, ITERATOR_KIND_ENTRIES);
-}
-
-
-function ArrayValues() {
-  return CreateArrayIterator(this, ITERATOR_KIND_VALUES);
-}
-
-
-function ArrayKeys() {
-  return CreateArrayIterator(this, ITERATOR_KIND_KEYS);
-}
-
-// TODO(littledan): Check for detached TypedArray in these three methods
-function TypedArrayEntries() {
-  if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
-  return %_Call(ArrayEntries, this);
-}
-
-
-function TypedArrayValues() {
-  if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
-  return %_Call(ArrayValues, this);
-}
-
-
-function TypedArrayKeys() {
-  if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
-  return %_Call(ArrayKeys, this);
-}
-
-
-%FunctionSetPrototype(ArrayIterator, {__proto__: IteratorPrototype});
-%FunctionSetInstanceClassName(ArrayIterator, 'Array Iterator');
-
-utils.InstallFunctions(ArrayIterator.prototype, DONT_ENUM, [
-  'next', ArrayIteratorNext
-]);
-utils.SetFunctionName(ArrayIteratorIterator, iteratorSymbol);
-%AddNamedProperty(ArrayIterator.prototype, toStringTagSymbol,
-                  "Array Iterator", READ_ONLY | DONT_ENUM);
-
-utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
-  // No 'values' since it breaks webcompat: http://crbug.com/409858
-  'entries', ArrayEntries,
-  'keys', ArrayKeys
-]);
-
-// TODO(adam): Remove these calls once 'values' is in the above
-// InstallFunctions block, as they'll be redundant.
-utils.SetFunctionName(ArrayValues, 'values');
-%FunctionRemovePrototype(ArrayValues);
-%SetNativeFlag(ArrayValues);
-
-%AddNamedProperty(GlobalArray.prototype, iteratorSymbol, ArrayValues,
-                  DONT_ENUM);
-
-utils.InstallFunctions(GlobalTypedArray.prototype, DONT_ENUM, [
-  'entries', TypedArrayEntries,
-  'keys', TypedArrayKeys,
-  'values', TypedArrayValues
-]);
-%AddNamedProperty(GlobalTypedArray.prototype,
-                  iteratorSymbol, TypedArrayValues, DONT_ENUM);
-
-// -------------------------------------------------------------------
-// Exports
-
-utils.Export(function(to) {
-  to.ArrayValues = ArrayValues;
-});
-
-%InstallToContext(["array_values_iterator", ArrayValues]);
-
-})
diff --git a/src/js/array.js b/src/js/array.js
index d10e7f1..e23810f 100644
--- a/src/js/array.js
+++ b/src/js/array.js
@@ -1539,6 +1539,8 @@
   return f;
 };
 
+var ArrayValues = getFunction("values", null, 0);
+
 // Set up non-enumerable functions of the Array.prototype object and
 // set their names.
 // Manipulate the length of some of the functions to meet
@@ -1568,9 +1570,14 @@
   "find", getFunction("find", ArrayFind, 1),
   "findIndex", getFunction("findIndex", ArrayFindIndex, 1),
   "fill", getFunction("fill", ArrayFill, 1),
-  "includes", getFunction("includes", null, 1)
+  "includes", getFunction("includes", null, 1),
+  "keys", getFunction("keys", null, 0),
+  "entries", getFunction("entries", null, 0),
+  iteratorSymbol, ArrayValues
 ]);
 
+%FunctionSetName(ArrayValues, "values");
+
 utils.InstallGetter(GlobalArray, speciesSymbol, ArraySpecies);
 
 %FinishArrayPrototypeSetup(GlobalArray.prototype);
@@ -1614,6 +1621,7 @@
   to.ArrayJoin = ArrayJoin;
   to.ArrayPush = ArrayPush;
   to.ArrayToString = ArrayToString;
+  to.ArrayValues = ArrayValues;
   to.InnerArrayCopyWithin = InnerArrayCopyWithin;
   to.InnerArrayEvery = InnerArrayEvery;
   to.InnerArrayFill = InnerArrayFill;
@@ -1638,6 +1646,7 @@
   "array_splice", ArraySplice,
   "array_slice", ArraySlice,
   "array_unshift", ArrayUnshift,
+  "array_values_iterator", ArrayValues,
 ]);
 
 });
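Paired with the deletion of src/js/array-iterator.js above: the iterator
methods are now expected to pre-exist on the prototype (the null implementation
passed to getFunction presumably means "pick up what the bootstrapper already
installed natively"). Observable behavior should be unchanged, e.g.:

    // [10, 20].entries().next()   -->  { value: [0, 10], done: false }
    // Array.prototype.values.name -->  "values" (restored by %FunctionSetName)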
diff --git a/src/js/async-await.js b/src/js/async-await.js
index b733f3d..a1cac0d 100644
--- a/src/js/async-await.js
+++ b/src/js/async-await.js
@@ -30,7 +30,6 @@
   NewPromiseCapability = from.NewPromiseCapability;
   PerformPromiseThen = from.PerformPromiseThen;
   PromiseCreate = from.PromiseCreate;
-  PromiseNextMicrotaskID = from.PromiseNextMicrotaskID;
   RejectPromise = from.RejectPromise;
   ResolvePromise = from.ResolvePromise;
 });
@@ -143,13 +142,9 @@
     %DebugPushPromise(promise);
     // Assign ID and create a recurring task to save stack for future
     // resumptions from await.
-    var id = PromiseNextMicrotaskID();
+    var id = %DebugNextMicrotaskId();
     SET_PRIVATE(promise, promiseAsyncStackIDSymbol, id);
-    %DebugAsyncTaskEvent({
-      type: "enqueueRecurring",
-      id: id,
-      name: "async function",
-    });
+    %DebugAsyncTaskEvent("enqueueRecurring", id, "async function");
   }
   return promise;
 }
@@ -158,11 +153,12 @@
   if (DEBUG_IS_ACTIVE) {
     // Cancel
     var id = GET_PRIVATE(promise, promiseAsyncStackIDSymbol);
-    %DebugAsyncTaskEvent({
-      type: "cancel",
-      id: id,
-      name: "async function",
-    });
+
+    // Don't send invalid events when catch prediction is turned on in
+    // the middle of some async operation.
+    if (!IS_UNDEFINED(id)) {
+      %DebugAsyncTaskEvent("cancel", id, "async function");
+    }
     // Pop the Promise under construction in an async function from the
     // catch prediction stack.
     %DebugPopPromise();
diff --git a/src/js/collection.js b/src/js/collection.js
index 6fe880d..a4ae904 100644
--- a/src/js/collection.js
+++ b/src/js/collection.js
@@ -14,14 +14,13 @@
 var GlobalObject = global.Object;
 var GlobalSet = global.Set;
 var hashCodeSymbol = utils.ImportNow("hash_code_symbol");
-var MathRandom;
+var MathRandom = global.Math.random;
 var MapIterator;
 var SetIterator;
 var speciesSymbol = utils.ImportNow("species_symbol");
 var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
 
 utils.Import(function(from) {
-  MathRandom = from.MathRandom;
   MapIterator = from.MapIterator;
   SetIterator = from.SetIterator;
 });
diff --git a/src/js/i18n.js b/src/js/i18n.js
index a397849..b051b09 100644
--- a/src/js/i18n.js
+++ b/src/js/i18n.js
@@ -26,23 +26,17 @@
 var InstallFunctions = utils.InstallFunctions;
 var InstallGetter = utils.InstallGetter;
 var InternalArray = utils.InternalArray;
-var InternalRegExpMatch;
-var InternalRegExpReplace
 var ObjectHasOwnProperty = utils.ImportNow("ObjectHasOwnProperty");
 var OverrideFunction = utils.OverrideFunction;
 var patternSymbol = utils.ImportNow("intl_pattern_symbol");
 var resolvedSymbol = utils.ImportNow("intl_resolved_symbol");
 var SetFunctionName = utils.SetFunctionName;
-var StringIndexOf;
 var StringSubstr = GlobalString.prototype.substr;
 var StringSubstring = GlobalString.prototype.substring;
 
 utils.Import(function(from) {
   ArrayJoin = from.ArrayJoin;
   ArrayPush = from.ArrayPush;
-  InternalRegExpMatch = from.InternalRegExpMatch;
-  InternalRegExpReplace = from.InternalRegExpReplace;
-  StringIndexOf = from.StringIndexOf;
 });
 
 // Utilities for definitions
@@ -78,9 +72,10 @@
     if (IS_UNDEFINED(this[internalName])) {
       var boundMethod;
       if (IS_UNDEFINED(length) || length === 2) {
-        boundMethod = ANONYMOUS_FUNCTION((x, y) => implementation(this, x, y));
+        boundMethod =
+          ANONYMOUS_FUNCTION((fst, snd) => implementation(this, fst, snd));
       } else if (length === 1) {
-        boundMethod = ANONYMOUS_FUNCTION(x => implementation(this, x));
+        boundMethod = ANONYMOUS_FUNCTION(fst => implementation(this, fst));
       } else {
         boundMethod = ANONYMOUS_FUNCTION((...args) => {
           // DateTimeFormat.format needs to be 0 arg method, but can still
@@ -250,7 +245,7 @@
  * Parameter locales is treated as a priority list.
  */
 function supportedLocalesOf(service, locales, options) {
-  if (IS_NULL(InternalRegExpMatch(GetServiceRE(), service))) {
+  if (IS_NULL(%regexp_internal_match(GetServiceRE(), service))) {
     throw %make_error(kWrongServiceType, service);
   }
 
@@ -298,7 +293,7 @@
   var matchedLocales = new InternalArray();
   for (var i = 0; i < requestedLocales.length; ++i) {
     // Remove -u- extension.
-    var locale = InternalRegExpReplace(
+    var locale = %RegExpInternalReplace(
         GetUnicodeExtensionRE(), requestedLocales[i], '');
     do {
       if (!IS_UNDEFINED(availableLocales[locale])) {
@@ -408,7 +403,7 @@
  * lookup algorithm.
  */
 function lookupMatcher(service, requestedLocales) {
-  if (IS_NULL(InternalRegExpMatch(GetServiceRE(), service))) {
+  if (IS_NULL(%regexp_internal_match(GetServiceRE(), service))) {
     throw %make_error(kWrongServiceType, service);
   }
 
@@ -419,12 +414,12 @@
 
   for (var i = 0; i < requestedLocales.length; ++i) {
     // Remove all extensions.
-    var locale = InternalRegExpReplace(
+    var locale = %RegExpInternalReplace(
         GetAnyExtensionRE(), requestedLocales[i], '');
     do {
       if (!IS_UNDEFINED(AVAILABLE_LOCALES[service][locale])) {
         // Return the resolved locale and extension.
-        var extensionMatch = InternalRegExpMatch(
+        var extensionMatch = %regexp_internal_match(
             GetUnicodeExtensionRE(), requestedLocales[i]);
         var extension = IS_NULL(extensionMatch) ? '' : extensionMatch[0];
         return {'locale': locale, 'extension': extension, 'position': i};
@@ -622,7 +617,7 @@
 
   // Preserve extensions of resolved locale, but swap base tags with original.
   var resolvedBase = new GlobalRegExp('^' + locales[1].base, 'g');
-  return InternalRegExpReplace(resolvedBase, resolved, locales[0].base);
+  return %RegExpInternalReplace(resolvedBase, resolved, locales[0].base);
 }
 
 
@@ -637,7 +632,7 @@
 
   for (var i in available) {
     if (HAS_OWN_PROPERTY(available, i)) {
-      var parts = InternalRegExpMatch(
+      var parts = %regexp_internal_match(
           /^([a-z]{2,3})-([A-Z][a-z]{3})-([A-Z]{2})$/, i);
       if (!IS_NULL(parts)) {
         // Build xx-ZZ. We don't care about the actual value,
@@ -709,7 +704,7 @@
  * 'of', 'au' and 'es' are special-cased and lowercased.
  */
 function toTitleCaseTimezoneLocation(location) {
-  var match = InternalRegExpMatch(GetTimezoneNameLocationPartRE(), location)
+  var match = %regexp_internal_match(GetTimezoneNameLocationPartRE(), location)
   if (IS_NULL(match)) throw %make_range_error(kExpectedLocation, location);
 
   var result = toTitleCaseWord(match[1]);
@@ -744,7 +739,7 @@
   // Optimize for the most common case; a language code alone in
   // the canonical form/lowercase (e.g. "en", "fil").
   if (IS_STRING(localeID) &&
-      !IS_NULL(InternalRegExpMatch(/^[a-z]{2,3}$/, localeID))) {
+      !IS_NULL(%regexp_internal_match(/^[a-z]{2,3}$/, localeID))) {
     return localeID;
   }
 
@@ -822,12 +817,12 @@
  */
 function isStructuallyValidLanguageTag(locale) {
   // Check if it's well-formed, including grandfathered tags.
-  if (IS_NULL(InternalRegExpMatch(GetLanguageTagRE(), locale))) {
+  if (IS_NULL(%regexp_internal_match(GetLanguageTagRE(), locale))) {
     return false;
   }
 
   // Just return if it's an x- form. It's all private.
-  if (%_Call(StringIndexOf, locale, 'x-') === 0) {
+  if (%StringIndexOf(locale, 'x-', 0) === 0) {
     return true;
   }
 
@@ -844,7 +839,7 @@
   var parts = %StringSplit(locale, '-', kMaxUint32);
   for (var i = 1; i < parts.length; i++) {
     var value = parts[i];
-    if (!IS_NULL(InternalRegExpMatch(GetLanguageVariantRE(), value)) &&
+    if (!IS_NULL(%regexp_internal_match(GetLanguageVariantRE(), value)) &&
         extensions.length === 0) {
       if (%ArrayIndexOf(variants, value, 0) === -1) {
         %_Call(ArrayPush, variants, value);
@@ -853,7 +848,7 @@
       }
     }
 
-    if (!IS_NULL(InternalRegExpMatch(GetLanguageSingletonRE(), value))) {
+    if (!IS_NULL(%regexp_internal_match(GetLanguageSingletonRE(), value))) {
       if (%ArrayIndexOf(extensions, value, 0) === -1) {
         %_Call(ArrayPush, extensions, value);
       } else {
@@ -1122,7 +1117,7 @@
  */
 function isWellFormedCurrencyCode(currency) {
   return typeof currency == "string" && currency.length == 3 &&
-      IS_NULL(InternalRegExpMatch(/[^A-Za-z]/, currency));
+      IS_NULL(%regexp_internal_match(/[^A-Za-z]/, currency));
 }
 
 
@@ -1440,57 +1435,57 @@
  */
 function fromLDMLString(ldmlString) {
   // First remove '' quoted text, so we lose 'Uhr' strings.
-  ldmlString = InternalRegExpReplace(GetQuotedStringRE(), ldmlString, '');
+  ldmlString = %RegExpInternalReplace(GetQuotedStringRE(), ldmlString, '');
 
   var options = {};
-  var match = InternalRegExpMatch(/E{3,5}/, ldmlString);
+  var match = %regexp_internal_match(/E{3,5}/, ldmlString);
   options = appendToDateTimeObject(
       options, 'weekday', match, {EEEEE: 'narrow', EEE: 'short', EEEE: 'long'});
 
-  match = InternalRegExpMatch(/G{3,5}/, ldmlString);
+  match = %regexp_internal_match(/G{3,5}/, ldmlString);
   options = appendToDateTimeObject(
       options, 'era', match, {GGGGG: 'narrow', GGG: 'short', GGGG: 'long'});
 
-  match = InternalRegExpMatch(/y{1,2}/, ldmlString);
+  match = %regexp_internal_match(/y{1,2}/, ldmlString);
   options = appendToDateTimeObject(
       options, 'year', match, {y: 'numeric', yy: '2-digit'});
 
-  match = InternalRegExpMatch(/M{1,5}/, ldmlString);
+  match = %regexp_internal_match(/M{1,5}/, ldmlString);
   options = appendToDateTimeObject(options, 'month', match, {MM: '2-digit',
       M: 'numeric', MMMMM: 'narrow', MMM: 'short', MMMM: 'long'});
 
   // Sometimes we get L instead of M for month - standalone name.
-  match = InternalRegExpMatch(/L{1,5}/, ldmlString);
+  match = %regexp_internal_match(/L{1,5}/, ldmlString);
   options = appendToDateTimeObject(options, 'month', match, {LL: '2-digit',
       L: 'numeric', LLLLL: 'narrow', LLL: 'short', LLLL: 'long'});
 
-  match = InternalRegExpMatch(/d{1,2}/, ldmlString);
+  match = %regexp_internal_match(/d{1,2}/, ldmlString);
   options = appendToDateTimeObject(
       options, 'day', match, {d: 'numeric', dd: '2-digit'});
 
-  match = InternalRegExpMatch(/h{1,2}/, ldmlString);
+  match = %regexp_internal_match(/h{1,2}/, ldmlString);
   if (match !== null) {
     options['hour12'] = true;
   }
   options = appendToDateTimeObject(
       options, 'hour', match, {h: 'numeric', hh: '2-digit'});
 
-  match = InternalRegExpMatch(/H{1,2}/, ldmlString);
+  match = %regexp_internal_match(/H{1,2}/, ldmlString);
   if (match !== null) {
     options['hour12'] = false;
   }
   options = appendToDateTimeObject(
       options, 'hour', match, {H: 'numeric', HH: '2-digit'});
 
-  match = InternalRegExpMatch(/m{1,2}/, ldmlString);
+  match = %regexp_internal_match(/m{1,2}/, ldmlString);
   options = appendToDateTimeObject(
       options, 'minute', match, {m: 'numeric', mm: '2-digit'});
 
-  match = InternalRegExpMatch(/s{1,2}/, ldmlString);
+  match = %regexp_internal_match(/s{1,2}/, ldmlString);
   options = appendToDateTimeObject(
       options, 'second', match, {s: 'numeric', ss: '2-digit'});
 
-  match = InternalRegExpMatch(/z|zzzz/, ldmlString);
+  match = %regexp_internal_match(/z|zzzz/, ldmlString);
   options = appendToDateTimeObject(
       options, 'timeZoneName', match, {z: 'short', zzzz: 'long'});
 
@@ -1819,7 +1814,7 @@
 
   // We expect only _, '-' and / besides ASCII letters.
   // All inputs should conform to Area/Location(/Location)* from now on.
-  var match = InternalRegExpMatch(GetTimezoneNameCheckRE(), tzID);
+  var match = %regexp_internal_match(GetTimezoneNameCheckRE(), tzID);
   if (IS_NULL(match)) throw %make_range_error(kExpectedTimezoneID, tzID);
 
   var result = toTitleCaseTimezoneLocation(match[1]) + '/' +
@@ -2058,7 +2053,7 @@
   }
 
   // StringSplit is slower than this.
-  var pos = %_Call(StringIndexOf, language, '-');
+  var pos = %StringIndexOf(language, '-', 0);
   if (pos != -1) {
     language = %_Call(StringSubstring, language, 0, pos);
   }
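
The lookup loops above (in supportedLocalesOf and lookupMatcher) implement BCP 47 "lookup" fallback: strip the extension subtags, then repeatedly truncate the tag at its last '-' until a supported locale is found. A minimal plain-JS sketch of that fallback, with hypothetical names and a Set standing in for the availableLocales map:

    function lookupSupported(available, requestedLocales) {
      for (const tag of requestedLocales) {
        // Drop the -u- extension, as GetUnicodeExtensionRE() does above.
        let locale = tag.replace(/-u(?:-[a-z0-9]{2,8})+/i, '');
        while (locale !== '') {
          if (available.has(locale)) return locale;
          const pos = locale.lastIndexOf('-');
          locale = pos === -1 ? '' : locale.substring(0, pos);  // truncate
        }
      }
      return undefined;
    }
    // lookupSupported(new Set(['de', 'en-US']), ['de-CH-u-ca-buddhist'])
    // returns 'de': 'de-CH' misses, truncation falls back to 'de'.
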
diff --git a/src/js/macros.py b/src/js/macros.py
index cdc3d0a..5ad578a 100644
--- a/src/js/macros.py
+++ b/src/js/macros.py
@@ -39,9 +39,6 @@
 # 2^32 - 1
 define kMaxUint32 = 4294967295;
 
-# Native cache ids.
-define STRING_TO_REGEXP_CACHE_ID = 0;
-
 # Type query macros.
 #
 # Note: We have special support for typeof(foo) === 'bar' in the compiler.
@@ -117,36 +114,6 @@
 # Macros implemented in Python.
 python macro CHAR_CODE(str) = ord(str[1]);
 
-# Layout of internal RegExpLastMatchInfo object.
-define REGEXP_NUMBER_OF_CAPTURES = 0;
-define REGEXP_LAST_SUBJECT = 1;
-define REGEXP_LAST_INPUT = 2;
-define REGEXP_FIRST_CAPTURE = 3;
-define CAPTURE0 = 3;  # Aliases REGEXP_FIRST_CAPTURE.
-define CAPTURE1 = 4;
-
-macro NUMBER_OF_CAPTURES(array) = ((array)[REGEXP_NUMBER_OF_CAPTURES]);
-macro LAST_SUBJECT(array) = ((array)[REGEXP_LAST_SUBJECT]);
-macro LAST_INPUT(array) = ((array)[REGEXP_LAST_INPUT]);
-macro CAPTURE(index) = (REGEXP_FIRST_CAPTURE + (index));
-
-# Macros for internal slot access.
-macro REGEXP_GLOBAL(regexp) = (%_RegExpFlags(regexp) & 1);
-macro REGEXP_IGNORE_CASE(regexp) = (%_RegExpFlags(regexp) & 2);
-macro REGEXP_MULTILINE(regexp) = (%_RegExpFlags(regexp) & 4);
-macro REGEXP_STICKY(regexp) = (%_RegExpFlags(regexp) & 8);
-macro REGEXP_UNICODE(regexp) = (%_RegExpFlags(regexp) & 16);
-macro REGEXP_SOURCE(regexp) = (%_RegExpSource(regexp));
-
-# For the regexp capture override array.  This has the same
-# format as the arguments to a function called from
-# String.prototype.replace.
-macro OVERRIDE_MATCH(override) = ((override)[0]);
-macro OVERRIDE_POS(override) = ((override)[(override).length - 2]);
-macro OVERRIDE_SUBJECT(override) = ((override)[(override).length - 1]);
-# 1-based so index of 1 returns the first capture
-macro OVERRIDE_CAPTURE(override, index) = ((override)[(index)]);
-
 # For messages.js
 # Matches Script::Type from objects.h
 define TYPE_NATIVE = 0;
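
The removed REGEXP_* macros read a packed flags word via the %_RegExpFlags intrinsic: bit 0 is global, bit 1 ignoreCase, bit 2 multiline, bit 3 sticky, bit 4 unicode. A plain-JS sketch of the same decoding (mirroring PatternFlags in the regexp.js diff below):

    const GLOBAL = 1, IGNORE_CASE = 2, MULTILINE = 4, STICKY = 8, UNICODE = 16;
    function flagsToString(bits) {
      return (bits & GLOBAL      ? 'g' : '') +
             (bits & IGNORE_CASE ? 'i' : '') +
             (bits & MULTILINE   ? 'm' : '') +
             (bits & UNICODE     ? 'u' : '') +
             (bits & STICKY      ? 'y' : '');
    }
    // flagsToString(GLOBAL | UNICODE) === 'gu'
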
diff --git a/src/js/math.js b/src/js/math.js
deleted file mode 100644
index 346da24..0000000
--- a/src/js/math.js
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-// The first two slots are reserved to persist PRNG state.
-define kRandomNumberStart = 2;
-
-var GlobalMath = global.Math;
-var NaN = %GetRootNaN();
-var nextRandomIndex = 0;
-var randomNumbers = UNDEFINED;
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-
-//-------------------------------------------------------------------
-// ECMA 262 - 15.8.2.14
-function MathRandom() {
-  // While creating a startup snapshot, %GenerateRandomNumbers returns a
-  // normal array containing a single random number, and has to be called for
-  // every new random number.
-  // Otherwise, it returns a pre-populated typed array of random numbers. The
-  // first two elements are reserved for the PRNG state.
-  if (nextRandomIndex <= kRandomNumberStart) {
-    randomNumbers = %GenerateRandomNumbers(randomNumbers);
-    if (%_IsTypedArray(randomNumbers)) {
-      nextRandomIndex = %_TypedArrayGetLength(randomNumbers);
-    } else {
-      nextRandomIndex = randomNumbers.length;
-    }
-  }
-  return randomNumbers[--nextRandomIndex];
-}
-
-// -------------------------------------------------------------------
-
-%AddNamedProperty(GlobalMath, toStringTagSymbol, "Math", READ_ONLY | DONT_ENUM);
-
-// Set up non-enumerable functions of the Math object and
-// set their names.
-utils.InstallFunctions(GlobalMath, DONT_ENUM, [
-  "random", MathRandom,
-]);
-
-%SetForceInlineFlag(MathRandom);
-
-// -------------------------------------------------------------------
-// Exports
-
-utils.Export(function(to) {
-  to.MathRandom = MathRandom;
-});
-
-})
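
The deleted MathRandom drains a pre-generated batch from the back and refills it once only the two reserved PRNG-state slots remain. A plain-JS sketch of that consume-and-refill pattern, with a hypothetical refill standing in for %GenerateRandomNumbers:

    const kRandomNumberStart = 2;  // slots 0..1 persist PRNG state
    let randomNumbers = null;
    let nextRandomIndex = 0;

    function cachedRandom() {
      if (nextRandomIndex <= kRandomNumberStart) {
        randomNumbers = refill(randomNumbers);
        nextRandomIndex = randomNumbers.length;
      }
      return randomNumbers[--nextRandomIndex];
    }

    function refill(batch) {  // hypothetical stand-in for %GenerateRandomNumbers
      const a = batch || new Float64Array(64);
      for (let i = kRandomNumberStart; i < a.length; i++) a[i] = Math.random();
      return a;
    }
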
diff --git a/src/js/prologue.js b/src/js/prologue.js
index 8a07a4c..dba77d7 100644
--- a/src/js/prologue.js
+++ b/src/js/prologue.js
@@ -181,10 +181,7 @@
 
   // Whitelist of exports from normal natives to experimental natives and debug.
   var expose_list = [
-    "ArrayToString",
     "FormatDateToParts",
-    "GetIterator",
-    "GetMethod",
     "MapEntries",
     "MapIterator",
     "MapIteratorNext",
@@ -196,26 +193,12 @@
     "ToLocaleLowerCaseI18N",
     "ToLocaleUpperCaseI18N",
     "ToLowerCaseI18N",
-    "ToPositiveInteger",
     "ToUpperCaseI18N",
     // From runtime:
-    "is_concat_spreadable_symbol",
-    "iterator_symbol",
-    "object_freeze",
-    "object_is_frozen",
-    "object_is_sealed",
     "promise_result_symbol",
     "promise_state_symbol",
     "reflect_apply",
-    "reflect_construct",
-    "regexp_flags_symbol",
     "to_string_tag_symbol",
-    "object_to_string",
-    "species_symbol",
-    "match_symbol",
-    "replace_symbol",
-    "search_symbol",
-    "split_symbol",
   ];
 
   var filtered_exports = {};
diff --git a/src/js/promise.js b/src/js/promise.js
index 793d60f..0b37c64 100644
--- a/src/js/promise.js
+++ b/src/js/promise.js
@@ -24,8 +24,8 @@
     utils.ImportNow("promise_reject_reactions_symbol");
 var promiseFulfillReactionsSymbol =
     utils.ImportNow("promise_fulfill_reactions_symbol");
-var promiseDeferredReactionsSymbol =
-    utils.ImportNow("promise_deferred_reactions_symbol");
+var promiseDeferredReactionSymbol =
+    utils.ImportNow("promise_deferred_reaction_symbol");
 var promiseHandledHintSymbol =
     utils.ImportNow("promise_handled_hint_symbol");
 var promiseRawSymbol = utils.ImportNow("promise_raw_symbol");
@@ -44,44 +44,13 @@
 // -------------------------------------------------------------------
 
 // [[PromiseState]] values:
+// These values should be kept in sync with PromiseStatus in globals.h
 const kPending = 0;
 const kFulfilled = +1;
-const kRejected = -1;
+const kRejected = +2;
 
-var lastMicrotaskId = 0;
-
-function PromiseNextMicrotaskID() {
-  return ++lastMicrotaskId;
-}
-
-// ES#sec-createresolvingfunctions
-// CreateResolvingFunctions ( promise )
-function CreateResolvingFunctions(promise, debugEvent) {
-  var alreadyResolved = false;
-
-  // ES#sec-promise-resolve-functions
-  // Promise Resolve Functions
-  var resolve = value => {
-    if (alreadyResolved === true) return;
-    alreadyResolved = true;
-    ResolvePromise(promise, value);
-  };
-
-  // ES#sec-promise-reject-functions
-  // Promise Reject Functions
-  var reject = reason => {
-    if (alreadyResolved === true) return;
-    alreadyResolved = true;
-    RejectPromise(promise, reason, debugEvent);
-  };
-
-  return {
-    __proto__: null,
-    resolve: resolve,
-    reject: reject
-  };
-}
-
+const kResolveCallback = 0;
+const kRejectCallback = 1;
 
 // ES#sec-promise-executor
 // Promise ( executor )
@@ -96,13 +65,15 @@
 
   var promise = PromiseInit(%_NewObject(GlobalPromise, new.target));
   // Calling the reject function would be a new exception, so debugEvent = true
-  var callbacks = CreateResolvingFunctions(promise, true);
+  // TODO(gsathya): Remove container for callbacks when this is moved
+  // to CPP/TF.
+  var callbacks = %create_resolving_functions(promise, true);
   var debug_is_active = DEBUG_IS_ACTIVE;
   try {
     if (debug_is_active) %DebugPushPromise(promise);
-    executor(callbacks.resolve, callbacks.reject);
+    executor(callbacks[kResolveCallback], callbacks[kRejectCallback]);
   } %catch (e) {  // Natives syntax to mark this catch block.
-    %_Call(callbacks.reject, UNDEFINED, e);
+    %_Call(callbacks[kRejectCallback], UNDEFINED, e);
   } finally {
     if (debug_is_active) %DebugPopPromise();
   }
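
The %create_resolving_functions runtime call used above returns a two-element container indexed by kResolveCallback and kRejectCallback, equivalent to the closure pair the removed CreateResolvingFunctions built. A plain-JS sketch of that contract (a stand-in only; the real callbacks are created in the runtime):

    function createResolvingFunctions(promise, debugEvent) {
      let alreadyResolved = false;
      return [
        value => {                       // kResolveCallback
          if (alreadyResolved) return;
          alreadyResolved = true;
          ResolvePromise(promise, value);
        },
        reason => {                      // kRejectCallback
          if (alreadyResolved) return;
          alreadyResolved = true;
          RejectPromise(promise, reason, debugEvent);
        },
      ];
    }
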
@@ -128,16 +99,11 @@
   SET_PRIVATE(promise, promiseFulfillReactionsSymbol, UNDEFINED);
   SET_PRIVATE(promise, promiseRejectReactionsSymbol, UNDEFINED);
 
-  // There are 2 possible states for this symbol --
-  // 1) UNDEFINED -- This is the zero state, no deferred object is
-  // attached to this symbol. When we want to add a new deferred we
-  // directly attach it to this symbol.
-  // 2) symbol with attached deferred object -- New deferred objects
-  // are not attached to this symbol, but instead they are directly
-  // attached to the resolve, reject callback arrays. At this point,
-  // the deferred symbol's state is stale, and the deferreds should be
-  // read from the reject, resolve callbacks.
-  SET_PRIVATE(promise, promiseDeferredReactionsSymbol, UNDEFINED);
+  // This symbol is used only when one deferred needs to be attached. When
+  // more than one deferred needs to be attached to the promise, we attach
+  // them directly to the promiseFulfillReactionsSymbol and
+  // promiseRejectReactionsSymbol and reset this back to UNDEFINED.
+  SET_PRIVATE(promise, promiseDeferredReactionSymbol, UNDEFINED);
 
   return promise;
 }
@@ -153,47 +119,35 @@
   return PromiseSet(promise, kPending, UNDEFINED);
 }
 
-function FulfillPromise(promise, status, value, promiseQueue) {
-  if (GET_PRIVATE(promise, promiseStateSymbol) === kPending) {
-    var tasks = GET_PRIVATE(promise, promiseQueue);
-    if (!IS_UNDEFINED(tasks)) {
-      var deferreds = GET_PRIVATE(promise, promiseDeferredReactionsSymbol);
-      PromiseEnqueue(value, tasks, deferreds, status);
-    }
-    PromiseSet(promise, status, value);
-  }
-}
-
 function PromiseHandle(value, handler, deferred) {
   var debug_is_active = DEBUG_IS_ACTIVE;
   try {
     if (debug_is_active) %DebugPushPromise(deferred.promise);
     var result = handler(value);
-    deferred.resolve(result);
+    if (IS_UNDEFINED(deferred.resolve)) {
+      ResolvePromise(deferred.promise, result);
+    } else {
+      %_Call(deferred.resolve, UNDEFINED, result);
+    }
   } %catch (exception) {  // Natives syntax to mark this catch block.
-    try { deferred.reject(exception); } catch (e) { }
+    try {
+      if (IS_UNDEFINED(deferred.reject)) {
+        // Pass false for debugEvent so .then chaining does not trigger
+        // redundant ExceptionEvents.
+        %PromiseReject(deferred.promise, exception, false);
+        PromiseSet(deferred.promise, kRejected, exception);
+      } else {
+        %_Call(deferred.reject, UNDEFINED, exception);
+      }
+    } catch (e) { }
   } finally {
     if (debug_is_active) %DebugPopPromise();
   }
 }
 
-function PromiseEnqueue(value, tasks, deferreds, status) {
+function PromiseDebugGetInfo(deferreds, status) {
   var id, name, instrumenting = DEBUG_IS_ACTIVE;
-  %EnqueueMicrotask(function() {
-    if (instrumenting) {
-      %DebugAsyncTaskEvent({ type: "willHandle", id: id, name: name });
-    }
-    if (IS_ARRAY(tasks)) {
-      for (var i = 0; i < tasks.length; i += 2) {
-        PromiseHandle(value, tasks[i], tasks[i + 1]);
-      }
-    } else {
-      PromiseHandle(value, tasks, deferreds);
-    }
-    if (instrumenting) {
-      %DebugAsyncTaskEvent({ type: "didHandle", id: id, name: name });
-    }
-  });
+
   if (instrumenting) {
     // In an async function, reuse the existing stack related to the outer
     // Promise. Otherwise, e.g. in a direct call to then, save a new stack.
@@ -209,11 +163,12 @@
                        promiseAsyncStackIDSymbol);
       name = "async function";
     } else {
-      id = PromiseNextMicrotaskID();
+      id = %DebugNextMicrotaskId();
       name = status === kFulfilled ? "Promise.resolve" : "Promise.reject";
-      %DebugAsyncTaskEvent({ type: "enqueue", id: id, name: name });
+      %DebugAsyncTaskEvent("enqueue", id, name);
     }
   }
+  return [id, name];
 }
 
 function PromiseAttachCallbacks(promise, deferred, onResolve, onReject) {
@@ -222,11 +177,11 @@
   if (IS_UNDEFINED(maybeResolveCallbacks)) {
     SET_PRIVATE(promise, promiseFulfillReactionsSymbol, onResolve);
     SET_PRIVATE(promise, promiseRejectReactionsSymbol, onReject);
-    SET_PRIVATE(promise, promiseDeferredReactionsSymbol, deferred);
+    SET_PRIVATE(promise, promiseDeferredReactionSymbol, deferred);
   } else if (!IS_ARRAY(maybeResolveCallbacks)) {
     var resolveCallbacks = new InternalArray();
     var rejectCallbacks = new InternalArray();
-    var existingDeferred = GET_PRIVATE(promise, promiseDeferredReactionsSymbol);
+    var existingDeferred = GET_PRIVATE(promise, promiseDeferredReactionSymbol);
 
     resolveCallbacks.push(
         maybeResolveCallbacks, existingDeferred, onResolve, deferred);
@@ -237,7 +192,7 @@
 
     SET_PRIVATE(promise, promiseFulfillReactionsSymbol, resolveCallbacks);
     SET_PRIVATE(promise, promiseRejectReactionsSymbol, rejectCallbacks);
-    SET_PRIVATE(promise, promiseDeferredReactionsSymbol, UNDEFINED);
+    SET_PRIVATE(promise, promiseDeferredReactionSymbol, UNDEFINED);
   } else {
     maybeResolveCallbacks.push(onResolve, deferred);
     GET_PRIVATE(promise, promiseRejectReactionsSymbol).push(onReject, deferred);
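
PromiseAttachCallbacks above stores reactions in three shapes to avoid allocating arrays for the common single-handler case. A plain-JS sketch of the promotion strategy (plain properties standing in for the private symbols):

    function attachCallbacks(p, deferred, onResolve, onReject) {
      if (p.fulfillReactions === undefined) {
        // First reaction: store the callbacks and the one deferred directly.
        p.fulfillReactions = onResolve;
        p.rejectReactions = onReject;
        p.deferredReaction = deferred;
      } else if (!Array.isArray(p.fulfillReactions)) {
        // Second reaction: promote to arrays of (callback, deferred) pairs.
        p.fulfillReactions =
            [p.fulfillReactions, p.deferredReaction, onResolve, deferred];
        p.rejectReactions =
            [p.rejectReactions, p.deferredReaction, onReject, deferred];
        p.deferredReaction = undefined;
      } else {
        // Later reactions: append pairs to the existing arrays.
        p.fulfillReactions.push(onResolve, deferred);
        p.rejectReactions.push(onReject, deferred);
      }
    }
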
@@ -266,16 +221,19 @@
 // Promise Resolve Functions, steps 6-13
 function ResolvePromise(promise, resolution) {
   if (resolution === promise) {
-    return RejectPromise(promise,
-                         %make_type_error(kPromiseCyclic, resolution),
-                         true);
+    var exception = %make_type_error(kPromiseCyclic, resolution);
+    %PromiseReject(promise, exception, true);
+    PromiseSet(promise, kRejected, exception);
+    return;
   }
   if (IS_RECEIVER(resolution)) {
     // 25.4.1.3.2 steps 8-12
     try {
       var then = resolution.then;
     } catch (e) {
-      return RejectPromise(promise, e, true);
+      %PromiseReject(promise, e, true);
+      PromiseSet(promise, kRejected, e);
+      return;
     }
 
     // Resolution is a native promise and if it's already resolved or
@@ -287,8 +245,9 @@
         // This goes inside the if-else to save one symbol lookup in
         // the slow path.
         var thenableValue = GET_PRIVATE(resolution, promiseResultSymbol);
-        FulfillPromise(promise, kFulfilled, thenableValue,
+        %PromiseFulfill(promise, kFulfilled, thenableValue,
                        promiseFulfillReactionsSymbol);
+        PromiseSet(promise, kFulfilled, thenableValue);
         SET_PRIVATE(promise, promiseHasHandlerSymbol, true);
         return;
       } else if (thenableState === kRejected) {
@@ -299,70 +258,37 @@
           %PromiseRevokeReject(resolution);
         }
         // Don't cause a debug event as this case is forwarding a rejection
-        RejectPromise(promise, thenableValue, false);
+        %PromiseReject(promise, thenableValue, false);
+        PromiseSet(promise, kRejected, thenableValue);
         SET_PRIVATE(resolution, promiseHasHandlerSymbol, true);
         return;
       }
     }
 
     if (IS_CALLABLE(then)) {
-      var callbacks = CreateResolvingFunctions(promise, false);
-      var id, before_debug_event, after_debug_event;
-      var instrumenting = DEBUG_IS_ACTIVE;
-      if (instrumenting) {
-        if (IsPromise(resolution)) {
+      if (DEBUG_IS_ACTIVE && IsPromise(resolution)) {
           // Mark the dependency of the new promise on the resolution
-          SET_PRIVATE(resolution, promiseHandledBySymbol, promise);
-        }
-        id = PromiseNextMicrotaskID();
-        before_debug_event = {
-          type: "willHandle",
-          id: id,
-          name: "PromiseResolveThenableJob"
-        };
-        after_debug_event = {
-          type: "didHandle",
-          id: id,
-          name: "PromiseResolveThenableJob"
-        };
-        %DebugAsyncTaskEvent({
-          type: "enqueue",
-          id: id,
-          name: "PromiseResolveThenableJob"
-        });
+        SET_PRIVATE(resolution, promiseHandledBySymbol, promise);
       }
-      %EnqueuePromiseResolveThenableJob(
-          resolution, then, callbacks.resolve, callbacks.reject,
-          before_debug_event, after_debug_event);
+      %EnqueuePromiseResolveThenableJob(promise, resolution, then);
       return;
     }
   }
-  FulfillPromise(promise, kFulfilled, resolution,
-                 promiseFulfillReactionsSymbol);
+  %PromiseFulfill(promise, kFulfilled, resolution,
+                  promiseFulfillReactionsSymbol);
+  PromiseSet(promise, kFulfilled, resolution);
 }
 
-// ES#sec-rejectpromise
-// RejectPromise ( promise, reason )
+// Only used by async-await.js
 function RejectPromise(promise, reason, debugEvent) {
-  // Check promise status to confirm that this reject has an effect.
-  // Call runtime for callbacks to the debugger or for unhandled reject.
-  // The debugEvent parameter sets whether a debug ExceptionEvent should
-  // be triggered. It should be set to false when forwarding a rejection
-  // rather than creating a new one.
-  if (GET_PRIVATE(promise, promiseStateSymbol) === kPending) {
-    // This check is redundant with checks in the runtime, but it may help
-    // avoid unnecessary runtime calls.
-    if ((debugEvent && DEBUG_IS_ACTIVE) ||
-        !HAS_DEFINED_PRIVATE(promise, promiseHasHandlerSymbol)) {
-      %PromiseRejectEvent(promise, reason, debugEvent);
-    }
-  }
-  FulfillPromise(promise, kRejected, reason, promiseRejectReactionsSymbol)
+  %PromiseReject(promise, reason, debugEvent);
+  PromiseSet(promise, kRejected, reason);
 }
 
 // Export to bindings
 function DoRejectPromise(promise, reason) {
-  return RejectPromise(promise, reason, true);
+  %PromiseReject(promise, reason, true);
+  PromiseSet(promise, kRejected, reason);
 }
 
 // ES#sec-newpromisecapability
@@ -371,11 +297,13 @@
   if (C === GlobalPromise) {
     // Optimized case, avoid extra closure.
     var promise = PromiseCreate();
-    var callbacks = CreateResolvingFunctions(promise, debugEvent);
+    // TODO(gsathya): Remove container for callbacks when this is
+    // moved to CPP/TF.
+    var callbacks = %create_resolving_functions(promise, debugEvent);
     return {
       promise: promise,
-      resolve: callbacks.resolve,
-      reject: callbacks.reject
+      resolve: callbacks[kResolveCallback],
+      reject: callbacks[kRejectCallback]
     };
   }
 
@@ -423,8 +351,8 @@
       PromiseAttachCallbacks(promise, resultCapability, onResolve, onReject);
       break;
     case kFulfilled:
-      PromiseEnqueue(GET_PRIVATE(promise, promiseResultSymbol),
-                     onResolve, resultCapability, kFulfilled);
+      %EnqueuePromiseReactionJob(GET_PRIVATE(promise, promiseResultSymbol),
+                                 onResolve, resultCapability, kFulfilled);
       break;
     case kRejected:
       if (!HAS_DEFINED_PRIVATE(promise, promiseHasHandlerSymbol)) {
@@ -432,8 +360,8 @@
         // Revoke previously triggered reject event.
         %PromiseRevokeReject(promise);
       }
-      PromiseEnqueue(GET_PRIVATE(promise, promiseResultSymbol),
-                     onReject, resultCapability, kRejected);
+      %EnqueuePromiseReactionJob(GET_PRIVATE(promise, promiseResultSymbol),
+                                 onReject, resultCapability, kRejected);
       break;
   }
 
@@ -452,9 +380,23 @@
   }
 
   var constructor = SpeciesConstructor(this, GlobalPromise);
-  // Pass false for debugEvent so .then chaining does not trigger
-  // redundant ExceptionEvents.
-  var resultCapability = NewPromiseCapability(constructor, false);
+  var resultCapability;
+
+  // The resultCapability.promise is only ever fulfilled internally,
+  // so we don't need the closures to protect against accidentally
+  // calling them multiple times.
+  if (constructor === GlobalPromise) {
+    // TODO(gsathya): Combine this into NewPromiseCapability.
+    resultCapability = {
+      promise: PromiseCreate(),
+      resolve: UNDEFINED,
+      reject: UNDEFINED
+    };
+  } else {
+    // Pass false for debugEvent so .then chaining does not trigger
+    // redundant ExceptionEvents.
+    resultCapability = NewPromiseCapability(constructor, false);
+  }
   return PerformPromiseThen(this, onResolve, onReject, resultCapability);
 }
 
@@ -477,13 +419,13 @@
   // Avoid creating resolving functions.
   if (this === GlobalPromise) {
     var promise = PromiseCreate();
-    var resolveResult = ResolvePromise(promise, x);
+    ResolvePromise(promise, x);
     return promise;
   }
 
   // debugEvent is not meaningful here, as the promise will be resolved
   var promiseCapability = NewPromiseCapability(this, true);
-  var resolveResult = %_Call(promiseCapability.resolve, UNDEFINED, x);
+  %_Call(promiseCapability.resolve, UNDEFINED, x);
   return promiseCapability.promise;
 }
 
@@ -580,7 +522,7 @@
       }
     }
   } catch (e) {
-    deferred.reject(e)
+    %_Call(deferred.reject, UNDEFINED, e);
   }
   return deferred.promise;
 }
@@ -620,12 +562,12 @@
   }
 
   var queue = GET_PRIVATE(promise, promiseRejectReactionsSymbol);
-  var deferreds = GET_PRIVATE(promise, promiseDeferredReactionsSymbol);
+  var deferred = GET_PRIVATE(promise, promiseDeferredReactionSymbol);
 
   if (IS_UNDEFINED(queue)) return false;
 
   if (!IS_ARRAY(queue)) {
-    return PromiseHasUserDefinedRejectHandlerCheck(queue, deferreds);
+    return PromiseHasUserDefinedRejectHandlerCheck(queue, deferred);
   }
 
   for (var i = 0; i < queue.length; i += 2) {
@@ -645,6 +587,10 @@
   return PromiseHasUserDefinedRejectHandlerRecursive(this);
 };
 
+function MarkPromiseAsHandled(promise) {
+  SET_PRIVATE(promise, promiseHasHandlerSymbol, true);
+}
+
 
 function PromiseSpecies() {
   return this;
@@ -676,8 +622,12 @@
   "promise_create", PromiseCreate,
   "promise_has_user_defined_reject_handler", PromiseHasUserDefinedRejectHandler,
   "promise_reject", DoRejectPromise,
+  // TODO(gsathya): Remove this once we update the promise builtin.
+  "promise_internal_reject", RejectPromise,
   "promise_resolve", ResolvePromise,
-  "promise_then", PromiseThen
+  "promise_then", PromiseThen,
+  "promise_handle", PromiseHandle,
+  "promise_debug_get_info", PromiseDebugGetInfo
 ]);
 
 // This allows extras to create promises quickly without building extra
@@ -686,14 +636,14 @@
 utils.InstallFunctions(extrasUtils, 0, [
   "createPromise", PromiseCreate,
   "resolvePromise", ResolvePromise,
-  "rejectPromise", DoRejectPromise
+  "rejectPromise", DoRejectPromise,
+  "markPromiseAsHandled", MarkPromiseAsHandled
 ]);
 
 utils.Export(function(to) {
   to.IsPromise = IsPromise;
   to.PromiseCreate = PromiseCreate;
   to.PromiseThen = PromiseThen;
-  to.PromiseNextMicrotaskID = PromiseNextMicrotaskID;
 
   to.GlobalPromise = GlobalPromise;
   to.NewPromiseCapability = NewPromiseCapability;
diff --git a/src/js/regexp.js b/src/js/regexp.js
deleted file mode 100644
index 49da45b..0000000
--- a/src/js/regexp.js
+++ /dev/null
@@ -1,1058 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-'use strict';
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalArray = global.Array;
-var GlobalObject = global.Object;
-var GlobalRegExp = global.RegExp;
-var GlobalRegExpPrototype = GlobalRegExp.prototype;
-var InternalArray = utils.InternalArray;
-var InternalPackedArray = utils.InternalPackedArray;
-var MaxSimple;
-var MinSimple;
-var RegExpExecJS = GlobalRegExp.prototype.exec;
-var matchSymbol = utils.ImportNow("match_symbol");
-var replaceSymbol = utils.ImportNow("replace_symbol");
-var searchSymbol = utils.ImportNow("search_symbol");
-var speciesSymbol = utils.ImportNow("species_symbol");
-var splitSymbol = utils.ImportNow("split_symbol");
-var SpeciesConstructor;
-
-utils.Import(function(from) {
-  MaxSimple = from.MaxSimple;
-  MinSimple = from.MinSimple;
-  SpeciesConstructor = from.SpeciesConstructor;
-});
-
-// -------------------------------------------------------------------
-
-// Property of the builtins object for recording the result of the last
-// regexp match.  The property RegExpLastMatchInfo includes the matchIndices
-// array of the last successful regexp match (an array of start/end index
-// pairs for the match and all the captured substrings), the invariant is
-// that there are at least two capture indices.  The array also contains
-// the subject string for the last successful match.
-// We use a JSObject rather than a JSArray so we don't have to manually update
-// its length.
-var RegExpLastMatchInfo = {
-  REGEXP_NUMBER_OF_CAPTURES: 2,
-  REGEXP_LAST_SUBJECT:       "",
-  REGEXP_LAST_INPUT:         UNDEFINED,  // Settable with RegExpSetInput.
-  CAPTURE0:                  0,
-  CAPTURE1:                  0
-};
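
RegExpLastMatchInfo is the backing store for the legacy RegExp statics defined further down. A runnable illustration of what those getters expose:

    /(b)(c)/.exec('abcd');
    RegExp.lastMatch;     // 'bc' — the CAPTURE0..CAPTURE1 slice of the subject
    RegExp.lastParen;     // 'c'  — last capture pair
    RegExp.leftContext;   // 'a'  — subject before the match
    RegExp.rightContext;  // 'd'  — subject after the match
    RegExp.$1;            // 'b'  — first capture
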
-
-// -------------------------------------------------------------------
-
-// ES#sec-isregexp IsRegExp ( argument )
-function IsRegExp(o) {
-  if (!IS_RECEIVER(o)) return false;
-  var is_regexp = o[matchSymbol];
-  if (!IS_UNDEFINED(is_regexp)) return TO_BOOLEAN(is_regexp);
-  return IS_REGEXP(o);
-}
-
-
-// ES#sec-regexpinitialize
-// Runtime Semantics: RegExpInitialize ( obj, pattern, flags )
-function RegExpInitialize(object, pattern, flags) {
-  pattern = IS_UNDEFINED(pattern) ? '' : TO_STRING(pattern);
-  flags = IS_UNDEFINED(flags) ? '' : TO_STRING(flags);
-  %RegExpInitializeAndCompile(object, pattern, flags);
-  return object;
-}
-
-
-function PatternFlags(pattern) {
-  return (REGEXP_GLOBAL(pattern) ? 'g' : '') +
-         (REGEXP_IGNORE_CASE(pattern) ? 'i' : '') +
-         (REGEXP_MULTILINE(pattern) ? 'm' : '') +
-         (REGEXP_UNICODE(pattern) ? 'u' : '') +
-         (REGEXP_STICKY(pattern) ? 'y' : '');
-}
-
-
-// ES#sec-regexp.prototype.compile RegExp.prototype.compile (pattern, flags)
-function RegExpCompileJS(pattern, flags) {
-  if (!IS_REGEXP(this)) {
-    throw %make_type_error(kIncompatibleMethodReceiver,
-                        "RegExp.prototype.compile", this);
-  }
-
-  if (IS_REGEXP(pattern)) {
-    if (!IS_UNDEFINED(flags)) throw %make_type_error(kRegExpFlags);
-
-    flags = PatternFlags(pattern);
-    pattern = REGEXP_SOURCE(pattern);
-  }
-
-  RegExpInitialize(this, pattern, flags);
-
-  // Return undefined for compatibility with JSC.
-  // See http://crbug.com/585775 for web compat details.
-}
-
-
-function DoRegExpExec(regexp, string, index) {
-  return %_RegExpExec(regexp, string, index, RegExpLastMatchInfo);
-}
-
-
-// This is kind of performance sensitive, so we want to avoid unnecessary
-// type checks on inputs. But we also don't want to inline it several times
-// manually, so we use a macro :-)
-macro RETURN_NEW_RESULT_FROM_MATCH_INFO(MATCHINFO, STRING)
-  var numResults = NUMBER_OF_CAPTURES(MATCHINFO) >> 1;
-  var start = MATCHINFO[CAPTURE0];
-  var end = MATCHINFO[CAPTURE1];
-  // Calculate the substring of the first match before creating the result array
-  // to avoid an unnecessary write barrier storing the first result.
-  var first = %_SubString(STRING, start, end);
-  var result = %_RegExpConstructResult(numResults, start, STRING);
-  result[0] = first;
-  if (numResults == 1) return result;
-  var j = REGEXP_FIRST_CAPTURE + 2;
-  for (var i = 1; i < numResults; i++) {
-    start = MATCHINFO[j++];
-    if (start != -1) {
-      end = MATCHINFO[j];
-      result[i] = %_SubString(STRING, start, end);
-    }
-    j++;
-  }
-  return result;
-endmacro
-
-
-
-// ES#sec-regexpexec Runtime Semantics: RegExpExec ( R, S )
-// Also takes an optional exec method in case our caller
-// has already fetched exec.
-function RegExpSubclassExec(regexp, string, exec) {
-  if (IS_UNDEFINED(exec)) {
-    exec = regexp.exec;
-  }
-  if (IS_CALLABLE(exec)) {
-    var result = %_Call(exec, regexp, string);
-    if (!IS_RECEIVER(result) && !IS_NULL(result)) {
-      throw %make_type_error(kInvalidRegExpExecResult);
-    }
-    return result;
-  }
-  return %_Call(RegExpExecJS, regexp, string);
-}
-%SetForceInlineFlag(RegExpSubclassExec);
-
-
-// ES#sec-regexp.prototype.test RegExp.prototype.test ( S )
-function RegExpSubclassTest(string) {
-  if (!IS_RECEIVER(this)) {
-    throw %make_type_error(kIncompatibleMethodReceiver,
-                        'RegExp.prototype.test', this);
-  }
-  string = TO_STRING(string);
-  var match = RegExpSubclassExec(this, string);
-  return !IS_NULL(match);
-}
-%FunctionRemovePrototype(RegExpSubclassTest);
-
-
-function RegExpToString() {
-  if (!IS_RECEIVER(this)) {
-    throw %make_type_error(
-        kIncompatibleMethodReceiver, 'RegExp.prototype.toString', this);
-  }
-  if (this === GlobalRegExpPrototype) {
-    %IncrementUseCounter(kRegExpPrototypeToString);
-  }
-  return '/' + TO_STRING(this.source) + '/' + TO_STRING(this.flags);
-}
-
-
-function AtSurrogatePair(subject, index) {
-  if (index + 1 >= subject.length) return false;
-  var first = %_StringCharCodeAt(subject, index);
-  if (first < 0xD800 || first > 0xDBFF) return false;
-  var second = %_StringCharCodeAt(subject, index + 1);
-  return second >= 0xDC00 && second <= 0xDFFF;
-}
-
-
-// Fast path implementation of RegExp.prototype[Symbol.split] which
-// doesn't properly call the underlying exec and @@species methods
-function RegExpSplit(string, limit) {
-  if (!IS_REGEXP(this)) {
-    throw %make_type_error(kIncompatibleMethodReceiver,
-                        "RegExp.prototype.@@split", this);
-  }
-  var separator = this;
-  var subject = TO_STRING(string);
-
-  limit = (IS_UNDEFINED(limit)) ? kMaxUint32 : TO_UINT32(limit);
-  var length = subject.length;
-
-  if (limit === 0) return [];
-
-  if (length === 0) {
-    if (DoRegExpExec(separator, subject, 0, 0) !== null) return [];
-    return [subject];
-  }
-
-  var currentIndex = 0;
-  var startIndex = 0;
-  var startMatch = 0;
-  var result = new InternalArray();
-
-  outer_loop:
-  while (true) {
-    if (startIndex === length) {
-      result[result.length] = %_SubString(subject, currentIndex, length);
-      break;
-    }
-
-    var matchInfo = DoRegExpExec(separator, subject, startIndex);
-    if (matchInfo === null || length === (startMatch = matchInfo[CAPTURE0])) {
-      result[result.length] = %_SubString(subject, currentIndex, length);
-      break;
-    }
-    var endIndex = matchInfo[CAPTURE1];
-
-    // We ignore a zero-length match at the currentIndex.
-    if (startIndex === endIndex && endIndex === currentIndex) {
-      if (REGEXP_UNICODE(this) && AtSurrogatePair(subject, startIndex)) {
-        startIndex += 2;
-      } else {
-        startIndex++;
-      }
-      continue;
-    }
-
-    result[result.length] = %_SubString(subject, currentIndex, startMatch);
-
-    if (result.length === limit) break;
-
-    var matchinfo_len = NUMBER_OF_CAPTURES(matchInfo) + REGEXP_FIRST_CAPTURE;
-    for (var i = REGEXP_FIRST_CAPTURE + 2; i < matchinfo_len; ) {
-      var start = matchInfo[i++];
-      var end = matchInfo[i++];
-      if (end != -1) {
-        result[result.length] = %_SubString(subject, start, end);
-      } else {
-        result[result.length] = UNDEFINED;
-      }
-      if (result.length === limit) break outer_loop;
-    }
-
-    startIndex = currentIndex = endIndex;
-  }
-
-  var array_result = [];
-  %MoveArrayContents(result, array_result);
-  return array_result;
-}
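
The fast path above preserves observable String.prototype.split semantics; runnable examples of the behavior it implements (captures spliced into the result, limit capping the output):

    'a1b2c'.split(/(\d)/);   // ['a', '1', 'b', '2', 'c'] — captures included
    'a1b2c'.split(/\d/, 2);  // ['a', 'b'] — limit stops after two elements
    ''.split(/x/);           // [''] — empty subject with no match
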
-
-
-// ES#sec-regexp.prototype-@@split
-// RegExp.prototype [ @@split ] ( string, limit )
-function RegExpSubclassSplit(string, limit) {
-  if (!IS_RECEIVER(this)) {
-    throw %make_type_error(kIncompatibleMethodReceiver,
-                        "RegExp.prototype.@@split", this);
-  }
-  string = TO_STRING(string);
-  var constructor = SpeciesConstructor(this, GlobalRegExp);
-  var flags = TO_STRING(this.flags);
-
-  // TODO(adamk): this fast path is wrong as it doesn't ensure that 'exec'
-  // is actually a data property on RegExp.prototype.
-  if (IS_REGEXP(this) && constructor === GlobalRegExp) {
-    var exec = this.exec;
-    if (exec === RegExpExecJS) {
-      return %_Call(RegExpSplit, this, string, limit);
-    }
-  }
-
-  var unicode = %StringIndexOf(flags, 'u', 0) >= 0;
-  var sticky = %StringIndexOf(flags, 'y', 0) >= 0;
-  var newFlags = sticky ? flags : flags + "y";
-  var splitter = new constructor(this, newFlags);
-  var array = new GlobalArray();
-  var arrayIndex = 0;
-  var lim = (IS_UNDEFINED(limit)) ? kMaxUint32 : TO_UINT32(limit);
-  var size = string.length;
-  var prevStringIndex = 0;
-  if (lim === 0) return array;
-  var result;
-  if (size === 0) {
-    result = RegExpSubclassExec(splitter, string);
-    if (IS_NULL(result)) %AddElement(array, 0, string);
-    return array;
-  }
-  var stringIndex = prevStringIndex;
-  while (stringIndex < size) {
-    splitter.lastIndex = stringIndex;
-    result = RegExpSubclassExec(splitter, string);
-    if (IS_NULL(result)) {
-      stringIndex += AdvanceStringIndex(string, stringIndex, unicode);
-    } else {
-      var end = MinSimple(TO_LENGTH(splitter.lastIndex), size);
-      if (end === prevStringIndex) {
-        stringIndex += AdvanceStringIndex(string, stringIndex, unicode);
-      } else {
-        %AddElement(
-            array, arrayIndex,
-            %_SubString(string, prevStringIndex, stringIndex));
-        arrayIndex++;
-        if (arrayIndex === lim) return array;
-        prevStringIndex = end;
-        var numberOfCaptures = MaxSimple(TO_LENGTH(result.length), 0);
-        for (var i = 1; i < numberOfCaptures; i++) {
-          %AddElement(array, arrayIndex, result[i]);
-          arrayIndex++;
-          if (arrayIndex === lim) return array;
-        }
-        stringIndex = prevStringIndex;
-      }
-    }
-  }
-  %AddElement(array, arrayIndex,
-                     %_SubString(string, prevStringIndex, size));
-  return array;
-}
-%FunctionRemovePrototype(RegExpSubclassSplit);
-
-
-// ES#sec-regexp.prototype-@@match
-// RegExp.prototype [ @@match ] ( string )
-function RegExpSubclassMatch(string) {
-  if (!IS_RECEIVER(this)) {
-    throw %make_type_error(kIncompatibleMethodReceiver,
-                        "RegExp.prototype.@@match", this);
-  }
-  string = TO_STRING(string);
-  var global = this.global;
-  if (!global) return RegExpSubclassExec(this, string);
-  var unicode = this.unicode;
-  this.lastIndex = 0;
-  var array = new InternalArray();
-  var n = 0;
-  var result;
-  while (true) {
-    result = RegExpSubclassExec(this, string);
-    if (IS_NULL(result)) {
-      if (n === 0) return null;
-      break;
-    }
-    var matchStr = TO_STRING(result[0]);
-    array[n] = matchStr;
-    if (matchStr === "") SetAdvancedStringIndex(this, string, unicode);
-    n++;
-  }
-  var resultArray = [];
-  %MoveArrayContents(array, resultArray);
-  return resultArray;
-}
-%FunctionRemovePrototype(RegExpSubclassMatch);
-
-
-// Legacy implementation of RegExp.prototype[Symbol.replace] which
-// doesn't properly call the underlying exec method.
-
-// TODO(lrn): This array will survive indefinitely if replace is never
-// called again. However, it will be empty, since the contents are cleared
-// in the finally block.
-var reusableReplaceArray = new InternalArray(4);
-
-// Helper function for replacing regular expressions with the result of a
-// function application in String.prototype.replace.
-function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
-  var resultArray = reusableReplaceArray;
-  if (resultArray) {
-    reusableReplaceArray = null;
-  } else {
-    // We are inside a nested replace (replace called from the replacement
-    // function of another replace), or we failed to restore the reusable
-    // array after an exception in a replacement function. Create a new
-    // array to use in the future, or until the original is written back.
-    resultArray = new InternalArray(16);
-  }
-  var res = %RegExpExecMultiple(regexp,
-                                subject,
-                                RegExpLastMatchInfo,
-                                resultArray);
-  regexp.lastIndex = 0;
-  if (IS_NULL(res)) {
-    // No matches at all.
-    reusableReplaceArray = resultArray;
-    return subject;
-  }
-  var len = res.length;
-  if (NUMBER_OF_CAPTURES(RegExpLastMatchInfo) == 2) {
-    // If the number of captures is two then there are no explicit captures in
-    // the regexp, just the implicit capture that captures the whole match.  In
-    // this case we can simplify quite a bit and end up with something faster.
-    // The builder will consist of some integers that indicate slices of the
-    // input string and some replacements that were returned from the replace
-    // function.
-    var match_start = 0;
-    for (var i = 0; i < len; i++) {
-      var elem = res[i];
-      if (%_IsSmi(elem)) {
-        // Integers represent slices of the original string.
-        if (elem > 0) {
-          match_start = (elem >> 11) + (elem & 0x7ff);
-        } else {
-          match_start = res[++i] - elem;
-        }
-      } else {
-        var func_result = replace(elem, match_start, subject);
-        // Overwrite the i'th element in the results with the string we got
-        // back from the callback function.
-        res[i] = TO_STRING(func_result);
-        match_start += elem.length;
-      }
-    }
-  } else {
-    for (var i = 0; i < len; i++) {
-      var elem = res[i];
-      if (!%_IsSmi(elem)) {
-        // elem must be an Array.
-        // Use the apply argument as backing for global RegExp properties.
-        var func_result = %reflect_apply(replace, UNDEFINED, elem);
-        // Overwrite the i'th element in the results with the string we got
-        // back from the callback function.
-        res[i] = TO_STRING(func_result);
-      }
-    }
-  }
-  var result = %StringBuilderConcat(res, len, subject);
-  resultArray.length = 0;
-  reusableReplaceArray = resultArray;
-  return result;
-}
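
Per the decoding in the loop above, %RegExpExecMultiple returns replaced strings interleaved with Smi-encoded slices of the subject: a positive Smi appears to pack (start << 11) | length, so the position after the slice is (elem >> 11) + (elem & 0x7ff), while slices too long for that packing are stored as a negative value whose following array element completes the computation (res[++i] - elem). A worked decode under that reading:

    const elem = (5 << 11) | 3;                        // slice: start 5, length 3
    const matchStart = (elem >> 11) + (elem & 0x7ff);  // 8 — where the match begins
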
-
-
-// Compute the string of a given regular expression capture.
-function CaptureString(string, lastCaptureInfo, index) {
-  // Scale the index.
-  var scaled = index << 1;
-  // Compute start and end.
-  var start = lastCaptureInfo[CAPTURE(scaled)];
-  // If start isn't valid, return undefined.
-  if (start < 0) return;
-  var end = lastCaptureInfo[CAPTURE(scaled + 1)];
-  return %_SubString(string, start, end);
-}
-
-
-function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
-  var matchInfo = DoRegExpExec(regexp, subject, 0);
-  if (IS_NULL(matchInfo)) {
-    regexp.lastIndex = 0;
-    return subject;
-  }
-  var index = matchInfo[CAPTURE0];
-  var result = %_SubString(subject, 0, index);
-  var endOfMatch = matchInfo[CAPTURE1];
-  // Compute the parameter list consisting of the match, captures, index,
-  // and subject for the replace function invocation.
-  // The number of captures plus one for the match.
-  var m = NUMBER_OF_CAPTURES(matchInfo) >> 1;
-  var replacement;
-  if (m == 1) {
-    // No captures, only the match, which is always valid.
-    var s = %_SubString(subject, index, endOfMatch);
-    // Don't call directly to avoid exposing the built-in global object.
-    replacement = replace(s, index, subject);
-  } else {
-    var parameters = new InternalArray(m + 2);
-    for (var j = 0; j < m; j++) {
-      parameters[j] = CaptureString(subject, matchInfo, j);
-    }
-    parameters[j] = index;
-    parameters[j + 1] = subject;
-
-    replacement = %reflect_apply(replace, UNDEFINED, parameters);
-  }
-
-  result += replacement;  // The add method converts to string if necessary.
-  // Can't use matchInfo any more from here, since the function could
-  // overwrite it.
-  return result + %_SubString(subject, endOfMatch, subject.length);
-}
-
-// Wraps access to matchInfo's captures into a format understood by
-// GetSubstitution.
-function MatchInfoCaptureWrapper(matches, subject) {
-  this.length = NUMBER_OF_CAPTURES(matches) >> 1;
-  this.match = matches;
-  this.subject = subject;
-}
-
-MatchInfoCaptureWrapper.prototype.at = function(ix) {
-  const match = this.match;
-  const start = match[CAPTURE(ix << 1)];
-  if (start < 0) return UNDEFINED;
-  return %_SubString(this.subject, start, match[CAPTURE((ix << 1) + 1)]);
-};
-%SetForceInlineFlag(MatchInfoCaptureWrapper.prototype.at);
-
-function ArrayCaptureWrapper(array) {
-  this.length = array.length;
-  this.array = array;
-}
-
-ArrayCaptureWrapper.prototype.at = function(ix) {
-  return this.array[ix];
-};
-%SetForceInlineFlag(ArrayCaptureWrapper.prototype.at);
-
-function RegExpReplace(string, replace) {
-  if (!IS_REGEXP(this)) {
-    throw %make_type_error(kIncompatibleMethodReceiver,
-                        "RegExp.prototype.@@replace", this);
-  }
-  var subject = TO_STRING(string);
-  var search = this;
-
-  if (!IS_CALLABLE(replace)) {
-    replace = TO_STRING(replace);
-
-    if (!REGEXP_GLOBAL(search)) {
-      // Non-global regexp search, string replace.
-      var match = DoRegExpExec(search, subject, 0);
-      if (match == null) {
-        search.lastIndex = 0
-        return subject;
-      }
-      if (replace.length == 0) {
-        return %_SubString(subject, 0, match[CAPTURE0]) +
-               %_SubString(subject, match[CAPTURE1], subject.length)
-      }
-      const captures = new MatchInfoCaptureWrapper(match, subject);
-      const start = match[CAPTURE0];
-      const end = match[CAPTURE1];
-
-      const prefix = %_SubString(subject, 0, start);
-      const matched = %_SubString(subject, start, end);
-      const suffix = %_SubString(subject, end, subject.length);
-
-      return prefix +
-             GetSubstitution(matched, subject, start, captures, replace) +
-             suffix;
-    }
-
-    // Global regexp search, string replace.
-    search.lastIndex = 0;
-    return %StringReplaceGlobalRegExpWithString(
-        subject, search, replace, RegExpLastMatchInfo);
-  }
-
-  if (REGEXP_GLOBAL(search)) {
-    // Global regexp search, function replace.
-    return StringReplaceGlobalRegExpWithFunction(subject, search, replace);
-  }
-  // Non-global regexp search, function replace.
-  return StringReplaceNonGlobalRegExpWithFunction(subject, search, replace);
-}
-
-
-// ES#sec-getsubstitution
-// GetSubstitution(matched, str, position, captures, replacement)
-// Expand the $-expressions in the string and return a new string with
-// the result.
-function GetSubstitution(matched, string, position, captures, replacement) {
-  var matchLength = matched.length;
-  var stringLength = string.length;
-  var capturesLength = captures.length;
-  var tailPos = position + matchLength;
-  var result = "";
-  var pos, expansion, peek, next, scaledIndex, advance, newScaledIndex;
-
-  var next = %StringIndexOf(replacement, '$', 0);
-  if (next < 0) {
-    result += replacement;
-    return result;
-  }
-
-  if (next > 0) result += %_SubString(replacement, 0, next);
-
-  while (true) {
-    expansion = '$';
-    pos = next + 1;
-    if (pos < replacement.length) {
-      peek = %_StringCharCodeAt(replacement, pos);
-      if (peek == 36) {         // $$
-        ++pos;
-        result += '$';
-      } else if (peek == 38) {  // $& - match
-        ++pos;
-        result += matched;
-      } else if (peek == 96) {  // $` - prefix
-        ++pos;
-        result += %_SubString(string, 0, position);
-      } else if (peek == 39) {  // $' - suffix
-        ++pos;
-        result += %_SubString(string, tailPos, stringLength);
-      } else if (peek >= 48 && peek <= 57) {
-        // Valid indices are $1 .. $9, $01 .. $09 and $10 .. $99
-        scaledIndex = (peek - 48);
-        advance = 1;
-        if (pos + 1 < replacement.length) {
-          next = %_StringCharCodeAt(replacement, pos + 1);
-          if (next >= 48 && next <= 57) {
-            newScaledIndex = scaledIndex * 10 + ((next - 48));
-            if (newScaledIndex < capturesLength) {
-              scaledIndex = newScaledIndex;
-              advance = 2;
-            }
-          }
-        }
-        if (scaledIndex != 0 && scaledIndex < capturesLength) {
-          var capture = captures.at(scaledIndex);
-          if (!IS_UNDEFINED(capture)) result += capture;
-          pos += advance;
-        } else {
-          result += '$';
-        }
-      } else {
-        result += '$';
-      }
-    } else {
-      result += '$';
-    }
-
-    // Go to the next $ in the replacement.
-    next = %StringIndexOf(replacement, '$', pos);
-
-    // Return if there are no more $ characters in the replacement. If we
-    // haven't reached the end, we need to append the suffix.
-    if (next < 0) {
-      if (pos < replacement.length) {
-        result += %_SubString(replacement, pos, replacement.length);
-      }
-      return result;
-    }
-
-    // Append substring between the previous and the next $ character.
-    if (next > pos) {
-      result += %_SubString(replacement, pos, next);
-    }
-  }
-  return result;
-}
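
Runnable examples of the $-expansions GetSubstitution handles above (standard String.prototype.replace semantics):

    'abc'.replace(/b/, '[$&]');    // 'a[b]c'  — $& is the match
    'abc'.replace(/b/, "[$`$']");  // 'a[ac]c' — $` prefix, $' suffix
    'abc'.replace(/b/, '$$');      // 'a$c'    — $$ is a literal dollar
    '31-12'.replace(/(\d+)-(\d+)/, '$2/$1');  // '12/31' — numbered captures
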
-
-
-// ES#sec-advancestringindex
-// AdvanceStringIndex ( S, index, unicode )
-function AdvanceStringIndex(string, index, unicode) {
-  var increment = 1;
-  if (unicode) {
-    var first = %_StringCharCodeAt(string, index);
-    if (first >= 0xD800 && first <= 0xDBFF && string.length > index + 1) {
-      var second = %_StringCharCodeAt(string, index + 1);
-      if (second >= 0xDC00 && second <= 0xDFFF) {
-        increment = 2;
-      }
-    }
-  }
-  return increment;
-}
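
AdvanceStringIndex is observable through zero-width matches; a runnable check (U+1F600 occupies two code units):

    const s = 'a\u{1F600}b';           // length 4
    [...s.matchAll(/(?:)/gu)].length;  // 4 — unicode stepping skips the pair
    [...s.matchAll(/(?:)/g)].length;   // 5 — code-unit stepping visits both halves
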
-
-
-function SetAdvancedStringIndex(regexp, string, unicode) {
-  var lastIndex = regexp.lastIndex;
-  regexp.lastIndex = lastIndex +
-                     AdvanceStringIndex(string, lastIndex, unicode);
-}
-
-
-// ES#sec-regexp.prototype-@@replace
-// RegExp.prototype [ @@replace ] ( string, replaceValue )
-function RegExpSubclassReplace(string, replace) {
-  if (!IS_RECEIVER(this)) {
-    throw %make_type_error(kIncompatibleMethodReceiver,
-                        "RegExp.prototype.@@replace", this);
-  }
-  string = TO_STRING(string);
-  var length = string.length;
-  var functionalReplace = IS_CALLABLE(replace);
-  if (!functionalReplace) replace = TO_STRING(replace);
-  var global = TO_BOOLEAN(this.global);
-  if (global) {
-    var unicode = TO_BOOLEAN(this.unicode);
-    this.lastIndex = 0;
-  }
-
-  // TODO(adamk): this fast path is wrong as it doesn't ensure that 'exec'
-  // is actually a data property on RegExp.prototype.
-  var exec;
-  if (IS_REGEXP(this)) {
-    exec = this.exec;
-    if (exec === RegExpExecJS) {
-      return %_Call(RegExpReplace, this, string, replace);
-    }
-  }
-
-  var results = new InternalArray();
-  var result, replacement;
-  while (true) {
-    result = RegExpSubclassExec(this, string, exec);
-    // Ensure exec will be read again on the next loop through.
-    exec = UNDEFINED;
-    if (IS_NULL(result)) {
-      break;
-    } else {
-      results.push(result);
-      if (!global) break;
-      var matchStr = TO_STRING(result[0]);
-      if (matchStr === "") SetAdvancedStringIndex(this, string, unicode);
-    }
-  }
-  var accumulatedResult = "";
-  var nextSourcePosition = 0;
-  for (var i = 0; i < results.length; i++) {
-    result = results[i];
-    var capturesLength = MaxSimple(TO_LENGTH(result.length), 0);
-    var matched = TO_STRING(result[0]);
-    var matchedLength = matched.length;
-    var position = MaxSimple(MinSimple(TO_INTEGER(result.index), length), 0);
-    var captures = new InternalArray();
-    for (var n = 0; n < capturesLength; n++) {
-      var capture = result[n];
-      if (!IS_UNDEFINED(capture)) capture = TO_STRING(capture);
-      captures[n] = capture;
-    }
-    if (functionalReplace) {
-      var parameters = new InternalArray(capturesLength + 2);
-      for (var j = 0; j < capturesLength; j++) {
-        parameters[j] = captures[j];
-      }
-      parameters[j] = position;
-      parameters[j + 1] = string;
-      replacement = %reflect_apply(replace, UNDEFINED, parameters, 0,
-                                   parameters.length);
-    } else {
-      const capturesWrapper = new ArrayCaptureWrapper(captures);
-      replacement = GetSubstitution(matched, string, position, capturesWrapper,
-                                    replace);
-    }
-    if (position >= nextSourcePosition) {
-      accumulatedResult +=
-        %_SubString(string, nextSourcePosition, position) + replacement;
-      nextSourcePosition = position + matchedLength;
-    }
-  }
-  if (nextSourcePosition >= length) return accumulatedResult;
-  return accumulatedResult + %_SubString(string, nextSourcePosition, length);
-}
-%FunctionRemovePrototype(RegExpSubclassReplace);
-
-
-// ES#sec-regexp.prototype-@@search
-// RegExp.prototype [ @@search ] ( string )
-function RegExpSubclassSearch(string) {
-  if (!IS_RECEIVER(this)) {
-    throw %make_type_error(kIncompatibleMethodReceiver,
-                        "RegExp.prototype.@@search", this);
-  }
-  string = TO_STRING(string);
-  var previousLastIndex = this.lastIndex;
-  if (previousLastIndex != 0) this.lastIndex = 0;
-  var result = RegExpSubclassExec(this, string);
-  var currentLastIndex = this.lastIndex;
-  if (currentLastIndex != previousLastIndex) this.lastIndex = previousLastIndex;
-  if (IS_NULL(result)) return -1;
-  return result.index;
-}
-%FunctionRemovePrototype(RegExpSubclassSearch);
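
The lastIndex save/restore above keeps @@search free of side effects on the receiver; a runnable check:

    const re = /b/g;
    re.lastIndex = 5;
    'abc'.search(re);  // 1
    re.lastIndex;      // 5 — restored despite the internal exec call
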
-
-
-// Getters for the static properties lastMatch, lastParen, leftContext, and
-// rightContext of the RegExp constructor.  The properties are computed based
-// on the captures array of the last successful match and the subject string
-// of the last successful match.
-function RegExpGetLastMatch() {
-  var regExpSubject = LAST_SUBJECT(RegExpLastMatchInfo);
-  return %_SubString(regExpSubject,
-                     RegExpLastMatchInfo[CAPTURE0],
-                     RegExpLastMatchInfo[CAPTURE1]);
-}
-
-
-function RegExpGetLastParen() {
-  var length = NUMBER_OF_CAPTURES(RegExpLastMatchInfo);
-  if (length <= 2) return '';  // There were no captures.
-  // We match the SpiderMonkey behavior: return the substring defined by the
-  // last pair (after the first pair) of elements of the capture array even if
-  // it is empty.
-  var regExpSubject = LAST_SUBJECT(RegExpLastMatchInfo);
-  var start = RegExpLastMatchInfo[CAPTURE(length - 2)];
-  var end = RegExpLastMatchInfo[CAPTURE(length - 1)];
-  if (start != -1 && end != -1) {
-    return %_SubString(regExpSubject, start, end);
-  }
-  return "";
-}
-
-
-function RegExpGetLeftContext() {
-  var start_index;
-  var subject;
-  start_index = RegExpLastMatchInfo[CAPTURE0];
-  subject = LAST_SUBJECT(RegExpLastMatchInfo);
-  return %_SubString(subject, 0, start_index);
-}
-
-
-function RegExpGetRightContext() {
-  var start_index;
-  var subject;
-  start_index = RegExpLastMatchInfo[CAPTURE1];
-  subject = LAST_SUBJECT(RegExpLastMatchInfo);
-  return %_SubString(subject, start_index, subject.length);
-}
-
-
-// The properties $1..$9 are the first nine capturing substrings of the last
-// successful match, or ''.  The function RegExpMakeCaptureGetter will be
-// called with indices from 1 to 9.
-function RegExpMakeCaptureGetter(n) {
-  return function foo() {
-    var index = n * 2;
-    if (index >= NUMBER_OF_CAPTURES(RegExpLastMatchInfo)) return '';
-    var matchStart = RegExpLastMatchInfo[CAPTURE(index)];
-    var matchEnd = RegExpLastMatchInfo[CAPTURE(index + 1)];
-    if (matchStart == -1 || matchEnd == -1) return '';
-    return %_SubString(LAST_SUBJECT(RegExpLastMatchInfo), matchStart, matchEnd);
-  };
-}
-
-
-// ES6 21.2.5.3.
-function RegExpGetFlags() {
-  if (!IS_RECEIVER(this)) {
-    throw %make_type_error(
-        kRegExpNonObject, "RegExp.prototype.flags", TO_STRING(this));
-  }
-  var result = '';
-  if (this.global) result += 'g';
-  if (this.ignoreCase) result += 'i';
-  if (this.multiline) result += 'm';
-  if (this.unicode) result += 'u';
-  if (this.sticky) result += 'y';
-  return result;
-}
-
-
-// ES6 21.2.5.4.
-function RegExpGetGlobal() {
-  if (!IS_REGEXP(this)) {
-    if (this === GlobalRegExpPrototype) {
-      %IncrementUseCounter(kRegExpPrototypeOldFlagGetter);
-      return UNDEFINED;
-    }
-    throw %make_type_error(kRegExpNonRegExp, "RegExp.prototype.global");
-  }
-  return TO_BOOLEAN(REGEXP_GLOBAL(this));
-}
-%SetForceInlineFlag(RegExpGetGlobal);
-
-
-// ES6 21.2.5.5.
-function RegExpGetIgnoreCase() {
-  if (!IS_REGEXP(this)) {
-    if (this === GlobalRegExpPrototype) {
-      %IncrementUseCounter(kRegExpPrototypeOldFlagGetter);
-      return UNDEFINED;
-    }
-    throw %make_type_error(kRegExpNonRegExp, "RegExp.prototype.ignoreCase");
-  }
-  return TO_BOOLEAN(REGEXP_IGNORE_CASE(this));
-}
-
-
-// ES6 21.2.5.7.
-function RegExpGetMultiline() {
-  if (!IS_REGEXP(this)) {
-    if (this === GlobalRegExpPrototype) {
-      %IncrementUseCounter(kRegExpPrototypeOldFlagGetter);
-      return UNDEFINED;
-    }
-    throw %make_type_error(kRegExpNonRegExp, "RegExp.prototype.multiline");
-  }
-  return TO_BOOLEAN(REGEXP_MULTILINE(this));
-}
-
-
-// ES6 21.2.5.10.
-function RegExpGetSource() {
-  if (!IS_REGEXP(this)) {
-    if (this === GlobalRegExpPrototype) {
-      %IncrementUseCounter(kRegExpPrototypeSourceGetter);
-      return "(?:)";
-    }
-    throw %make_type_error(kRegExpNonRegExp, "RegExp.prototype.source");
-  }
-  return REGEXP_SOURCE(this);
-}
-
-
-// ES6 21.2.5.12.
-function RegExpGetSticky() {
-  if (!IS_REGEXP(this)) {
-    if (this === GlobalRegExpPrototype) {
-      %IncrementUseCounter(kRegExpPrototypeStickyGetter);
-      return UNDEFINED;
-    }
-    throw %make_type_error(kRegExpNonRegExp, "RegExp.prototype.sticky");
-  }
-  return TO_BOOLEAN(REGEXP_STICKY(this));
-}
-%SetForceInlineFlag(RegExpGetSticky);
-
-
-// ES6 21.2.5.15.
-function RegExpGetUnicode() {
-  if (!IS_REGEXP(this)) {
-    if (this === GlobalRegExpPrototype) {
-      %IncrementUseCounter(kRegExpPrototypeUnicodeGetter);
-      return UNDEFINED;
-    }
-    throw %make_type_error(kRegExpNonRegExp, "RegExp.prototype.unicode");
-  }
-  return TO_BOOLEAN(REGEXP_UNICODE(this));
-}
-%SetForceInlineFlag(RegExpGetUnicode);
-
-
-function RegExpSpecies() {
-  return this;
-}
-
-
-// -------------------------------------------------------------------
-
-utils.InstallGetter(GlobalRegExp, speciesSymbol, RegExpSpecies);
-
-utils.InstallFunctions(GlobalRegExp.prototype, DONT_ENUM, [
-  "test", RegExpSubclassTest,
-  "toString", RegExpToString,
-  "compile", RegExpCompileJS,
-  matchSymbol, RegExpSubclassMatch,
-  replaceSymbol, RegExpSubclassReplace,
-  searchSymbol, RegExpSubclassSearch,
-  splitSymbol, RegExpSubclassSplit,
-]);
-
-utils.InstallGetter(GlobalRegExp.prototype, 'flags', RegExpGetFlags);
-utils.InstallGetter(GlobalRegExp.prototype, 'global', RegExpGetGlobal);
-utils.InstallGetter(GlobalRegExp.prototype, 'ignoreCase', RegExpGetIgnoreCase);
-utils.InstallGetter(GlobalRegExp.prototype, 'multiline', RegExpGetMultiline);
-utils.InstallGetter(GlobalRegExp.prototype, 'source', RegExpGetSource);
-utils.InstallGetter(GlobalRegExp.prototype, 'sticky', RegExpGetSticky);
-utils.InstallGetter(GlobalRegExp.prototype, 'unicode', RegExpGetUnicode);
-
-// The properties `input` and `$_` are aliases for each other.  When this
-// value is set the value it is set to is coerced to a string.
-// Getter and setter for the input.
-var RegExpGetInput = function() {
-  var regExpInput = LAST_INPUT(RegExpLastMatchInfo);
-  return IS_UNDEFINED(regExpInput) ? "" : regExpInput;
-};
-var RegExpSetInput = function(string) {
-  LAST_INPUT(RegExpLastMatchInfo) = TO_STRING(string);
-};
-
-// TODO(jgruber): All of these getters and setters were intended to be installed
-// with various attributes (e.g. DONT_ENUM | DONT_DELETE), but
-// InstallGetterSetter had a bug which ignored the passed attributes and
-// simply installed as DONT_ENUM instead. We might want to change back
-// to the intended attributes at some point.
-// On the other hand, installing attributes as DONT_ENUM matches the draft
-// specification at
-// https://github.com/claudepache/es-regexp-legacy-static-properties
-
-%OptimizeObjectForAddingMultipleProperties(GlobalRegExp, 22);
-utils.InstallGetterSetter(GlobalRegExp, 'input', RegExpGetInput, RegExpSetInput,
-                          DONT_ENUM);
-utils.InstallGetterSetter(GlobalRegExp, '$_', RegExpGetInput, RegExpSetInput,
-                          DONT_ENUM);
-
-
-var NoOpSetter = function(ignored) {};
-
-
-// Static properties set by a successful match.
-utils.InstallGetterSetter(GlobalRegExp, 'lastMatch', RegExpGetLastMatch,
-                          NoOpSetter, DONT_ENUM);
-utils.InstallGetterSetter(GlobalRegExp, '$&', RegExpGetLastMatch, NoOpSetter,
-                          DONT_ENUM);
-utils.InstallGetterSetter(GlobalRegExp, 'lastParen', RegExpGetLastParen,
-                          NoOpSetter, DONT_ENUM);
-utils.InstallGetterSetter(GlobalRegExp, '$+', RegExpGetLastParen, NoOpSetter,
-                          DONT_ENUM);
-utils.InstallGetterSetter(GlobalRegExp, 'leftContext', RegExpGetLeftContext,
-                          NoOpSetter, DONT_ENUM);
-utils.InstallGetterSetter(GlobalRegExp, '$`', RegExpGetLeftContext, NoOpSetter,
-                          DONT_ENUM);
-utils.InstallGetterSetter(GlobalRegExp, 'rightContext', RegExpGetRightContext,
-                          NoOpSetter, DONT_ENUM);
-utils.InstallGetterSetter(GlobalRegExp, "$'", RegExpGetRightContext, NoOpSetter,
-                          DONT_ENUM);
-
-for (var i = 1; i < 10; ++i) {
-  utils.InstallGetterSetter(GlobalRegExp, '$' + i, RegExpMakeCaptureGetter(i),
-                            NoOpSetter, DONT_ENUM);
-}
-%ToFastProperties(GlobalRegExp);
-
-%InstallToContext(["regexp_last_match_info", RegExpLastMatchInfo]);
-
-// -------------------------------------------------------------------
-// Internal
-
-var InternalRegExpMatchInfo = {
-  REGEXP_NUMBER_OF_CAPTURES: 2,
-  REGEXP_LAST_SUBJECT:       "",
-  REGEXP_LAST_INPUT:         UNDEFINED,
-  CAPTURE0:                  0,
-  CAPTURE1:                  0
-};
-
-function InternalRegExpMatch(regexp, subject) {
-  var matchInfo = %_RegExpExec(regexp, subject, 0, InternalRegExpMatchInfo);
-  if (!IS_NULL(matchInfo)) {
-    RETURN_NEW_RESULT_FROM_MATCH_INFO(matchInfo, subject);
-  }
-  return null;
-}
-
-function InternalRegExpReplace(regexp, subject, replacement) {
-  return %StringReplaceGlobalRegExpWithString(
-      subject, regexp, replacement, InternalRegExpMatchInfo);
-}
-
-// -------------------------------------------------------------------
-// Exports
-
-utils.Export(function(to) {
-  to.GetSubstitution = GetSubstitution;
-  to.InternalRegExpMatch = InternalRegExpMatch;
-  to.InternalRegExpReplace = InternalRegExpReplace;
-  to.IsRegExp = IsRegExp;
-  to.RegExpExec = DoRegExpExec;
-  to.RegExpInitialize = RegExpInitialize;
-  to.RegExpLastMatchInfo = RegExpLastMatchInfo;
-});
-
-})
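The removed RegExpSubclassReplace built the replacer-function arguments as
[captures..., position, string], a calling convention that is observable from
script. A minimal sketch of the callback signature, plain ES semantics rather
than V8-internal API:

  "2016-12-31".replace(/(\d+)-(\d+)-(\d+)/,
                       function (match, y, m, d, position, string) {
    // match === "2016-12-31", y === "2016", m === "12", d === "31",
    // position === 0, string === "2016-12-31"
    return d + "/" + m + "/" + y;
  });
  // => "31/12/2016"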
diff --git a/src/js/string.js b/src/js/string.js
index 7c552a9..3a9254c 100644
--- a/src/js/string.js
+++ b/src/js/string.js
@@ -10,13 +10,10 @@
 // Imports
 
 var ArrayJoin;
-var GetSubstitution;
 var GlobalRegExp = global.RegExp;
 var GlobalString = global.String;
-var IsRegExp;
 var MaxSimple;
 var MinSimple;
-var RegExpInitialize;
 var matchSymbol = utils.ImportNow("match_symbol");
 var replaceSymbol = utils.ImportNow("replace_symbol");
 var searchSymbol = utils.ImportNow("search_symbol");
@@ -24,11 +21,8 @@
 
 utils.Import(function(from) {
   ArrayJoin = from.ArrayJoin;
-  GetSubstitution = from.GetSubstitution;
-  IsRegExp = from.IsRegExp;
   MaxSimple = from.MaxSimple;
   MinSimple = from.MinSimple;
-  RegExpInitialize = from.RegExpInitialize;
 });
 
 //-------------------------------------------------------------------
@@ -46,21 +40,6 @@
 }
 
 
-// ECMA-262 section 15.5.4.7
-function StringIndexOf(pattern, position) {  // length == 1
-  CHECK_OBJECT_COERCIBLE(this, "String.prototype.indexOf");
-
-  var subject = TO_STRING(this);
-  pattern = TO_STRING(pattern);
-  var index = TO_INTEGER(position);
-  if (index < 0) index = 0;
-  if (index > subject.length) index = subject.length;
-  return %StringIndexOf(subject, pattern, index);
-}
-
-%FunctionSetLength(StringIndexOf, 1);
-
-
 // ES6 21.1.3.11.
 function StringMatchJS(pattern) {
   CHECK_OBJECT_COERCIBLE(this, "String.prototype.match");
@@ -75,11 +54,94 @@
   var subject = TO_STRING(this);
 
   // Equivalent to RegExpCreate (ES#sec-regexpcreate)
-  var regexp = %_NewObject(GlobalRegExp, GlobalRegExp);
-  RegExpInitialize(regexp, pattern);
+  var regexp = %RegExpCreate(pattern);
   return regexp[matchSymbol](subject);
 }
 
+// ES#sec-getsubstitution
+// GetSubstitution(matched, string, position, captures, replacement)
+// Expand the $-expressions in the string and return a new string with
+// the result.
+function GetSubstitution(matched, string, position, captures, replacement) {
+  var matchLength = matched.length;
+  var stringLength = string.length;
+  var capturesLength = captures.length;
+  var tailPos = position + matchLength;
+  var result = "";
+  var pos, expansion, peek, next, scaledIndex, advance, newScaledIndex;
+
+  next = %StringIndexOf(replacement, '$', 0);
+  if (next < 0) {
+    result += replacement;
+    return result;
+  }
+
+  if (next > 0) result += %_SubString(replacement, 0, next);
+
+  while (true) {
+    expansion = '$';
+    pos = next + 1;
+    if (pos < replacement.length) {
+      peek = %_StringCharCodeAt(replacement, pos);
+      if (peek == 36) {         // $$
+        ++pos;
+        result += '$';
+      } else if (peek == 38) {  // $& - match
+        ++pos;
+        result += matched;
+      } else if (peek == 96) {  // $` - prefix
+        ++pos;
+        result += %_SubString(string, 0, position);
+      } else if (peek == 39) {  // $' - suffix
+        ++pos;
+        result += %_SubString(string, tailPos, stringLength);
+      } else if (peek >= 48 && peek <= 57) {
+        // Valid indices are $1 .. $9, $01 .. $09 and $10 .. $99
+        scaledIndex = (peek - 48);
+        advance = 1;
+        if (pos + 1 < replacement.length) {
+          next = %_StringCharCodeAt(replacement, pos + 1);
+          if (next >= 48 && next <= 57) {
+            newScaledIndex = scaledIndex * 10 + (next - 48);
+            if (newScaledIndex < capturesLength) {
+              scaledIndex = newScaledIndex;
+              advance = 2;
+            }
+          }
+        }
+        if (scaledIndex != 0 && scaledIndex < capturesLength) {
+          var capture = captures.at(scaledIndex);
+          if (!IS_UNDEFINED(capture)) result += capture;
+          pos += advance;
+        } else {
+          result += '$';
+        }
+      } else {
+        result += '$';
+      }
+    } else {
+      result += '$';
+    }
+
+    // Go to the next $ in the replacement.
+    next = %StringIndexOf(replacement, '$', pos);
+
+    // Return if there are no more $ characters in the replacement. If we
+    // haven't reached the end, we need to append the suffix.
+    if (next < 0) {
+      if (pos < replacement.length) {
+        result += %_SubString(replacement, pos, replacement.length);
+      }
+      return result;
+    }
+
+    // Append substring between the previous and the next $ character.
+    if (next > pos) {
+      result += %_SubString(replacement, pos, next);
+    }
+  }
+  return result;
+}
 
 // ES6, section 21.1.3.14
 function StringReplace(search, replace) {
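The $-expansion rules GetSubstitution implements are observable through
String.prototype.replace; a few illustrative cases (plain ES semantics):

  // $$ -> literal $, $& -> match, $` -> prefix, $' -> suffix,
  // $1..$99 -> captures (two digits are consumed when the index is valid).
  "abc".replace(/b/, "[$`|$&|$']");  // => "a[a|b|c]c"
  "abc".replace(/(b)/, "<$1><$$>");  // => "a<b><$>c"
  "abc".replace(/b/, "$2");          // => "a$2c" (out-of-range index stays literal)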
@@ -158,8 +220,7 @@
   var subject = TO_STRING(this);
 
   // Equivalent to RegExpCreate (ES#sec-regexpcreate)
-  var regexp = %_NewObject(GlobalRegExp, GlobalRegExp);
-  RegExpInitialize(regexp, pattern);
+  var regexp = %RegExpCreate(pattern);
   return %_Call(regexp[searchSymbol], regexp, subject);
 }
 
@@ -395,87 +456,6 @@
 }
 
 
-// ES6 draft 04-05-14, section 21.1.3.18
-function StringStartsWith(searchString, position) {  // length == 1
-  CHECK_OBJECT_COERCIBLE(this, "String.prototype.startsWith");
-
-  var s = TO_STRING(this);
-
-  if (IsRegExp(searchString)) {
-    throw %make_type_error(kFirstArgumentNotRegExp, "String.prototype.startsWith");
-  }
-
-  var ss = TO_STRING(searchString);
-  var pos = TO_INTEGER(position);
-
-  var s_len = s.length;
-  var start = MinSimple(MaxSimple(pos, 0), s_len);
-  var ss_len = ss.length;
-  if (ss_len + start > s_len) {
-    return false;
-  }
-
-  return %_SubString(s, start, start + ss_len) === ss;
-}
-
-%FunctionSetLength(StringStartsWith, 1);
-
-
-// ES6 draft 04-05-14, section 21.1.3.7
-function StringEndsWith(searchString, position) {  // length == 1
-  CHECK_OBJECT_COERCIBLE(this, "String.prototype.endsWith");
-
-  var s = TO_STRING(this);
-
-  if (IsRegExp(searchString)) {
-    throw %make_type_error(kFirstArgumentNotRegExp, "String.prototype.endsWith");
-  }
-
-  var ss = TO_STRING(searchString);
-  var s_len = s.length;
-  var pos = !IS_UNDEFINED(position) ? TO_INTEGER(position) : s_len
-
-  var end = MinSimple(MaxSimple(pos, 0), s_len);
-  var ss_len = ss.length;
-  var start = end - ss_len;
-  if (start < 0) {
-    return false;
-  }
-
-  return %_SubString(s, start, start + ss_len) === ss;
-}
-
-%FunctionSetLength(StringEndsWith, 1);
-
-
-// ES6 draft 04-05-14, section 21.1.3.6
-function StringIncludes(searchString, position) {  // length == 1
-  CHECK_OBJECT_COERCIBLE(this, "String.prototype.includes");
-
-  var string = TO_STRING(this);
-
-  if (IsRegExp(searchString)) {
-    throw %make_type_error(kFirstArgumentNotRegExp, "String.prototype.includes");
-  }
-
-  searchString = TO_STRING(searchString);
-  var pos = TO_INTEGER(position);
-
-  var stringLength = string.length;
-  if (pos < 0) pos = 0;
-  if (pos > stringLength) pos = stringLength;
-  var searchStringLength = searchString.length;
-
-  if (searchStringLength + pos > stringLength) {
-    return false;
-  }
-
-  return %StringIndexOf(string, searchString, pos) !== -1;
-}
-
-%FunctionSetLength(StringIncludes, 1);
-
-
 // ES6 Draft 05-22-2014, section 21.1.3.3
 function StringCodePointAt(pos) {
   CHECK_OBJECT_COERCIBLE(this, "String.prototype.codePointAt");
@@ -533,16 +513,12 @@
 utils.InstallFunctions(GlobalString.prototype, DONT_ENUM, [
   "codePointAt", StringCodePointAt,
   "concat", StringConcat,
-  "endsWith", StringEndsWith,
-  "includes", StringIncludes,
-  "indexOf", StringIndexOf,
   "match", StringMatchJS,
   "repeat", StringRepeat,
   "replace", StringReplace,
   "search", StringSearch,
   "slice", StringSlice,
   "split", StringSplitJS,
-  "startsWith", StringStartsWith,
   "toLowerCase", StringToLowerCaseJS,
   "toLocaleLowerCase", StringToLocaleLowerCase,
   "toUpperCase", StringToUpperCaseJS,
@@ -567,7 +543,6 @@
 // Exports
 
 utils.Export(function(to) {
-  to.StringIndexOf = StringIndexOf;
   to.StringMatch = StringMatchJS;
   to.StringReplace = StringReplace;
   to.StringSlice = StringSlice;
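The String.prototype helpers removed above (indexOf, startsWith, endsWith,
includes) are presumably reimplemented as builtins elsewhere in this merge;
their observable semantics are unchanged. Assuming the builtin mirrors the
deleted JS, the clamping logic of startsWith can be sketched as:

  function startsWithSketch(subject, searchString, position) {
    var s = String(subject);
    var ss = String(searchString);
    // Approximates TO_INTEGER (NaN -> 0), then clamps to [0, s.length].
    var pos = Math.min(Math.max(Math.trunc(position) || 0, 0), s.length);
    if (ss.length + pos > s.length) return false;
    return s.substring(pos, pos + ss.length) === ss;
  }
  startsWithSketch("hello", "ell", 1);  // => true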
diff --git a/src/js/typedarray.js b/src/js/typedarray.js
index edb3b06..7667e18 100644
--- a/src/js/typedarray.js
+++ b/src/js/typedarray.js
@@ -844,12 +844,7 @@
 
 // TODO(bmeurer): Migrate this to a proper builtin.
 function TypedArrayConstructor() {
-  if (IS_UNDEFINED(new.target)) {
-    throw %make_type_error(kConstructorNonCallable, "TypedArray");
-  }
-  if (new.target === GlobalTypedArray) {
-    throw %make_type_error(kConstructAbstractClass, "TypedArray");
-  }
+  throw %make_type_error(kConstructAbstractClass, "TypedArray");
 }
 
 function TypedArraySpecies() {
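The simplified constructor throws kConstructAbstractClass on every path,
including plain calls that previously reported kConstructorNonCallable. The
observable behavior from script:

  // %TypedArray% is the common ancestor of the typed-array constructors.
  var TypedArray = Object.getPrototypeOf(Uint8Array);
  new Uint8Array(4);    // fine: concrete constructors don't hit this path
  try {
    new TypedArray(4);  // TypeError: abstract class TypedArray
  } catch (e) {}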
diff --git a/src/js/v8natives.js b/src/js/v8natives.js
index 93636a0..f67a8b5 100644
--- a/src/js/v8natives.js
+++ b/src/js/v8natives.js
@@ -18,51 +18,6 @@
 // ----------------------------------------------------------------------------
 
 
-// ES6 18.2.5 parseInt(string, radix)
-function GlobalParseInt(string, radix) {
-  if (IS_UNDEFINED(radix) || radix === 10 || radix === 0) {
-    // Some people use parseInt instead of Math.floor.  This
-    // optimization makes parseInt on a Smi 12 times faster (60ns
-    // vs 800ns).  The following optimization makes parseInt on a
-    // non-Smi number 9 times faster (230ns vs 2070ns).  Together
-    // they make parseInt on a string 1.4% slower (274ns vs 270ns).
-    if (%_IsSmi(string)) return string;
-    if (IS_NUMBER(string) &&
-        ((0.01 < string && string < 1e9) ||
-            (-1e9 < string && string < -0.01))) {
-      // Truncate number.
-      return string | 0;
-    }
-    string = TO_STRING(string);
-    radix = radix | 0;
-  } else {
-    // The spec says ToString should be evaluated before ToInt32.
-    string = TO_STRING(string);
-    radix = TO_INT32(radix);
-    if (!(radix == 0 || (2 <= radix && radix <= 36))) {
-      return NaN;
-    }
-  }
-
-  if (%_HasCachedArrayIndex(string) &&
-      (radix == 0 || radix == 10)) {
-    return %_GetCachedArrayIndex(string);
-  }
-  return %StringParseInt(string, radix);
-}
-
-
-// ES6 18.2.4 parseFloat(string)
-function GlobalParseFloat(string) {
-  // 1. Let inputString be ? ToString(string).
-  string = TO_STRING(string);
-  if (%_HasCachedArrayIndex(string)) return %_GetCachedArrayIndex(string);
-  return %StringParseFloat(string);
-}
-
-
-// ----------------------------------------------------------------------------
-
 // Set up global object.
 var attributes = DONT_ENUM | DONT_DELETE | READ_ONLY;
 
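parseInt and parseFloat are deleted here, presumably in favor of C++ builtins
added elsewhere in this merge; the radix handling the removed JS implemented
is the standard one:

  parseInt("ff", 16);  // => 255
  parseInt("10", 2);   // => 2
  parseInt("10", 1);   // => NaN (radix must be 0 or in [2, 36])
  parseInt("0x1f");    // => 31  (radix 0/undefined means base 10,
                       //         or base 16 for a "0x"/"0X" prefix)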
@@ -75,12 +30,6 @@
   "undefined", UNDEFINED,
 ]);
 
-// Set up non-enumerable function on the global object.
-utils.InstallFunctions(global, DONT_ENUM, [
-  "parseInt", GlobalParseInt,
-  "parseFloat", GlobalParseFloat,
-]);
-
 
 // ----------------------------------------------------------------------------
 // Object
@@ -114,37 +63,6 @@
   throw %make_type_error(kCalledNonCallable, typeof func);
 }
 
-// ES6 section 19.1.2.18.
-function ObjectSetPrototypeOf(obj, proto) {
-  CHECK_OBJECT_COERCIBLE(obj, "Object.setPrototypeOf");
-
-  if (proto !== null && !IS_RECEIVER(proto)) {
-    throw %make_type_error(kProtoObjectOrNull, proto);
-  }
-
-  if (IS_RECEIVER(obj)) {
-    %SetPrototype(obj, proto);
-  }
-
-  return obj;
-}
-
-// ES6 B.2.2.1.1
-function ObjectGetProto() {
-  return %object_get_prototype_of(this);
-}
-
-
-// ES6 B.2.2.1.2
-function ObjectSetProto(proto) {
-  CHECK_OBJECT_COERCIBLE(this, "Object.prototype.__proto__");
-
-  if ((IS_RECEIVER(proto) || IS_NULL(proto)) && IS_RECEIVER(this)) {
-    %SetPrototype(this, proto);
-  }
-}
-
-
 // ES6 19.1.1.1
 function ObjectConstructor(x) {
   if (GlobalObject != new.target && !IS_UNDEFINED(new.target)) {
@@ -176,16 +94,6 @@
   // __defineSetter__ is added in bootstrapper.cc.
   // __lookupSetter__ is added in bootstrapper.cc.
 ]);
-utils.InstallGetterSetter(
-    GlobalObject.prototype, "__proto__", ObjectGetProto, ObjectSetProto);
-
-// Set up non-enumerable functions in the Object object.
-utils.InstallFunctions(GlobalObject, DONT_ENUM, [
-  "setPrototypeOf", ObjectSetPrototypeOf,
-  // getOwnPropertySymbols is added in symbol.js.
-  // Others are added in bootstrapper.cc.
-]);
-
 
 
 // ----------------------------------------------------------------------------
@@ -210,13 +118,6 @@
   "EPSILON", 2.220446049250313e-16,
 ]);
 
-// Harmony Number constructor additions
-utils.InstallFunctions(GlobalNumber, DONT_ENUM, [
-  "parseInt", GlobalParseInt,
-  "parseFloat", GlobalParseFloat
-]);
-
-
 
 // ----------------------------------------------------------------------------
 // Iterator related spec functions.
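The removed __proto__ accessor and Object.setPrototypeOf rejected bad
prototypes differently, which any replacement must preserve; per the deleted
JS:

  var p = {};
  p.__proto__ = 42;  // the setter silently ignores a non-object, non-null value
  Object.getPrototypeOf(p) === Object.prototype;  // => true
  try {
    Object.setPrototypeOf({}, 42);  // throws TypeError (kProtoObjectOrNull)
  } catch (e) {}
  Object.setPrototypeOf({}, null);  // fine: null is an allowed prototype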
diff --git a/src/json-parser.cc b/src/json-parser.cc
index 576100a..5e79b61 100644
--- a/src/json-parser.cc
+++ b/src/json-parser.cc
@@ -104,7 +104,7 @@
       source_length_(source->length()),
       isolate_(isolate),
       factory_(isolate_->factory()),
-      zone_(isolate_->allocator()),
+      zone_(isolate_->allocator(), ZONE_NAME),
       object_constructor_(isolate_->native_context()->object_function(),
                           isolate_),
       position_(-1) {
diff --git a/src/keys.cc b/src/keys.cc
index c6e31e3..9b6c8f3 100644
--- a/src/keys.cc
+++ b/src/keys.cc
@@ -780,7 +780,7 @@
                                        target_keys->get(i));
       nonconfigurable_keys_length++;
       // The key was moved, null it out in the original list.
-      target_keys->set(i, Smi::FromInt(0));
+      target_keys->set(i, Smi::kZero);
     } else {
       // 14c. Else,
       // 14c i. Append key as an element of targetConfigurableKeys.
@@ -794,7 +794,7 @@
     return AddKeysFromJSProxy(proxy, trap_result);
   }
   // 16. Let uncheckedResultKeys be a new List which is a copy of trapResult.
-  Zone set_zone(isolate_->allocator());
+  Zone set_zone(isolate_->allocator(), ZONE_NAME);
   const int kPresent = 1;
   const int kGone = 0;
   IdentityMap<int> unchecked_result_keys(isolate_->heap(), &set_zone);
diff --git a/src/layout-descriptor-inl.h b/src/layout-descriptor-inl.h
index 3f15065..bade05e 100644
--- a/src/layout-descriptor-inl.h
+++ b/src/layout-descriptor-inl.h
@@ -18,7 +18,7 @@
 Handle<LayoutDescriptor> LayoutDescriptor::New(Isolate* isolate, int length) {
   if (length <= kSmiValueSize) {
     // The whole bit vector fits into a smi.
-    return handle(LayoutDescriptor::FromSmi(Smi::FromInt(0)), isolate);
+    return handle(LayoutDescriptor::FromSmi(Smi::kZero), isolate);
   }
   length = GetSlowModeBackingStoreLength(length);
   return Handle<LayoutDescriptor>::cast(isolate->factory()->NewFixedTypedArray(
@@ -37,7 +37,7 @@
 
 
 LayoutDescriptor* LayoutDescriptor::FastPointerLayout() {
-  return LayoutDescriptor::FromSmi(Smi::FromInt(0));
+  return LayoutDescriptor::FromSmi(Smi::kZero);
 }
 
 
diff --git a/src/libplatform/default-platform.cc b/src/libplatform/default-platform.cc
index f64143e..866a447 100644
--- a/src/libplatform/default-platform.cc
+++ b/src/libplatform/default-platform.cc
@@ -7,6 +7,7 @@
 #include <algorithm>
 #include <queue>
 
+#include "include/libplatform/libplatform.h"
 #include "src/base/logging.h"
 #include "src/base/platform/platform.h"
 #include "src/base/platform/time.h"
diff --git a/src/libplatform/default-platform.h b/src/libplatform/default-platform.h
index e36234f..4b52c28 100644
--- a/src/libplatform/default-platform.h
+++ b/src/libplatform/default-platform.h
@@ -11,8 +11,10 @@
 #include <queue>
 #include <vector>
 
+#include "include/libplatform/libplatform-export.h"
 #include "include/libplatform/v8-tracing.h"
 #include "include/v8-platform.h"
+#include "src/base/compiler-specific.h"
 #include "src/base/macros.h"
 #include "src/base/platform/mutex.h"
 #include "src/libplatform/task-queue.h"
@@ -28,7 +30,7 @@
 class TracingController;
 }
 
-class DefaultPlatform : public Platform {
+class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
  public:
   DefaultPlatform();
   virtual ~DefaultPlatform();
diff --git a/src/libplatform/task-queue.h b/src/libplatform/task-queue.h
index 5239cda..330527a 100644
--- a/src/libplatform/task-queue.h
+++ b/src/libplatform/task-queue.h
@@ -7,6 +7,7 @@
 
 #include <queue>
 
+#include "include/libplatform/libplatform-export.h"
 #include "src/base/macros.h"
 #include "src/base/platform/mutex.h"
 #include "src/base/platform/semaphore.h"
@@ -18,7 +19,7 @@
 
 namespace platform {
 
-class TaskQueue {
+class V8_PLATFORM_EXPORT TaskQueue {
  public:
   TaskQueue();
   ~TaskQueue();
diff --git a/src/libplatform/tracing/trace-config.cc b/src/libplatform/tracing/trace-config.cc
index 7a824f6..e77d191 100644
--- a/src/libplatform/tracing/trace-config.cc
+++ b/src/libplatform/tracing/trace-config.cc
@@ -32,11 +32,6 @@
   included_categories_.push_back(included_category);
 }
 
-void TraceConfig::AddExcludedCategory(const char* excluded_category) {
-  DCHECK(excluded_category != NULL && strlen(excluded_category) > 0);
-  excluded_categories_.push_back(excluded_category);
-}
-
 }  // namespace tracing
 }  // namespace platform
 }  // namespace v8
diff --git a/src/libplatform/worker-thread.h b/src/libplatform/worker-thread.h
index 6a55a6b..22b0626 100644
--- a/src/libplatform/worker-thread.h
+++ b/src/libplatform/worker-thread.h
@@ -7,6 +7,8 @@
 
 #include <queue>
 
+#include "include/libplatform/libplatform-export.h"
+#include "src/base/compiler-specific.h"
 #include "src/base/macros.h"
 #include "src/base/platform/platform.h"
 
@@ -16,7 +18,7 @@
 
 class TaskQueue;
 
-class WorkerThread : public base::Thread {
+class V8_PLATFORM_EXPORT WorkerThread : public NON_EXPORTED_BASE(base::Thread) {
  public:
   explicit WorkerThread(TaskQueue* queue);
   virtual ~WorkerThread();
diff --git a/src/libsampler/sampler.cc b/src/libsampler/sampler.cc
index 0b40972..f65498a 100644
--- a/src/libsampler/sampler.cc
+++ b/src/libsampler/sampler.cc
@@ -281,7 +281,7 @@
     if (!entry) return;
     SamplerList& samplers = *static_cast<SamplerList*>(entry->value);
 
-    for (int i = 0; i < samplers.size(); ++i) {
+    for (size_t i = 0; i < samplers.size(); ++i) {
       Sampler* sampler = samplers[i];
       Isolate* isolate = sampler->isolate();
       // We require a fully initialized and entered isolate.
diff --git a/src/list.h b/src/list.h
index 83e5f45..0492865 100644
--- a/src/list.h
+++ b/src/list.h
@@ -129,7 +129,8 @@
   INLINE(void Allocate(int length,
                        AllocationPolicy allocator = AllocationPolicy()));
 
-  // Clears the list by setting the length to zero. Even if T is a
+  // Clears the list by freeing the storage memory. If you want to keep the
+  // memory, use Rewind(0) instead. Be aware that even if T is a
   // pointer type, clearing the list doesn't delete the entries.
   INLINE(void Clear());
 
diff --git a/src/log-utils.cc b/src/log-utils.cc
index 22972ec..462f83f 100644
--- a/src/log-utils.cc
+++ b/src/log-utils.cc
@@ -37,7 +37,6 @@
     FLAG_log_gc = true;
     FLAG_log_suspect = true;
     FLAG_log_handles = true;
-    FLAG_log_regexp = true;
     FLAG_log_internal_timer_events = true;
   }
 
diff --git a/src/log-utils.h b/src/log-utils.h
index 059e5a5..b165b3e 100644
--- a/src/log-utils.h
+++ b/src/log-utils.h
@@ -30,8 +30,8 @@
 
   static bool InitLogAtStart() {
     return FLAG_log || FLAG_log_api || FLAG_log_code || FLAG_log_gc ||
-           FLAG_log_handles || FLAG_log_suspect || FLAG_log_regexp ||
-           FLAG_ll_prof || FLAG_perf_basic_prof || FLAG_perf_prof ||
+           FLAG_log_handles || FLAG_log_suspect || FLAG_ll_prof ||
+           FLAG_perf_basic_prof || FLAG_perf_prof ||
            FLAG_log_internal_timer_events || FLAG_prof_cpp;
   }
 
diff --git a/src/log.cc b/src/log.cc
index fc7fcb9..bc52d05 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -27,6 +27,7 @@
 #include "src/runtime-profiler.h"
 #include "src/source-position-table.h"
 #include "src/string-stream.h"
+#include "src/tracing/tracing-category-observer.h"
 #include "src/vm-state-inl.h"
 
 namespace v8 {
@@ -893,64 +894,6 @@
 TIMER_EVENTS_LIST(V)
 #undef V
 
-
-namespace {
-// Emits the source code of a regexp. Used by regexp events.
-void LogRegExpSource(Handle<JSRegExp> regexp, Isolate* isolate,
-                     Log::MessageBuilder* msg) {
-  // Prints "/" + re.source + "/" +
-  //      (re.global?"g":"") + (re.ignorecase?"i":"") + (re.multiline?"m":"")
-
-  Handle<Object> source =
-      JSReceiver::GetProperty(isolate, regexp, "source").ToHandleChecked();
-  if (!source->IsString()) {
-    msg->Append("no source");
-    return;
-  }
-
-  switch (regexp->TypeTag()) {
-    case JSRegExp::ATOM:
-      msg->Append('a');
-      break;
-    default:
-      break;
-  }
-  msg->Append('/');
-  msg->AppendDetailed(*Handle<String>::cast(source), false);
-  msg->Append('/');
-
-  // global flag
-  Handle<Object> global =
-      JSReceiver::GetProperty(isolate, regexp, "global").ToHandleChecked();
-  if (global->IsTrue(isolate)) {
-    msg->Append('g');
-  }
-  // ignorecase flag
-  Handle<Object> ignorecase =
-      JSReceiver::GetProperty(isolate, regexp, "ignoreCase").ToHandleChecked();
-  if (ignorecase->IsTrue(isolate)) {
-    msg->Append('i');
-  }
-  // multiline flag
-  Handle<Object> multiline =
-      JSReceiver::GetProperty(isolate, regexp, "multiline").ToHandleChecked();
-  if (multiline->IsTrue(isolate)) {
-    msg->Append('m');
-  }
-}
-}  // namespace
-
-
-void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) {
-  if (!log_->IsEnabled() || !FLAG_log_regexp) return;
-  Log::MessageBuilder msg(log_);
-  msg.Append("regexp-compile,");
-  LogRegExpSource(regexp, isolate_, &msg);
-  msg.Append(in_cache ? ",hit" : ",miss");
-  msg.WriteToLogFile();
-}
-
-
 void Logger::ApiNamedPropertyAccess(const char* tag,
                                     JSObject* holder,
                                     Object* name) {
@@ -1206,12 +1149,13 @@
          iter.Advance()) {
       if (iter.is_statement()) {
         jit_logger_->AddCodeLinePosInfoEvent(
-            jit_handler_data, iter.code_offset(), iter.source_position(),
+            jit_handler_data, iter.code_offset(),
+            iter.source_position().ScriptOffset(),
             JitCodeEvent::STATEMENT_POSITION);
       }
-      jit_logger_->AddCodeLinePosInfoEvent(jit_handler_data, iter.code_offset(),
-                                           iter.source_position(),
-                                           JitCodeEvent::POSITION);
+      jit_logger_->AddCodeLinePosInfoEvent(
+          jit_handler_data, iter.code_offset(),
+          iter.source_position().ScriptOffset(), JitCodeEvent::POSITION);
     }
     jit_logger_->EndCodePosInfoEvent(code, jit_handler_data);
   }
@@ -1341,7 +1285,8 @@
 
 void Logger::TickEvent(v8::TickSample* sample, bool overflow) {
   if (!log_->IsEnabled() || !FLAG_prof_cpp) return;
-  if (FLAG_runtime_call_stats) {
+  if (V8_UNLIKELY(FLAG_runtime_stats ==
+                  v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE)) {
     RuntimeCallTimerEvent();
   }
   Log::MessageBuilder msg(log_);
@@ -1542,8 +1487,6 @@
 }
 
 void Logger::LogBytecodeHandlers() {
-  if (!FLAG_ignition) return;
-
   const interpreter::OperandScale kOperandScales[] = {
 #define VALUE(Name, _) interpreter::OperandScale::k##Name,
       OPERAND_SCALE_LIST(VALUE)
diff --git a/src/log.h b/src/log.h
index a05b187..b7a5fc6 100644
--- a/src/log.h
+++ b/src/log.h
@@ -218,11 +218,6 @@
   INLINE(static void CallEventLogger(Isolate* isolate, const char* name,
                                      StartEnd se, bool expose_to_api));
 
-  // ==== Events logged by --log-regexp ====
-  // Regexp compilation and execution events.
-
-  void RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache);
-
   bool is_logging() {
     return is_logging_;
   }
@@ -346,8 +341,7 @@
   V(CompileCode, true)          \
   V(DeoptimizeCode, true)       \
   V(Execute, true)              \
-  V(External, true)             \
-  V(IcMiss, false)
+  V(External, true)
 
 #define V(TimerName, expose)                                                  \
   class TimerEvent##TimerName : public AllStatic {                            \
diff --git a/src/lookup-cache.cc b/src/lookup-cache.cc
index 18729d6..b740fdb 100644
--- a/src/lookup-cache.cc
+++ b/src/lookup-cache.cc
@@ -13,72 +13,5 @@
   for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
 }
 
-int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
-  DisallowHeapAllocation no_gc;
-  // Uses only lower 32 bits if pointers are larger.
-  uintptr_t addr_hash =
-      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
-  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
-}
-
-int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
-  DisallowHeapAllocation no_gc;
-  int index = (Hash(map, name) & kHashMask);
-  for (int i = 0; i < kEntriesPerBucket; i++) {
-    Key& key = keys_[index + i];
-    if ((key.map == *map) && key.name->Equals(*name)) {
-      return field_offsets_[index + i];
-    }
-  }
-  return kNotFound;
-}
-
-void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
-                              int field_offset) {
-  DisallowHeapAllocation no_gc;
-  if (!name->IsUniqueName()) {
-    if (!StringTable::InternalizeStringIfExists(name->GetIsolate(),
-                                                Handle<String>::cast(name))
-             .ToHandle(&name)) {
-      return;
-    }
-  }
-  // This cache is cleared only between mark compact passes, so we expect the
-  // cache to only contain old space names.
-  DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));
-
-  int index = (Hash(map, name) & kHashMask);
-  // After a GC there will be free slots, so we use them in order (this may
-  // help to get the most frequently used one in position 0).
-  for (int i = 0; i < kEntriesPerBucket; i++) {
-    Key& key = keys_[index];
-    Object* free_entry_indicator = NULL;
-    if (key.map == free_entry_indicator) {
-      key.map = *map;
-      key.name = *name;
-      field_offsets_[index + i] = field_offset;
-      return;
-    }
-  }
-  // No free entry found in this bucket, so we move them all down one and
-  // put the new entry at position zero.
-  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
-    Key& key = keys_[index + i];
-    Key& key2 = keys_[index + i - 1];
-    key = key2;
-    field_offsets_[index + i] = field_offsets_[index + i - 1];
-  }
-
-  // Write the new first entry.
-  Key& key = keys_[index];
-  key.map = *map;
-  key.name = *name;
-  field_offsets_[index] = field_offset;
-}
-
-void KeyedLookupCache::Clear() {
-  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
-}
-
 }  // namespace internal
 }  // namespace v8
diff --git a/src/lookup-cache.h b/src/lookup-cache.h
index 6da5e5b..bf64cc0 100644
--- a/src/lookup-cache.h
+++ b/src/lookup-cache.h
@@ -52,65 +52,6 @@
   DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache);
 };
 
-// Cache for mapping (map, property name) into field offset.
-// Cleared at startup and prior to mark sweep collection.
-class KeyedLookupCache {
- public:
-  // Lookup field offset for (map, name). If absent, -1 is returned.
-  int Lookup(Handle<Map> map, Handle<Name> name);
-
-  // Update an element in the cache.
-  void Update(Handle<Map> map, Handle<Name> name, int field_offset);
-
-  // Clear the cache.
-  void Clear();
-
-  static const int kLength = 256;
-  static const int kCapacityMask = kLength - 1;
-  static const int kMapHashShift = 5;
-  static const int kHashMask = -4;  // Zero the last two bits.
-  static const int kEntriesPerBucket = 4;
-  static const int kEntryLength = 2;
-  static const int kMapIndex = 0;
-  static const int kKeyIndex = 1;
-  static const int kNotFound = -1;
-
-  // kEntriesPerBucket should be a power of 2.
-  STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0);
-  STATIC_ASSERT(kEntriesPerBucket == -kHashMask);
-
- private:
-  KeyedLookupCache() {
-    for (int i = 0; i < kLength; ++i) {
-      keys_[i].map = NULL;
-      keys_[i].name = NULL;
-      field_offsets_[i] = kNotFound;
-    }
-  }
-
-  static inline int Hash(Handle<Map> map, Handle<Name> name);
-
-  // Get the address of the keys and field_offsets arrays.  Used in
-  // generated code to perform cache lookups.
-  Address keys_address() { return reinterpret_cast<Address>(&keys_); }
-
-  Address field_offsets_address() {
-    return reinterpret_cast<Address>(&field_offsets_);
-  }
-
-  struct Key {
-    Map* map;
-    Name* name;
-  };
-
-  Key keys_[kLength];
-  int field_offsets_[kLength];
-
-  friend class ExternalReference;
-  friend class Isolate;
-  DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache);
-};
-
 }  // namespace internal
 }  // namespace v8
 
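The deleted KeyedLookupCache used 4-entry buckets with a simple insertion
policy: take the first free slot, and once the bucket is full, shift every
entry down one position and write the new entry at slot zero. A sketch of
that policy with illustrative names (not V8 API):

  function Bucket(size) {
    this.slots = new Array(size).fill(null);
  }
  Bucket.prototype.update = function (key, value) {
    var n = this.slots.length;
    // Free slots are used in order, so hot entries drift toward slot 0.
    for (var i = 0; i < n; i++) {
      if (this.slots[i] === null) {
        this.slots[i] = {key: key, value: value};
        return;
      }
    }
    // Bucket full: age every entry by one and insert at the front.
    for (var j = n - 1; j > 0; j--) this.slots[j] = this.slots[j - 1];
    this.slots[0] = {key: key, value: value};
  };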
diff --git a/src/lookup.cc b/src/lookup.cc
index b6c0b92..186823d 100644
--- a/src/lookup.cc
+++ b/src/lookup.cc
@@ -129,7 +129,8 @@
     Handle<JSValue>::cast(result)->set_value(*receiver);
     return result;
   }
-  auto root = handle(receiver->GetRootMap(isolate)->prototype(), isolate);
+  auto root =
+      handle(receiver->GetPrototypeChainRootMap(isolate)->prototype(), isolate);
   if (root->IsNull(isolate)) {
     unsigned int magic = 0xbbbbbbbb;
     isolate->PushStackTraceAndDie(magic, *receiver, NULL, magic);
@@ -193,6 +194,11 @@
   } else if (*name_ == heap()->has_instance_symbol()) {
     if (!isolate_->IsHasInstanceLookupChainIntact()) return;
     isolate_->InvalidateHasInstanceProtector();
+  } else if (*name_ == heap()->iterator_symbol()) {
+    if (!isolate_->IsArrayIteratorLookupChainIntact()) return;
+    if (holder_->IsJSArray()) {
+      isolate_->InvalidateArrayIteratorProtector();
+    }
   }
 }
 
@@ -601,6 +607,12 @@
   return handle(result, isolate_);
 }
 
+int LookupIterator::GetFieldDescriptorIndex() const {
+  DCHECK(has_property_);
+  DCHECK(holder_->HasFastProperties());
+  DCHECK_EQ(v8::internal::DATA, property_details_.type());
+  return descriptor_number();
+}
 
 int LookupIterator::GetAccessorIndex() const {
   DCHECK(has_property_);
@@ -797,7 +809,8 @@
     JSObject* js_object = JSObject::cast(holder);
     ElementsAccessor* accessor = js_object->GetElementsAccessor();
     FixedArrayBase* backing_store = js_object->elements();
-    number_ = accessor->GetEntryForIndex(js_object, backing_store, index_);
+    number_ =
+        accessor->GetEntryForIndex(isolate_, js_object, backing_store, index_);
     if (number_ == kMaxUInt32) {
       return holder->IsJSTypedArray() ? INTEGER_INDEXED_EXOTIC : NOT_FOUND;
     }
@@ -843,5 +856,27 @@
   return Handle<InterceptorInfo>();
 }
 
+bool LookupIterator::TryLookupCachedProperty() {
+  return state() == LookupIterator::ACCESSOR &&
+         GetAccessors()->IsAccessorPair() && LookupCachedProperty();
+}
+
+bool LookupIterator::LookupCachedProperty() {
+  DCHECK_EQ(state(), LookupIterator::ACCESSOR);
+  DCHECK(GetAccessors()->IsAccessorPair());
+
+  AccessorPair* accessor_pair = AccessorPair::cast(*GetAccessors());
+  Handle<Object> getter(accessor_pair->getter(), isolate());
+  MaybeHandle<Name> maybe_name =
+      FunctionTemplateInfo::TryGetCachedPropertyName(isolate(), getter);
+  if (maybe_name.is_null()) return false;
+
+  // We have found a cached property! Modify the iterator accordingly.
+  name_ = maybe_name.ToHandleChecked();
+  Restart();
+  CHECK_EQ(state(), LookupIterator::DATA);
+  return true;
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/lookup.h b/src/lookup.h
index 687c677..e0b40c4 100644
--- a/src/lookup.h
+++ b/src/lookup.h
@@ -6,13 +6,14 @@
 #define V8_LOOKUP_H_
 
 #include "src/factory.h"
+#include "src/globals.h"
 #include "src/isolate.h"
 #include "src/objects.h"
 
 namespace v8 {
 namespace internal {
 
-class LookupIterator final BASE_EMBEDDED {
+class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
  public:
   enum Configuration {
     // Configuration bits.
@@ -237,6 +238,7 @@
   }
   FieldIndex GetFieldIndex() const;
   Handle<FieldType> GetFieldType() const;
+  int GetFieldDescriptorIndex() const;
   int GetAccessorIndex() const;
   int GetConstantIndex() const;
   Handle<PropertyCell> GetPropertyCell() const;
@@ -256,11 +258,17 @@
     if (*name_ == heap()->is_concat_spreadable_symbol() ||
         *name_ == heap()->constructor_string() ||
         *name_ == heap()->species_symbol() ||
-        *name_ == heap()->has_instance_symbol()) {
+        *name_ == heap()->has_instance_symbol() ||
+        *name_ == heap()->iterator_symbol()) {
       InternalUpdateProtector();
     }
   }
 
+  // Lookup a 'cached' private property for an accessor.
+  // If not found returns false and leaves the LookupIterator unmodified.
+  bool TryLookupCachedProperty();
+  bool LookupCachedProperty();
+
  private:
   void InternalUpdateProtector();
 
diff --git a/src/machine-type.h b/src/machine-type.h
index e9605d7..844c956 100644
--- a/src/machine-type.h
+++ b/src/machine-type.h
@@ -29,9 +29,14 @@
   kFloat32,
   kFloat64,
   kSimd128,
-  kFirstFPRepresentation = kFloat32
+  kFirstFPRepresentation = kFloat32,
+  kLastRepresentation = kSimd128
 };
 
+static_assert(static_cast<int>(MachineRepresentation::kLastRepresentation) <
+                  kIntSize * kBitsPerByte,
+              "Bit masks of MachineRepresentation should fit in an int");
+
 const char* MachineReprToString(MachineRepresentation);
 
 enum class MachineSemantic : uint8_t {
@@ -223,7 +228,7 @@
 V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
                                            MachineRepresentation rep);
 std::ostream& operator<<(std::ostream& os, MachineSemantic type);
-std::ostream& operator<<(std::ostream& os, MachineType type);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, MachineType type);
 
 inline bool IsFloatingPoint(MachineRepresentation rep) {
   return rep >= MachineRepresentation::kFirstFPRepresentation;
@@ -234,12 +239,17 @@
          rep == MachineRepresentation::kTaggedPointer;
 }
 
+inline bool CanBeTaggedSigned(MachineRepresentation rep) {
+  return rep == MachineRepresentation::kTagged ||
+         rep == MachineRepresentation::kTaggedSigned;
+}
+
 inline bool IsAnyTagged(MachineRepresentation rep) {
   return CanBeTaggedPointer(rep) || rep == MachineRepresentation::kTaggedSigned;
 }
 
 // Gets the log2 of the element size in bytes of the machine type.
-inline int ElementSizeLog2Of(MachineRepresentation rep) {
+V8_EXPORT_PRIVATE inline int ElementSizeLog2Of(MachineRepresentation rep) {
   switch (rep) {
     case MachineRepresentation::kBit:
     case MachineRepresentation::kWord8:
diff --git a/src/messages.cc b/src/messages.cc
index cc6349d..eea77e3 100644
--- a/src/messages.cc
+++ b/src/messages.cc
@@ -12,6 +12,7 @@
 #include "src/keys.h"
 #include "src/string-builder.h"
 #include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
 
 namespace v8 {
 namespace internal {
@@ -211,14 +212,6 @@
   return isolate_->factory()->null_value();
 }
 
-Handle<Object> JSStackFrame::GetScriptNameOrSourceUrl() {
-  if (!HasScript()) return isolate_->factory()->null_value();
-  Handle<Script> script = GetScript();
-  Object* source_url = script->source_url();
-  return (source_url->IsString()) ? handle(source_url, isolate_)
-                                  : handle(script->name(), isolate_);
-}
-
 namespace {
 
 bool CheckMethodName(Isolate* isolate, Handle<JSObject> obj, Handle<Name> name,
@@ -238,8 +231,19 @@
   return false;
 }
 
+Handle<Object> ScriptNameOrSourceUrl(Handle<Script> script, Isolate* isolate) {
+  Object* name_or_url = script->source_url();
+  if (!name_or_url->IsString()) name_or_url = script->name();
+  return handle(name_or_url, isolate);
+}
+
 }  // namespace
 
+Handle<Object> JSStackFrame::GetScriptNameOrSourceUrl() {
+  if (!HasScript()) return isolate_->factory()->null_value();
+  return ScriptNameOrSourceUrl(GetScript(), isolate_);
+}
+
 Handle<Object> JSStackFrame::GetMethodName() {
   if (receiver_->IsNull(isolate_) || receiver_->IsUndefined(isolate_)) {
     return isolate_->factory()->null_value();
@@ -298,7 +302,7 @@
 
 Object* EvalFromFunctionName(Isolate* isolate, Handle<Script> script) {
   if (script->eval_from_shared()->IsUndefined(isolate))
-    return *isolate->factory()->undefined_value();
+    return isolate->heap()->undefined_value();
 
   Handle<SharedFunctionInfo> shared(
       SharedFunctionInfo::cast(script->eval_from_shared()));
@@ -312,13 +316,13 @@
 
 Object* EvalFromScript(Isolate* isolate, Handle<Script> script) {
   if (script->eval_from_shared()->IsUndefined(isolate))
-    return *isolate->factory()->undefined_value();
+    return isolate->heap()->undefined_value();
 
   Handle<SharedFunctionInfo> eval_from_shared(
       SharedFunctionInfo::cast(script->eval_from_shared()));
   return eval_from_shared->script()->IsScript()
              ? eval_from_shared->script()
-             : *isolate->factory()->undefined_value();
+             : isolate->heap()->undefined_value();
 }
 
 MaybeHandle<String> FormatEvalOrigin(Isolate* isolate, Handle<Script> script) {
@@ -364,8 +368,8 @@
         builder.AppendString(Handle<String>::cast(name_obj));
 
         Script::PositionInfo info;
-        if (eval_from_script->GetPositionInfo(script->GetEvalPosition(), &info,
-                                              Script::NO_OFFSET)) {
+        if (Script::GetPositionInfo(eval_from_script, script->GetEvalPosition(),
+                                    &info, Script::NO_OFFSET)) {
           builder.AppendCString(":");
 
           Handle<String> str = isolate->factory()->NumberToString(
@@ -455,7 +459,7 @@
   return (object->IsString() && String::cast(*object)->length() > 0);
 }
 
-void AppendFileLocation(Isolate* isolate, JSStackFrame* call_site,
+void AppendFileLocation(Isolate* isolate, StackFrameBase* call_site,
                         IncrementalStringBuilder* builder) {
   if (call_site->IsNative()) {
     builder->AppendCString("native");
@@ -595,14 +599,14 @@
     builder.AppendString(Handle<String>::cast(function_name));
   } else {
     AppendFileLocation(isolate_, this, &builder);
-    RETURN_RESULT(isolate_, builder.Finish(), String);
+    return builder.Finish();
   }
 
   builder.AppendCString(" (");
   AppendFileLocation(isolate_, this, &builder);
   builder.AppendCString(")");
 
-  RETURN_RESULT(isolate_, builder.Finish(), String);
+  return builder.Finish();
 }
 
 int JSStackFrame::GetPosition() const { return code_->SourcePosition(offset_); }
@@ -617,9 +621,10 @@
 
 void WasmStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
                                     int frame_ix) {
-  DCHECK(array->IsWasmFrame(frame_ix));
+  // This function is called for both wasm and asm.js->wasm frames.
+  DCHECK(array->IsWasmFrame(frame_ix) || array->IsAsmJsWasmFrame(frame_ix));
   isolate_ = isolate;
-  wasm_obj_ = handle(array->WasmObject(frame_ix), isolate);
+  wasm_instance_ = handle(array->WasmInstance(frame_ix), isolate);
   wasm_func_index_ = array->WasmFunctionIndex(frame_ix)->value();
   code_ = handle(array->Code(frame_ix), isolate);
   offset_ = array->Offset(frame_ix)->value();
@@ -631,7 +636,15 @@
 }
 
 Handle<Object> WasmStackFrame::GetFunctionName() {
-  return wasm::GetWasmFunctionNameOrNull(isolate_, wasm_obj_, wasm_func_index_);
+  Handle<Object> name;
+  Handle<WasmCompiledModule> compiled_module(
+      Handle<WasmInstanceObject>::cast(wasm_instance_)->get_compiled_module(),
+      isolate_);
+  if (!WasmCompiledModule::GetFunctionName(compiled_module, wasm_func_index_)
+           .ToHandle(&name)) {
+    name = isolate_->factory()->null_value();
+  }
+  return name;
 }
 
 MaybeHandle<String> WasmStackFrame::ToString() {
@@ -667,6 +680,72 @@
   return isolate_->factory()->null_value();
 }
 
+Handle<Object> AsmJsWasmStackFrame::GetReceiver() const {
+  return isolate_->global_proxy();
+}
+
+Handle<Object> AsmJsWasmStackFrame::GetFunction() const {
+  // TODO(clemensh): Return lazily created JSFunction.
+  return Null();
+}
+
+Handle<Object> AsmJsWasmStackFrame::GetFileName() {
+  Handle<Script> script =
+      wasm::GetScript(Handle<JSObject>::cast(wasm_instance_));
+  DCHECK_EQ(Script::TYPE_NORMAL, script->type());
+  return handle(script->name(), isolate_);
+}
+
+Handle<Object> AsmJsWasmStackFrame::GetScriptNameOrSourceUrl() {
+  Handle<Script> script =
+      wasm::GetScript(Handle<JSObject>::cast(wasm_instance_));
+  DCHECK_EQ(Script::TYPE_NORMAL, script->type());
+  return ScriptNameOrSourceUrl(script, isolate_);
+}
+
+int AsmJsWasmStackFrame::GetPosition() const {
+  DCHECK_LE(0, offset_);
+  int byte_offset = code_->SourcePosition(offset_);
+  return wasm::GetAsmWasmSourcePosition(Handle<JSObject>::cast(wasm_instance_),
+                                        wasm_func_index_, byte_offset);
+}
+
+int AsmJsWasmStackFrame::GetLineNumber() {
+  DCHECK_LE(0, GetPosition());
+  Handle<Script> script =
+      wasm::GetScript(Handle<JSObject>::cast(wasm_instance_));
+  DCHECK_EQ(Script::TYPE_NORMAL, script->type());
+  return Script::GetLineNumber(script, GetPosition()) + 1;
+}
+
+int AsmJsWasmStackFrame::GetColumnNumber() {
+  DCHECK_LE(0, GetPosition());
+  Handle<Script> script =
+      wasm::GetScript(Handle<JSObject>::cast(wasm_instance_));
+  DCHECK_EQ(Script::TYPE_NORMAL, script->type());
+  return Script::GetColumnNumber(script, GetPosition()) + 1;
+}
+
+MaybeHandle<String> AsmJsWasmStackFrame::ToString() {
+  // The string should look exactly like the respective JavaScript frame
+  // string. Keep this method in line with JSStackFrame::ToString().
+
+  IncrementalStringBuilder builder(isolate_);
+
+  Handle<Object> function_name = GetFunctionName();
+
+  if (IsNonEmptyString(function_name)) {
+    builder.AppendString(Handle<String>::cast(function_name));
+    builder.AppendCString(" (");
+  }
+
+  AppendFileLocation(isolate_, this, &builder);
+
+  if (IsNonEmptyString(function_name)) builder.AppendCString(")");
+
+  return builder.Finish();
+}
+
 FrameArrayIterator::FrameArrayIterator(Isolate* isolate,
                                        Handle<FrameArray> array, int frame_ix)
     : isolate_(isolate), array_(array), next_frame_ix_(frame_ix) {}
@@ -680,13 +759,22 @@
 StackFrameBase* FrameArrayIterator::Frame() {
   DCHECK(HasNext());
   const int flags = array_->Flags(next_frame_ix_)->value();
-  const bool is_js_frame = (flags & FrameArray::kIsWasmFrame) == 0;
-  if (is_js_frame) {
-    js_frame_.FromFrameArray(isolate_, array_, next_frame_ix_);
-    return &js_frame_;
-  } else {
-    wasm_frame_.FromFrameArray(isolate_, array_, next_frame_ix_);
-    return &wasm_frame_;
+  switch (flags & (FrameArray::kIsWasmFrame | FrameArray::kIsAsmJsWasmFrame)) {
+    case 0:
+      // JavaScript Frame.
+      js_frame_.FromFrameArray(isolate_, array_, next_frame_ix_);
+      return &js_frame_;
+    case FrameArray::kIsWasmFrame:
+      // Wasm Frame.
+      wasm_frame_.FromFrameArray(isolate_, array_, next_frame_ix_);
+      return &wasm_frame_;
+    case FrameArray::kIsAsmJsWasmFrame:
+      // Asm.js Wasm Frame.
+      asm_wasm_frame_.FromFrameArray(isolate_, array_, next_frame_ix_);
+      return &asm_wasm_frame_;
+    default:
+      UNREACHABLE();
+      return nullptr;
   }
 }
 
@@ -864,7 +952,7 @@
     }
   }
 
-  RETURN_RESULT(isolate, builder.Finish(), Object);
+  return builder.Finish();
 }
 
 Handle<String> MessageTemplate::FormatMessage(Isolate* isolate,
diff --git a/src/messages.h b/src/messages.h
index e7bbcc3..86cc8d0 100644
--- a/src/messages.h
+++ b/src/messages.h
@@ -126,7 +126,7 @@
  public:
   virtual ~WasmStackFrame() {}
 
-  Handle<Object> GetReceiver() const override { return wasm_obj_; }
+  Handle<Object> GetReceiver() const override { return wasm_instance_; }
   Handle<Object> GetFunction() const override;
 
   Handle<Object> GetFileName() override { return Null(); }
@@ -148,20 +148,40 @@
 
   MaybeHandle<String> ToString() override;
 
- private:
-  void FromFrameArray(Isolate* isolate, Handle<FrameArray> array, int frame_ix);
+ protected:
   Handle<Object> Null() const;
 
   Isolate* isolate_;
 
-  Handle<Object> wasm_obj_;
+  // TODO(wasm): Use proper typing.
+  Handle<Object> wasm_instance_;
   uint32_t wasm_func_index_;
   Handle<AbstractCode> code_;
   int offset_;
 
+ private:
+  void FromFrameArray(Isolate* isolate, Handle<FrameArray> array, int frame_ix);
+
   friend class FrameArrayIterator;
 };
 
+class AsmJsWasmStackFrame : public WasmStackFrame {
+ public:
+  virtual ~AsmJsWasmStackFrame() {}
+
+  Handle<Object> GetReceiver() const override;
+  Handle<Object> GetFunction() const override;
+
+  Handle<Object> GetFileName() override;
+  Handle<Object> GetScriptNameOrSourceUrl() override;
+
+  int GetPosition() const override;
+  int GetLineNumber() override;
+  int GetColumnNumber() override;
+
+  MaybeHandle<String> ToString() override;
+};
+
 class FrameArrayIterator {
  public:
   FrameArrayIterator(Isolate* isolate, Handle<FrameArray> array,
@@ -179,6 +199,7 @@
   int next_frame_ix_;
 
   WasmStackFrame wasm_frame_;
+  AsmJsWasmStackFrame asm_wasm_frame_;
   JSStackFrame js_frame_;
 };
 
@@ -499,7 +520,8 @@
   T(UnsupportedTimeZone, "Unsupported time zone specified %")                  \
   T(ValueOutOfRange, "Value % out of range for % options property %")          \
   /* SyntaxError */                                                            \
-  T(AmbiguousExport, "Multiple star exports provide name '%'")                 \
+  T(AmbiguousExport,                                                           \
+    "The requested module contains conflicting star exports for name '%'")     \
   T(BadGetterArity, "Getter must not have any formal parameters.")             \
   T(BadSetterArity, "Setter must have exactly one formal parameter.")          \
   T(ConstructorIsAccessor, "Class constructor may not be an accessor")         \
@@ -604,7 +626,8 @@
   T(UnexpectedTokenString, "Unexpected string")                                \
   T(UnexpectedTokenRegExp, "Unexpected regular expression")                    \
   T(UnknownLabel, "Undefined label '%'")                                       \
-  T(UnresolvableExport, "Module does not provide an export named '%'")         \
+  T(UnresolvableExport,                                                        \
+    "The requested module does not provide an export named '%'")               \
   T(UnterminatedArgList, "missing ) after argument list")                      \
   T(UnterminatedRegExp, "Invalid regular expression: missing /")               \
   T(UnterminatedTemplate, "Unterminated template literal")                     \
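The reworded AmbiguousExport message surfaces during module resolution; a
sketch of code that triggers it (file names illustrative):

  // a.js:   export const flag = 1;
  // b.js:   export const flag = 2;
  // c.js:   export * from "./a.js";
  //         export * from "./b.js";
  import { flag } from "./c.js";
  // => SyntaxError: The requested module contains conflicting star exports
  //    for name 'flag'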
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index f5b235d..865e64c 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -1784,13 +1784,44 @@
   addu(at, at, src.rm());  // Add base register.
 }
 
+// Helper for base-reg + the upper part of the offset, when the offset is
+// larger than int16. Loads the higher part of the offset into the AT
+// register. Returns the lower part of the offset to be used as the offset
+// in load/store instructions.
+int32_t Assembler::LoadRegPlusUpperOffsetPartToAt(const MemOperand& src) {
+  DCHECK(!src.rm().is(at));
+  int32_t hi = (src.offset_ >> kLuiShift) & kImm16Mask;
+  // If the highest bit of the lower part of the offset is 1, this would make
+  // the offset in the load/store instruction negative. We need to compensate
+  // for this by adding 1 to the upper part of the offset.
+  if (src.offset_ & kNegOffset) {
+    hi += 1;
+  }
+  lui(at, hi);
+  addu(at, at, src.rm());
+  return (src.offset_ & kImm16Mask);
+}
+
+// Helper for loading base-reg + the upper part of the offset into the AT
+// register when using two 32-bit loads/stores instead of one 64-bit access.
+int32_t Assembler::LoadUpperOffsetForTwoMemoryAccesses(const MemOperand& src) {
+  DCHECK(!src.rm().is(at));
+  if (is_int16((src.offset_ & kImm16Mask) + kIntSize)) {
+    // Only if the lower part of the offset + kIntSize fits in 16 bits.
+    return LoadRegPlusUpperOffsetPartToAt(src);
+  }
+  // In case the lower part of the offset + kIntSize doesn't fit in 16 bits,
+  // load reg + the whole offset to AT.
+  LoadRegPlusOffsetToAt(src);
+  return 0;
+}
 
 void Assembler::lb(Register rd, const MemOperand& rs) {
   if (is_int16(rs.offset_)) {
     GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to load.
-    LoadRegPlusOffsetToAt(rs);
-    GenInstrImmediate(LB, at, rd, 0);  // Equiv to lb(rd, MemOperand(at, 0));
+    int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+    GenInstrImmediate(LB, at, rd, off16);
   }
 }
 
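The new helper splits a 32-bit offset into a lui-able high half and a signed
16-bit low half, bumping the high half by one when the low half's sign bit is
set so that the two parts re-add to the original value. The arithmetic,
sketched in JS with kLuiShift = 16 and kNegOffset = 0x8000 as used above:

  function splitOffset(offset) {
    var lo = (offset << 16) >> 16;        // low 16 bits, sign-extended
    var hi = (offset >> 16) & 0xFFFF;     // high 16 bits
    if (offset & 0x8000) hi = (hi + 1) & 0xFFFF;  // compensate negative lo
    return {hi: hi, lo: lo};
  }
  var p = splitOffset(0x12348000);
  (((p.hi << 16) + p.lo) | 0) === (0x12348000 | 0);  // => true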
@@ -1799,8 +1830,8 @@
   if (is_int16(rs.offset_)) {
     GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to load.
-    LoadRegPlusOffsetToAt(rs);
-    GenInstrImmediate(LBU, at, rd, 0);  // Equiv to lbu(rd, MemOperand(at, 0));
+    int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+    GenInstrImmediate(LBU, at, rd, off16);
   }
 }
 
@@ -1809,8 +1840,8 @@
   if (is_int16(rs.offset_)) {
     GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to load.
-    LoadRegPlusOffsetToAt(rs);
-    GenInstrImmediate(LH, at, rd, 0);  // Equiv to lh(rd, MemOperand(at, 0));
+    int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+    GenInstrImmediate(LH, at, rd, off16);
   }
 }
 
@@ -1819,8 +1850,8 @@
   if (is_int16(rs.offset_)) {
     GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to load.
-    LoadRegPlusOffsetToAt(rs);
-    GenInstrImmediate(LHU, at, rd, 0);  // Equiv to lhu(rd, MemOperand(at, 0));
+    int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+    GenInstrImmediate(LHU, at, rd, off16);
   }
 }
 
@@ -1829,8 +1860,8 @@
   if (is_int16(rs.offset_)) {
     GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to load.
-    LoadRegPlusOffsetToAt(rs);
-    GenInstrImmediate(LW, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
+    int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+    GenInstrImmediate(LW, at, rd, off16);
   }
 }
 
@@ -1855,8 +1886,8 @@
   if (is_int16(rs.offset_)) {
     GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to store.
-    LoadRegPlusOffsetToAt(rs);
-    GenInstrImmediate(SB, at, rd, 0);  // Equiv to sb(rd, MemOperand(at, 0));
+    int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+    GenInstrImmediate(SB, at, rd, off16);
   }
 }
 
@@ -1865,8 +1896,8 @@
   if (is_int16(rs.offset_)) {
     GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to store.
-    LoadRegPlusOffsetToAt(rs);
-    GenInstrImmediate(SH, at, rd, 0);  // Equiv to sh(rd, MemOperand(at, 0));
+    int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+    GenInstrImmediate(SH, at, rd, off16);
   }
 }
 
@@ -1875,8 +1906,8 @@
   if (is_int16(rs.offset_)) {
     GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to store.
-    LoadRegPlusOffsetToAt(rs);
-    GenInstrImmediate(SW, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
+    int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+    GenInstrImmediate(SW, at, rd, off16);
   }
 }
 
@@ -2172,8 +2203,8 @@
   if (is_int16(src.offset_)) {
     GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to load.
-    LoadRegPlusOffsetToAt(src);
-    GenInstrImmediate(LWC1, at, fd, 0);
+    int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
+    GenInstrImmediate(LWC1, at, fd, off16);
   }
 }
 
@@ -2190,11 +2221,11 @@
       GenInstrImmediate(LWC1, src.rm(), nextfpreg,
                         src.offset_ + Register::kExponentOffset);
     } else {  // Offset > 16 bits, use multiple instructions to load.
-      LoadRegPlusOffsetToAt(src);
-      GenInstrImmediate(LWC1, at, fd, Register::kMantissaOffset);
+      int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
+      GenInstrImmediate(LWC1, at, fd, off16 + Register::kMantissaOffset);
       FPURegister nextfpreg;
       nextfpreg.setcode(fd.code() + 1);
-      GenInstrImmediate(LWC1, at, nextfpreg, Register::kExponentOffset);
+      GenInstrImmediate(LWC1, at, nextfpreg, off16 + Register::kExponentOffset);
     }
   } else {
     DCHECK(IsFp64Mode() || IsFpxxMode());
@@ -2207,9 +2238,9 @@
                         src.offset_ + Register::kExponentOffset);
       mthc1(at, fd);
     } else {  // Offset > 16 bits, use multiple instructions to load.
-      LoadRegPlusOffsetToAt(src);
-      GenInstrImmediate(LWC1, at, fd, Register::kMantissaOffset);
-      GenInstrImmediate(LW, at, at, Register::kExponentOffset);
+      int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
+      GenInstrImmediate(LWC1, at, fd, off16 + Register::kMantissaOffset);
+      GenInstrImmediate(LW, at, at, off16 + Register::kExponentOffset);
       mthc1(at, fd);
     }
   }
@@ -2220,8 +2251,8 @@
   if (is_int16(src.offset_)) {
     GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to load.
-    LoadRegPlusOffsetToAt(src);
-    GenInstrImmediate(SWC1, at, fd, 0);
+    int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
+    GenInstrImmediate(SWC1, at, fd, off16);
   }
 }
 
@@ -2240,11 +2271,11 @@
       GenInstrImmediate(SWC1, src.rm(), nextfpreg,
                         src.offset_ + Register::kExponentOffset);
     } else {  // Offset > 16 bits, use multiple instructions to load.
-      LoadRegPlusOffsetToAt(src);
-      GenInstrImmediate(SWC1, at, fd, Register::kMantissaOffset);
+      int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
+      GenInstrImmediate(SWC1, at, fd, off16 + Register::kMantissaOffset);
       FPURegister nextfpreg;
       nextfpreg.setcode(fd.code() + 1);
-      GenInstrImmediate(SWC1, at, nextfpreg, Register::kExponentOffset);
+      GenInstrImmediate(SWC1, at, nextfpreg, off16 + Register::kExponentOffset);
     }
   } else {
     DCHECK(IsFp64Mode() || IsFpxxMode());
@@ -2257,10 +2288,10 @@
       GenInstrImmediate(SW, src.rm(), at,
                         src.offset_ + Register::kExponentOffset);
     } else {  // Offset > 16 bits, use multiple instructions to load.
-      LoadRegPlusOffsetToAt(src);
-      GenInstrImmediate(SWC1, at, fd, Register::kMantissaOffset);
+      int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
+      GenInstrImmediate(SWC1, at, fd, off16 + Register::kMantissaOffset);
       mfhc1(t8, fd);
-      GenInstrImmediate(SW, at, t8, Register::kExponentOffset);
+      GenInstrImmediate(SW, at, t8, off16 + Register::kExponentOffset);
     }
   }
 }
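
A minimal standalone sketch (illustrative C++, not V8 source) of the offset split performed by the new LoadRegPlusUpperOffsetPartToAt helper above: the 16-bit immediate in MIPS loads/stores is sign-extended, so when bit 15 of the lower half is set, the lower half acts as a negative offset and the upper half must be incremented by one to compensate. The function names and test values below are invented for illustration.

    #include <cassert>
    #include <cstdint>

    // Split a 32-bit offset into a "hi" half for lui and a sign-extended
    // "lo" half used directly as the load/store immediate.
    void SplitOffset(int32_t offset, uint32_t* hi, int16_t* lo) {
      *hi = (static_cast<uint32_t>(offset) >> 16) & 0xFFFF;  // lui operand
      *lo = static_cast<int16_t>(offset & 0xFFFF);  // hardware sign-extends
      if (offset & 0x8000) {  // cf. kNegOffset: lo will act as negative
        *hi += 1;             // compensate in the upper half
      }
    }

    int main() {
      for (int32_t offset : {0x12348000, 0x7FFF7FFF, -0x12345678}) {
        uint32_t hi;
        int16_t lo;
        SplitOffset(offset, &hi, &lo);
        // lui puts hi in the upper halfword; the memory access then adds the
        // sign-extended lo, reproducing the original offset (mod 2^32).
        uint32_t base = (hi & 0xFFFF) << 16;
        assert(base + static_cast<uint32_t>(int32_t{lo}) ==
               static_cast<uint32_t>(offset));
      }
      return 0;
    }
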
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index e58abd8..1df6e3f 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -1055,7 +1055,8 @@
 
   // Record a deoptimization reason that can be used by a log or cpu profiler.
   // Use --trace-deopt to enable.
-  void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
+  void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
+                         int id);
 
   static int RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
                                        intptr_t pc_delta);
@@ -1177,6 +1178,8 @@
 
   // Helpers.
   void LoadRegPlusOffsetToAt(const MemOperand& src);
+  int32_t LoadRegPlusUpperOffsetPartToAt(const MemOperand& src);
+  int32_t LoadUpperOffsetForTwoMemoryAccesses(const MemOperand& src);
 
   // Relocation for a type-recording IC has the AST id added to it.  This
   // member variable is a way to pass the information from the call site to
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 43e6735..966214b 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -566,7 +566,7 @@
   // If either is a Smi (we know that not both are), then they can only
   // be strictly equal if the other is a HeapNumber.
   STATIC_ASSERT(kSmiTag == 0);
-  DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+  DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
   __ And(t2, lhs, Operand(rhs));
   __ JumpIfNotSmi(t2, &not_smis, t0);
   // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
@@ -1625,13 +1625,10 @@
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
   __ Addu(a1, a1, Operand(2));  // a1 was a smi.
 
-  __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
-  __ JumpIfSmi(a0, &runtime);
-  __ GetObjectType(a0, a2, a2);
-  __ Branch(&runtime, ne, a2, Operand(JS_OBJECT_TYPE));
+  // Check that the last match info is a FixedArray.
+  __ lw(last_match_info_elements, MemOperand(sp, kLastMatchInfoOffset));
+  __ JumpIfSmi(last_match_info_elements, &runtime);
   // Check that the object has fast elements.
-  __ lw(last_match_info_elements,
-        FieldMemOperand(a0, JSArray::kElementsOffset));
   __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
   __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
   __ Branch(&runtime, ne, a0, Operand(at));
@@ -1639,7 +1636,7 @@
   // additional information.
   __ lw(a0,
         FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
-  __ Addu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
+  __ Addu(a2, a1, Operand(RegExpMatchInfo::kLastMatchOverhead));
   __ sra(at, a0, kSmiTagSize);
   __ Branch(&runtime, gt, a2, Operand(at));
 
@@ -1648,28 +1645,20 @@
   // Store the capture count.
   __ sll(a2, a1, kSmiTagSize + kSmiShiftSize);  // To smi.
   __ sw(a2, FieldMemOperand(last_match_info_elements,
-                             RegExpImpl::kLastCaptureCountOffset));
+                            RegExpMatchInfo::kNumberOfCapturesOffset));
   // Store last subject and last input.
-  __ sw(subject,
-         FieldMemOperand(last_match_info_elements,
-                         RegExpImpl::kLastSubjectOffset));
+  __ sw(subject, FieldMemOperand(last_match_info_elements,
+                                 RegExpMatchInfo::kLastSubjectOffset));
   __ mov(a2, subject);
   __ RecordWriteField(last_match_info_elements,
-                      RegExpImpl::kLastSubjectOffset,
-                      subject,
-                      t3,
-                      kRAHasNotBeenSaved,
-                      kDontSaveFPRegs);
+                      RegExpMatchInfo::kLastSubjectOffset, subject, t3,
+                      kRAHasNotBeenSaved, kDontSaveFPRegs);
   __ mov(subject, a2);
-  __ sw(subject,
-         FieldMemOperand(last_match_info_elements,
-                         RegExpImpl::kLastInputOffset));
+  __ sw(subject, FieldMemOperand(last_match_info_elements,
+                                 RegExpMatchInfo::kLastInputOffset));
   __ RecordWriteField(last_match_info_elements,
-                      RegExpImpl::kLastInputOffset,
-                      subject,
-                      t3,
-                      kRAHasNotBeenSaved,
-                      kDontSaveFPRegs);
+                      RegExpMatchInfo::kLastInputOffset, subject, t3,
+                      kRAHasNotBeenSaved, kDontSaveFPRegs);
 
   // Get the static offsets vector filled by the native regexp code.
   ExternalReference address_of_static_offsets_vector =
@@ -1681,9 +1670,8 @@
   Label next_capture, done;
   // Capture register counter starts from number of capture registers and
   // counts down until wrapping after zero.
-  __ Addu(a0,
-         last_match_info_elements,
-         Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
+  __ Addu(a0, last_match_info_elements,
+          Operand(RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag));
   __ bind(&next_capture);
   __ Subu(a1, a1, Operand(1));
   __ Branch(&done, lt, a1, Operand(zero_reg));
@@ -1699,7 +1687,7 @@
   __ bind(&done);
 
   // Return last match info.
-  __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
+  __ mov(v0, last_match_info_elements);
   __ DropAndRet(4);
 
   // Do the runtime call to execute the regexp.
@@ -1917,6 +1905,7 @@
 }
 
 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
+  // a0 - number of arguments
   // a1 - function
   // a3 - slot id
   // a2 - vector
@@ -1924,25 +1913,22 @@
   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, at);
   __ Branch(miss, ne, a1, Operand(at));
 
-  __ li(a0, Operand(arg_count()));
-
   // Increment the call count for monomorphic function calls.
   IncrementCallCount(masm, a2, a3);
 
   __ mov(a2, t0);
   __ mov(a3, a1);
-  ArrayConstructorStub stub(masm->isolate(), arg_count());
+  ArrayConstructorStub stub(masm->isolate());
   __ TailCallStub(&stub);
 }
 
 
 void CallICStub::Generate(MacroAssembler* masm) {
+  // a0 - number of arguments
   // a1 - function
   // a3 - slot id (Smi)
   // a2 - vector
   Label extra_checks_or_miss, call, call_function, call_count_incremented;
-  int argc = arg_count();
-  ParameterCount actual(argc);
 
   // The checks. First, does r1 match the recorded monomorphic target?
   __ Lsa(t0, a2, a3, kPointerSizeLog2 - kSmiTagSize);
@@ -1976,9 +1962,7 @@
 
   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
                                                     tail_call_mode()),
-          RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
-          USE_DELAY_SLOT);
-  __ li(a0, Operand(argc));  // In delay slot.
+          RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
 
   __ bind(&extra_checks_or_miss);
   Label uninitialized, miss, not_allocation_site;
@@ -2019,9 +2003,7 @@
   __ bind(&call_count_incremented);
 
   __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
-          RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
-          USE_DELAY_SLOT);
-  __ li(a0, Operand(argc));  // In delay slot.
+          RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
 
   __ bind(&uninitialized);
 
@@ -2050,11 +2032,15 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     CreateWeakCellStub create_stub(masm->isolate());
+    __ SmiTag(a0);
+    __ Push(a0);
     __ Push(a2, a3);
     __ Push(cp, a1);
     __ CallStub(&create_stub);
     __ Pop(cp, a1);
     __ Pop(a2, a3);
+    __ Pop(a0);
+    __ SmiUntag(a0);
   }
 
   __ Branch(&call_function);
@@ -2071,6 +2057,10 @@
 void CallICStub::GenerateMiss(MacroAssembler* masm) {
   FrameScope scope(masm, StackFrame::INTERNAL);
 
+  // Preserve the number of arguments as a Smi.
+  __ SmiTag(a0);
+  __ Push(a0);
+
   // Push the receiver and the function and feedback info.
   __ Push(a1, a2, a3);
 
@@ -2079,6 +2069,10 @@
 
   // Move result to a1 and exit the internal frame.
   __ mov(a1, v0);
+
+  // Restore number of arguments.
+  __ Pop(a0);
+  __ SmiUntag(a0);
 }
 
 
@@ -3183,16 +3177,6 @@
   Label need_incremental;
   Label need_incremental_pop_scratch;
 
-  __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
-  __ lw(regs_.scratch1(),
-        MemOperand(regs_.scratch0(),
-                   MemoryChunk::kWriteBarrierCounterOffset));
-  __ Subu(regs_.scratch1(), regs_.scratch1(), Operand(1));
-  __ sw(regs_.scratch1(),
-         MemOperand(regs_.scratch0(),
-                    MemoryChunk::kWriteBarrierCounterOffset));
-  __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
-
   // Let's look at the color of the object:  If it is not black we don't have
   // to inform the incremental marker.
   __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
@@ -3276,21 +3260,6 @@
   __ Addu(sp, sp, a1);
 }
 
-
-void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  LoadICStub stub(isolate());
-  stub.GenerateForTrampoline(masm);
-}
-
-
-void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  KeyedLoadICStub stub(isolate());
-  stub.GenerateForTrampoline(masm);
-}
-
-
 void CallICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(a2);
   CallICStub stub(isolate(), state());
@@ -3298,14 +3267,6 @@
 }
 
 
-void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-
-void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-
 static void HandleArrayCases(MacroAssembler* masm, Register feedback,
                              Register receiver_map, Register scratch1,
                              Register scratch2, bool is_polymorphic,
@@ -3392,180 +3353,12 @@
   __ Jump(t9);
 }
 
-
-void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // a1
-  Register name = LoadWithVectorDescriptor::NameRegister();          // a2
-  Register vector = LoadWithVectorDescriptor::VectorRegister();      // a3
-  Register slot = LoadWithVectorDescriptor::SlotRegister();          // a0
-  Register feedback = t0;
-  Register receiver_map = t1;
-  Register scratch1 = t4;
-
-  __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
-  __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
-                        scratch1, &compare_map, &load_smi_map, &try_array);
-
-  // Is it a fixed array?
-  __ bind(&try_array);
-  __ lw(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
-  __ Branch(&not_array, ne, at, Operand(scratch1));
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, t5, true, &miss);
-
-  __ bind(&not_array);
-  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
-  __ Branch(&miss, ne, at, Operand(feedback));
-  masm->isolate()->load_stub_cache()->GenerateProbe(
-      masm, receiver, name, feedback, receiver_map, scratch1, t5);
-
-  __ bind(&miss);
-  LoadIC::GenerateMiss(masm);
-
-  __ bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ jmp(&compare_map);
-}
-
-
-void KeyedLoadICStub::Generate(MacroAssembler* masm) {
-  GenerateImpl(masm, false);
-}
-
-
-void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-
-void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // a1
-  Register key = LoadWithVectorDescriptor::NameRegister();           // a2
-  Register vector = LoadWithVectorDescriptor::VectorRegister();      // a3
-  Register slot = LoadWithVectorDescriptor::SlotRegister();          // a0
-  Register feedback = t0;
-  Register receiver_map = t1;
-  Register scratch1 = t4;
-
-  __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
-  __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
-                        scratch1, &compare_map, &load_smi_map, &try_array);
-
-  __ bind(&try_array);
-  // Is it a fixed array?
-  __ lw(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
-  __ Branch(&not_array, ne, at, Operand(scratch1));
-  // We have a polymorphic element handler.
-  __ JumpIfNotSmi(key, &miss);
-
-  Label polymorphic, try_poly_name;
-  __ bind(&polymorphic);
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, t5, true, &miss);
-
-  __ bind(&not_array);
-  // Is it generic?
-  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
-  __ Branch(&try_poly_name, ne, at, Operand(feedback));
-  Handle<Code> megamorphic_stub =
-      KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
-  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
-  __ bind(&try_poly_name);
-  // We might have a name in feedback, and a fixed array in the next slot.
-  __ Branch(&miss, ne, key, Operand(feedback));
-  // If the name comparison succeeded, we know we have a fixed array with
-  // at least one map/handler pair.
-  __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
-  __ lw(feedback,
-        FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, t5, false, &miss);
-
-  __ bind(&miss);
-  KeyedLoadIC::GenerateMiss(masm);
-
-  __ bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ jmp(&compare_map);
-}
-
-void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
-  StoreICStub stub(isolate(), state());
-  stub.GenerateForTrampoline(masm);
-}
-
 void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
   KeyedStoreICStub stub(isolate(), state());
   stub.GenerateForTrampoline(masm);
 }
 
-void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // a1
-  Register key = StoreWithVectorDescriptor::NameRegister();           // a2
-  Register vector = StoreWithVectorDescriptor::VectorRegister();      // a3
-  Register slot = StoreWithVectorDescriptor::SlotRegister();          // t0
-  DCHECK(StoreWithVectorDescriptor::ValueRegister().is(a0));          // a0
-  Register feedback = t1;
-  Register receiver_map = t2;
-  Register scratch1 = t5;
-
-  __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
-  __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
-                        scratch1, &compare_map, &load_smi_map, &try_array);
-
-  // Is it a fixed array?
-  __ bind(&try_array);
-  __ lw(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
-  __ Branch(&not_array, ne, scratch1, Operand(at));
-
-  Register scratch2 = t4;
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
-                   &miss);
-
-  __ bind(&not_array);
-  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
-  __ Branch(&miss, ne, feedback, Operand(at));
-  masm->isolate()->store_stub_cache()->GenerateProbe(
-      masm, receiver, key, feedback, receiver_map, scratch1, scratch2);
-
-  __ bind(&miss);
-  StoreIC::GenerateMiss(masm);
-
-  __ bind(&load_smi_map);
-  __ Branch(USE_DELAY_SLOT, &compare_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);  // In delay slot.
-}
-
 void KeyedStoreICStub::Generate(MacroAssembler* masm) {
   GenerateImpl(masm, false);
 }
@@ -3908,29 +3701,18 @@
 void ArrayConstructorStub::GenerateDispatchToArrayStub(
     MacroAssembler* masm,
     AllocationSiteOverrideMode mode) {
-  if (argument_count() == ANY) {
-    Label not_zero_case, not_one_case;
-    __ And(at, a0, a0);
-    __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
-    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+  Label not_zero_case, not_one_case;
+  __ And(at, a0, a0);
+  __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
+  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
 
-    __ bind(&not_zero_case);
-    __ Branch(&not_one_case, gt, a0, Operand(1));
-    CreateArrayDispatchOneArgument(masm, mode);
+  __ bind(&not_zero_case);
+  __ Branch(&not_one_case, gt, a0, Operand(1));
+  CreateArrayDispatchOneArgument(masm, mode);
 
-    __ bind(&not_one_case);
-    ArrayNArgumentsConstructorStub stub(masm->isolate());
-    __ TailCallStub(&stub);
-  } else if (argument_count() == NONE) {
-    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-  } else if (argument_count() == ONE) {
-    CreateArrayDispatchOneArgument(masm, mode);
-  } else if (argument_count() == MORE_THAN_ONE) {
-    ArrayNArgumentsConstructorStub stub(masm->isolate());
-    __ TailCallStub(&stub);
-  } else {
-    UNREACHABLE();
-  }
+  __ bind(&not_one_case);
+  ArrayNArgumentsConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
 }
 
 
@@ -3983,23 +3765,10 @@
 
   // Subclassing.
   __ bind(&subclassing);
-  switch (argument_count()) {
-    case ANY:
-    case MORE_THAN_ONE:
-      __ Lsa(at, sp, a0, kPointerSizeLog2);
-      __ sw(a1, MemOperand(at));
-      __ li(at, Operand(3));
-      __ addu(a0, a0, at);
-      break;
-    case NONE:
-      __ sw(a1, MemOperand(sp, 0 * kPointerSize));
-      __ li(a0, Operand(3));
-      break;
-    case ONE:
-      __ sw(a1, MemOperand(sp, 1 * kPointerSize));
-      __ li(a0, Operand(4));
-      break;
-  }
+  __ Lsa(at, sp, a0, kPointerSizeLog2);
+  __ sw(a1, MemOperand(at));
+  __ li(at, Operand(3));
+  __ addu(a0, a0, at);
   __ Push(a3, a2);
   __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
 }
@@ -4260,7 +4029,7 @@
     __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
     __ sw(a1, FieldMemOperand(v0, JSArray::kPropertiesOffset));
     __ sw(a1, FieldMemOperand(v0, JSArray::kElementsOffset));
-    __ Move(a1, Smi::FromInt(0));
+    __ Move(a1, Smi::kZero);
     __ Ret(USE_DELAY_SLOT);
     __ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset));  // In delay slot
     STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
@@ -4421,7 +4190,7 @@
       FixedArray::kHeaderSize + 2 * kPointerSize;
   // If there are no mapped parameters, we do not need the parameter_map.
   Label param_map_size;
-  DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+  DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
   __ Branch(USE_DELAY_SLOT, &param_map_size, eq, t2, Operand(zero_reg));
   __ mov(t5, zero_reg);  // In delay slot: param map size = 0 when t2 == 0.
   __ sll(t5, t2, 1);
@@ -4486,13 +4255,13 @@
   // Initialize parameter map. If there are no mapped arguments, we're done.
   Label skip_parameter_map;
   Label skip3;
-  __ Branch(&skip3, ne, t2, Operand(Smi::FromInt(0)));
+  __ Branch(&skip3, ne, t2, Operand(Smi::kZero));
   // Move backing store address to a1, because it is
   // expected there when filling in the unmapped arguments.
   __ mov(a1, t0);
   __ bind(&skip3);
 
-  __ Branch(&skip_parameter_map, eq, t2, Operand(Smi::FromInt(0)));
+  __ Branch(&skip_parameter_map, eq, t2, Operand(Smi::kZero));
 
   __ LoadRoot(t1, Heap::kSloppyArgumentsElementsMapRootIndex);
   __ sw(t1, FieldMemOperand(t0, FixedArray::kMapOffset));
@@ -4537,7 +4306,7 @@
   __ sw(t3, MemOperand(t6));
   __ Addu(t5, t5, Operand(Smi::FromInt(1)));
   __ bind(&parameters_test);
-  __ Branch(&parameters_loop, ne, t1, Operand(Smi::FromInt(0)));
+  __ Branch(&parameters_loop, ne, t1, Operand(Smi::kZero));
 
   // t1 = argument count (tagged).
   __ lw(t1, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
@@ -4692,119 +4461,6 @@
 }
 
 
-void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
-  Register context_reg = cp;
-  Register slot_reg = a2;
-  Register value_reg = a0;
-  Register cell_reg = t0;
-  Register cell_value_reg = t1;
-  Register cell_details_reg = t2;
-  Label fast_heapobject_case, fast_smi_case, slow_case;
-
-  if (FLAG_debug_code) {
-    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-    __ Check(ne, kUnexpectedValue, value_reg, Operand(at));
-  }
-
-  // Go up context chain to the script context.
-  for (int i = 0; i < depth(); ++i) {
-    __ lw(cell_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
-    context_reg = cell_reg;
-  }
-
-  // Load the PropertyCell at the specified slot.
-  __ Lsa(at, context_reg, slot_reg, kPointerSizeLog2);
-  __ lw(cell_reg, ContextMemOperand(at, 0));
-
-  // Load PropertyDetails for the cell (actually only the cell_type and kind).
-  __ lw(cell_details_reg,
-        FieldMemOperand(cell_reg, PropertyCell::kDetailsOffset));
-  __ SmiUntag(cell_details_reg);
-  __ And(cell_details_reg, cell_details_reg,
-         PropertyDetails::PropertyCellTypeField::kMask |
-             PropertyDetails::KindField::kMask |
-             PropertyDetails::kAttributesReadOnlyMask);
-
-  // Check if PropertyCell holds mutable data.
-  Label not_mutable_data;
-  __ Branch(&not_mutable_data, ne, cell_details_reg,
-            Operand(PropertyDetails::PropertyCellTypeField::encode(
-                        PropertyCellType::kMutable) |
-                    PropertyDetails::KindField::encode(kData)));
-  __ JumpIfSmi(value_reg, &fast_smi_case);
-  __ bind(&fast_heapobject_case);
-  __ sw(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
-  __ RecordWriteField(cell_reg, PropertyCell::kValueOffset, value_reg,
-                      cell_details_reg, kRAHasNotBeenSaved, kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  // RecordWriteField clobbers the value register, so we need to reload.
-  __ Ret(USE_DELAY_SLOT);
-  __ lw(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
-  __ bind(&not_mutable_data);
-
-  // Check if PropertyCell value matches the new value (relevant for Constant,
-  // ConstantType and Undefined cells).
-  Label not_same_value;
-  __ lw(cell_value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
-  __ Branch(&not_same_value, ne, value_reg, Operand(cell_value_reg));
-  // Make sure the PropertyCell is not marked READ_ONLY.
-  __ And(at, cell_details_reg, PropertyDetails::kAttributesReadOnlyMask);
-  __ Branch(&slow_case, ne, at, Operand(zero_reg));
-  if (FLAG_debug_code) {
-    Label done;
-    // This can only be true for Constant, ConstantType and Undefined cells,
-    // because we never store the_hole via this stub.
-    __ Branch(&done, eq, cell_details_reg,
-              Operand(PropertyDetails::PropertyCellTypeField::encode(
-                          PropertyCellType::kConstant) |
-                      PropertyDetails::KindField::encode(kData)));
-    __ Branch(&done, eq, cell_details_reg,
-              Operand(PropertyDetails::PropertyCellTypeField::encode(
-                          PropertyCellType::kConstantType) |
-                      PropertyDetails::KindField::encode(kData)));
-    __ Check(eq, kUnexpectedValue, cell_details_reg,
-             Operand(PropertyDetails::PropertyCellTypeField::encode(
-                         PropertyCellType::kUndefined) |
-                     PropertyDetails::KindField::encode(kData)));
-    __ bind(&done);
-  }
-  __ Ret();
-  __ bind(&not_same_value);
-
-  // Check if PropertyCell contains data with constant type (and is not
-  // READ_ONLY).
-  __ Branch(&slow_case, ne, cell_details_reg,
-            Operand(PropertyDetails::PropertyCellTypeField::encode(
-                        PropertyCellType::kConstantType) |
-                    PropertyDetails::KindField::encode(kData)));
-
-  // Now either both old and new values must be SMIs or both must be heap
-  // objects with same map.
-  Label value_is_heap_object;
-  __ JumpIfNotSmi(value_reg, &value_is_heap_object);
-  __ JumpIfNotSmi(cell_value_reg, &slow_case);
-  // Old and new values are SMIs, no need for a write barrier here.
-  __ bind(&fast_smi_case);
-  __ Ret(USE_DELAY_SLOT);
-  __ sw(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
-  __ bind(&value_is_heap_object);
-  __ JumpIfSmi(cell_value_reg, &slow_case);
-  Register cell_value_map_reg = cell_value_reg;
-  __ lw(cell_value_map_reg,
-        FieldMemOperand(cell_value_reg, HeapObject::kMapOffset));
-  __ Branch(&fast_heapobject_case, eq, cell_value_map_reg,
-            FieldMemOperand(value_reg, HeapObject::kMapOffset));
-
-  // Fallback to the runtime.
-  __ bind(&slow_case);
-  __ SmiTag(slot_reg);
-  __ Push(slot_reg, value_reg);
-  __ TailCallRuntime(is_strict(language_mode())
-                         ? Runtime::kStoreGlobalViaContext_Strict
-                         : Runtime::kStoreGlobalViaContext_Sloppy);
-}
-
-
 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
   return ref0.address() - ref1.address();
 }
@@ -5074,7 +4730,7 @@
   __ sw(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
   __ sw(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
   // should_throw_on_error -> false
-  DCHECK(Smi::FromInt(0) == nullptr);
+  DCHECK(Smi::kZero == nullptr);
   __ sw(zero_reg,
         MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
   __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
diff --git a/src/mips/interface-descriptors-mips.cc b/src/mips/interface-descriptors-mips.cc
index aed4142..486ae68 100644
--- a/src/mips/interface-descriptors-mips.cc
+++ b/src/mips/interface-descriptors-mips.cc
@@ -29,9 +29,9 @@
 const Register LoadDescriptor::NameRegister() { return a2; }
 const Register LoadDescriptor::SlotRegister() { return a0; }
 
-
 const Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
 
+const Register LoadICProtoArrayDescriptor::HandlerRegister() { return t0; }
 
 const Register StoreDescriptor::ReceiverRegister() { return a1; }
 const Register StoreDescriptor::NameRegister() { return a2; }
@@ -44,10 +44,6 @@
 const Register StoreTransitionDescriptor::VectorRegister() { return a3; }
 const Register StoreTransitionDescriptor::MapRegister() { return t1; }
 
-const Register StoreGlobalViaContextDescriptor::SlotRegister() { return a2; }
-const Register StoreGlobalViaContextDescriptor::ValueRegister() { return a0; }
-
-
 const Register StringCompareDescriptor::LeftRegister() { return a1; }
 const Register StringCompareDescriptor::RightRegister() { return a0; }
 
@@ -160,7 +156,7 @@
 
 void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {a1, a3, a2};
+  Register registers[] = {a1, a0, a3, a2};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
@@ -209,13 +205,6 @@
 }
 
 
-void RegExpConstructResultDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {a2, a1, a0};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
 void TransitionElementsKindDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {a0, a1};
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index d61717d..c3abe4f 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -500,85 +500,6 @@
 // Allocation support.
 
 
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
-                                            Register scratch,
-                                            Label* miss) {
-  Label same_contexts;
-  Register temporary = t8;
-
-  DCHECK(!holder_reg.is(scratch));
-  DCHECK(!holder_reg.is(at));
-  DCHECK(!scratch.is(at));
-
-  // Load current lexical context from the active StandardFrame, which
-  // may require crawling past STUB frames.
-  Label load_context;
-  Label has_context;
-  mov(at, fp);
-  bind(&load_context);
-  lw(scratch, MemOperand(at, CommonFrameConstants::kContextOrFrameTypeOffset));
-  // Passing temporary register, otherwise JumpIfNotSmi modifies register at.
-  JumpIfNotSmi(scratch, &has_context, temporary);
-  lw(at, MemOperand(at, CommonFrameConstants::kCallerFPOffset));
-  Branch(&load_context);
-  bind(&has_context);
-
-  // In debug mode, make sure the lexical context is set.
-#ifdef DEBUG
-  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
-      scratch, Operand(zero_reg));
-#endif
-
-  // Load the native context of the current context.
-  lw(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
-
-  // Check the context is a native context.
-  if (emit_debug_code()) {
-    push(holder_reg);  // Temporarily save holder on the stack.
-    // Read the first word and compare to the native_context_map.
-    lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
-    LoadRoot(at, Heap::kNativeContextMapRootIndex);
-    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
-          holder_reg, Operand(at));
-    pop(holder_reg);  // Restore holder.
-  }
-
-  // Check if both contexts are the same.
-  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
-  Branch(&same_contexts, eq, scratch, Operand(at));
-
-  // Check the context is a native context.
-  if (emit_debug_code()) {
-    push(holder_reg);  // Temporarily save holder on the stack.
-    mov(holder_reg, at);  // Move at to its holding place.
-    LoadRoot(at, Heap::kNullValueRootIndex);
-    Check(ne, kJSGlobalProxyContextShouldNotBeNull,
-          holder_reg, Operand(at));
-
-    lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
-    LoadRoot(at, Heap::kNativeContextMapRootIndex);
-    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
-          holder_reg, Operand(at));
-    // Restore at is not needed. at is reloaded below.
-    pop(holder_reg);  // Restore holder.
-    // Restore at to holder's context.
-    lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
-  }
-
-  // Check that the security token in the calling global object is
-  // compatible with the security token in the receiving global
-  // object.
-  int token_offset = Context::kHeaderSize +
-                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
-
-  lw(scratch, FieldMemOperand(scratch, token_offset));
-  lw(at, FieldMemOperand(at, token_offset));
-  Branch(miss, ne, scratch, Operand(at));
-
-  bind(&same_contexts);
-}
-
-
 // Compute the hash code from the untagged key.  This must be kept in sync with
 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
 // code-stub-hydrogen.cc
@@ -619,87 +540,6 @@
   And(reg0, reg0, Operand(0x3fffffff));
 }
 
-
-void MacroAssembler::LoadFromNumberDictionary(Label* miss,
-                                              Register elements,
-                                              Register key,
-                                              Register result,
-                                              Register reg0,
-                                              Register reg1,
-                                              Register reg2) {
-  // Register use:
-  //
-  // elements - holds the slow-case elements of the receiver on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // key      - holds the smi key on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  //
-  // result   - holds the result on exit if the load succeeded.
-  //            Allowed to be the same as 'key' or 'result'.
-  //            Unchanged on bailout so 'key' or 'result' can be used
-  //            in further computation.
-  //
-  // Scratch registers:
-  //
-  // reg0 - holds the untagged key on entry and holds the hash once computed.
-  //
-  // reg1 - Used to hold the capacity mask of the dictionary.
-  //
-  // reg2 - Used for the index into the dictionary.
-  // at   - Temporary (avoid MacroAssembler instructions also using 'at').
-  Label done;
-
-  GetNumberHash(reg0, reg1);
-
-  // Compute the capacity mask.
-  lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
-  sra(reg1, reg1, kSmiTagSize);
-  Subu(reg1, reg1, Operand(1));
-
-  // Generate an unrolled loop that performs a few probes before giving up.
-  for (int i = 0; i < kNumberDictionaryProbes; i++) {
-    // Use reg2 for index calculations and keep the hash intact in reg0.
-    mov(reg2, reg0);
-    // Compute the masked index: (hash + i + i * i) & mask.
-    if (i > 0) {
-      Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
-    }
-    and_(reg2, reg2, reg1);
-
-    // Scale the index by multiplying by the element size.
-    DCHECK(SeededNumberDictionary::kEntrySize == 3);
-    Lsa(reg2, reg2, reg2, 1);  // reg2 = reg2 * 3.
-
-    // Check if the key is identical to the name.
-    Lsa(reg2, elements, reg2, kPointerSizeLog2);
-
-    lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
-    if (i != kNumberDictionaryProbes - 1) {
-      Branch(&done, eq, key, Operand(at));
-    } else {
-      Branch(miss, ne, key, Operand(at));
-    }
-  }
-
-  bind(&done);
-  // Check that the value is a field property.
-  // reg2: elements + (index * kPointerSize).
-  const int kDetailsOffset =
-      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
-  lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
-  DCHECK_EQ(DATA, 0);
-  And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
-  Branch(miss, ne, at, Operand(zero_reg));
-
-  // Get the value at the masked, scaled index and return.
-  const int kValueOffset =
-      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
-  lw(result, FieldMemOperand(reg2, kValueOffset));
-}
-
-
 // ---------------------------------------------------------------------------
 // Instruction macros.
 
@@ -1217,26 +1057,18 @@
 void MacroAssembler::ByteSwapSigned(Register dest, Register src,
                                     int operand_size) {
   DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4);
-  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
-    if (operand_size == 2) {
-      seh(src, src);
-    } else if (operand_size == 1) {
-      seb(src, src);
-    }
-    // No need to do any preparation if operand_size is 4
 
+  if (operand_size == 2) {
+    Seh(src, src);
+  } else if (operand_size == 1) {
+    Seb(src, src);
+  }
+  // No need to do any preparation if operand_size is 4
+
+  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
     wsbh(dest, src);
     rotr(dest, dest, 16);
   } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) {
-    if (operand_size == 1) {
-      sll(src, src, 24);
-      sra(src, src, 24);
-    } else if (operand_size == 2) {
-      sll(src, src, 16);
-      sra(src, src, 16);
-    }
-    // No need to do any preparation if operand_size is 4
-
     Register tmp = t0;
     Register tmp2 = t1;
 
@@ -1917,6 +1749,26 @@
   }
 }
 
+void MacroAssembler::Seb(Register rd, Register rt) {
+  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+    seb(rd, rt);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson));
+    sll(rd, rt, 24);
+    sra(rd, rd, 24);
+  }
+}
+
+void MacroAssembler::Seh(Register rd, Register rt) {
+  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+    seh(rd, rt);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson));
+    sll(rd, rt, 16);
+    sra(rd, rd, 16);
+  }
+}
+
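
The shift-based fallback inside the new Seb/Seh macros can be sketched outside V8 as follows (illustrative C++, not part of the patch; assumes two's-complement int32, as on the MIPS targets): shifting the value to the top of the word and arithmetically shifting it back sign-extends the low byte or halfword.

    #include <cassert>
    #include <cstdint>

    int32_t SignExtendByte(int32_t x) {
      // Mirrors: sll rd, rt, 24; sra rd, rd, 24
      return static_cast<int32_t>(static_cast<uint32_t>(x) << 24) >> 24;
    }

    int32_t SignExtendHalf(int32_t x) {
      // Mirrors: sll rd, rt, 16; sra rd, rd, 16
      return static_cast<int32_t>(static_cast<uint32_t>(x) << 16) >> 16;
    }

    int main() {
      assert(SignExtendByte(0x000000FF) == -1);
      assert(SignExtendByte(0x0000007F) == 127);
      assert(SignExtendHalf(0x0000FFFF) == -1);
      assert(SignExtendHalf(0x00007FFF) == 32767);
      return 0;
    }
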
 void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) {
   if (IsMipsArchVariant(kMips32r6)) {
     // r6 neg_s changes the sign for NaN-like operands as well.
@@ -4654,75 +4506,6 @@
   STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
 }
 
-
-void MacroAssembler::CopyBytes(Register src,
-                               Register dst,
-                               Register length,
-                               Register scratch) {
-  Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
-
-  // Align src before copying in word size chunks.
-  Branch(&byte_loop, le, length, Operand(kPointerSize));
-  bind(&align_loop_1);
-  And(scratch, src, kPointerSize - 1);
-  Branch(&word_loop, eq, scratch, Operand(zero_reg));
-  lbu(scratch, MemOperand(src));
-  Addu(src, src, 1);
-  sb(scratch, MemOperand(dst));
-  Addu(dst, dst, 1);
-  Subu(length, length, Operand(1));
-  Branch(&align_loop_1, ne, length, Operand(zero_reg));
-
-  // Copy bytes in word size chunks.
-  bind(&word_loop);
-  if (emit_debug_code()) {
-    And(scratch, src, kPointerSize - 1);
-    Assert(eq, kExpectingAlignmentForCopyBytes,
-        scratch, Operand(zero_reg));
-  }
-  Branch(&byte_loop, lt, length, Operand(kPointerSize));
-  lw(scratch, MemOperand(src));
-  Addu(src, src, kPointerSize);
-
-  // TODO(kalmard) check if this can be optimized to use sw in most cases.
-  // Can't use unaligned access - copy byte by byte.
-  if (kArchEndian == kLittle) {
-    sb(scratch, MemOperand(dst, 0));
-    srl(scratch, scratch, 8);
-    sb(scratch, MemOperand(dst, 1));
-    srl(scratch, scratch, 8);
-    sb(scratch, MemOperand(dst, 2));
-    srl(scratch, scratch, 8);
-    sb(scratch, MemOperand(dst, 3));
-  } else {
-    sb(scratch, MemOperand(dst, 3));
-    srl(scratch, scratch, 8);
-    sb(scratch, MemOperand(dst, 2));
-    srl(scratch, scratch, 8);
-    sb(scratch, MemOperand(dst, 1));
-    srl(scratch, scratch, 8);
-    sb(scratch, MemOperand(dst, 0));
-  }
-
-  Addu(dst, dst, 4);
-
-  Subu(length, length, Operand(kPointerSize));
-  Branch(&word_loop);
-
-  // Copy the last bytes if any left.
-  bind(&byte_loop);
-  Branch(&done, eq, length, Operand(zero_reg));
-  bind(&byte_loop_1);
-  lbu(scratch, MemOperand(src));
-  Addu(src, src, 1);
-  sb(scratch, MemOperand(dst));
-  Addu(dst, dst, 1);
-  Subu(length, length, Operand(1));
-  Branch(&byte_loop_1, ne, length, Operand(zero_reg));
-  bind(&done);
-}
-
-
 void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
                                                 Register end_address,
                                                 Register filler) {
@@ -4735,20 +4518,6 @@
   Branch(&loop, ult, current_address, Operand(end_address));
 }
 
-
-void MacroAssembler::CheckFastElements(Register map,
-                                       Register scratch,
-                                       Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  STATIC_ASSERT(FAST_ELEMENTS == 2);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
-  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
-  Branch(fail, hi, scratch,
-         Operand(Map::kMaximumBitField2FastHoleyElementValue));
-}
-
-
 void MacroAssembler::CheckFastObjectElements(Register map,
                                              Register scratch,
                                              Label* fail) {
@@ -5344,18 +5113,6 @@
   return has_frame_ || !stub->SometimesSetsUpAFrame();
 }
 
-
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
-  // If the hash field contains an array index pick it out. The assert checks
-  // that the constants for the maximum number of digits for an array index
-  // cached in the hash field and the number of bits reserved for it does not
-  // conflict.
-  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
-         (1 << String::kArrayIndexValueBits));
-  DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
-}
-
-
 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
                                                FPURegister result,
                                                Register scratch1,
@@ -6473,7 +6230,7 @@
   lw(at, FieldMemOperand(string, String::kLengthOffset));
   Check(lt, kIndexIsTooLarge, index, Operand(at));
 
-  DCHECK(Smi::FromInt(0) == 0);
+  DCHECK(Smi::kZero == 0);
   Check(ge, kIndexIsNegative, index, Operand(zero_reg));
 
   SmiUntag(index, index);
@@ -6733,7 +6490,7 @@
 
   // For all objects but the receiver, check that the cache is empty.
   EnumLength(a3, a1);
-  Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
+  Branch(call_runtime, ne, a3, Operand(Smi::kZero));
 
   bind(&start);
 
@@ -6803,13 +6560,14 @@
   ExternalReference new_space_allocation_top_adr =
       ExternalReference::new_space_allocation_top_address(isolate());
   const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
-  const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+  const int kMementoLastWordOffset =
+      kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
 
   // Bail out if the object is not in new space.
   JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
   // If the object is in new space, we need to check whether it is on the same
   // page as the current top.
-  Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+  Addu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
   li(at, Operand(new_space_allocation_top_adr));
   lw(at, MemOperand(at));
   Xor(scratch_reg, scratch_reg, Operand(at));
@@ -6818,7 +6576,7 @@
   // The object is on a different page than allocation top. Bail out if the
   // object sits on the page boundary as no memento can follow and we cannot
   // touch the memory following it.
-  Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+  Addu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
   Xor(scratch_reg, scratch_reg, Operand(receiver_reg));
   And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
   Branch(no_memento_found, ne, scratch_reg, Operand(zero_reg));
@@ -6827,10 +6585,10 @@
   // If top is on the same page as the current object, we need to check whether
   // we are below top.
   bind(&top_check);
-  Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+  Addu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
   li(at, Operand(new_space_allocation_top_adr));
   lw(at, MemOperand(at));
-  Branch(no_memento_found, gt, scratch_reg, Operand(at));
+  Branch(no_memento_found, ge, scratch_reg, Operand(at));
   // Memento map check.
   bind(&map_check);
   lw(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
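
The memento checks above hinge on a page-masked address comparison: two addresses lie on the same page exactly when they agree in every bit above the alignment mask, and probing the memento's last word (rather than its end) means a memento ending exactly at allocation top is still handled by the ge branch. A small sketch (illustrative C++ with an invented page size, not V8's constants) of the same-page predicate:

    #include <cassert>
    #include <cstdint>

    // Invented 512 KB page; stands in for Page::kPageAlignmentMask.
    constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << 19) - 1;

    bool OnSamePage(uintptr_t a, uintptr_t b) {
      // Same pattern as above: Xor, then And with ~mask, then compare to 0.
      return ((a ^ b) & ~kPageAlignmentMask) == 0;
    }

    int main() {
      uintptr_t page = uintptr_t{7} << 19;  // some page start
      assert(OnSamePage(page + 8, page + 4088));
      assert(!OnSamePage(page + 8, page + (uintptr_t{1} << 19)));
      return 0;
    }
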
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index 4024e52..824a3bf 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -495,24 +495,8 @@
   // ---------------------------------------------------------------------------
   // Inline caching support.
 
-  // Generate code for checking access rights - used for security checks
-  // on access to global objects across environments. The holder register
-  // is left untouched, whereas both scratch registers are clobbered.
-  void CheckAccessGlobalProxy(Register holder_reg,
-                              Register scratch,
-                              Label* miss);
-
   void GetNumberHash(Register reg0, Register scratch);
 
-  void LoadFromNumberDictionary(Label* miss,
-                                Register elements,
-                                Register key,
-                                Register result,
-                                Register reg0,
-                                Register reg1,
-                                Register reg2);
-
-
   inline void MarkCode(NopMarkerTypes type) {
     nop(type);
   }
@@ -842,6 +826,8 @@
   // MIPS32 R2 instruction macro.
   void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
   void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
+  void Seb(Register rd, Register rt);
+  void Seh(Register rd, Register rt);
   void Neg_s(FPURegister fd, FPURegister fs);
   void Neg_d(FPURegister fd, FPURegister fs);
 
@@ -1141,14 +1127,6 @@
   // Must preserve the result register.
   void PopStackHandler();
 
-  // Copies a number of bytes from src to dst. All registers are clobbered. On
-  // exit src and dst will point to the place just after where the last byte was
-  // read or written and length will be zero.
-  void CopyBytes(Register src,
-                 Register dst,
-                 Register length,
-                 Register scratch);
-
   // Initialize fields with filler values.  Fields starting at |current_address|
   // not including |end_address| are overwritten with the value in |filler|.  At
   // the end the loop, |current_address| takes the value of |end_address|.
@@ -1180,12 +1158,6 @@
         FieldMemOperand(object_map, Map::kInstanceTypeOffset));
   }
 
-  // Check if a map for a JSObject indicates that the object has fast elements.
-  // Jump to the specified label if it does not.
-  void CheckFastElements(Register map,
-                         Register scratch,
-                         Label* fail);
-
   // Check if a map for a JSObject indicates that the object can have both smi
   // and HeapObject elements.  Jump to the specified label if it does not.
   void CheckFastObjectElements(Register map,
@@ -1276,13 +1248,6 @@
     return eq;
   }
 
-
-  // Picks out an array index from the hash field.
-  // Register use:
-  //   hash - holds the index's hash. Clobbered.
-  //   index - holds the overwritten index on exit.
-  void IndexFromHash(Register hash, Register index);
-
   // Get the number of least significant bits from a register.
   void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
   void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
diff --git a/src/mips64/assembler-mips64.h b/src/mips64/assembler-mips64.h
index dc3198c..056cc42 100644
--- a/src/mips64/assembler-mips64.h
+++ b/src/mips64/assembler-mips64.h
@@ -1117,7 +1117,8 @@
 
   // Record a deoptimization reason that can be used by a log or cpu profiler.
   // Use --trace-deopt to enable.
-  void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
+  void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
+                         int id);
 
   static int RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
                                        intptr_t pc_delta);
diff --git a/src/mips64/code-stubs-mips64.cc b/src/mips64/code-stubs-mips64.cc
index e089b54..97f5b73 100644
--- a/src/mips64/code-stubs-mips64.cc
+++ b/src/mips64/code-stubs-mips64.cc
@@ -563,7 +563,7 @@
   // If either is a Smi (we know that not both are), then they can only
   // be strictly equal if the other is a HeapNumber.
   STATIC_ASSERT(kSmiTag == 0);
-  DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+  DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
   __ And(a6, lhs, Operand(rhs));
   __ JumpIfNotSmi(a6, &not_smis, a4);
   // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
@@ -1625,13 +1625,10 @@
   __ Daddu(a1, a1, Operand(1));
   __ dsll(a1, a1, 1);  // Multiply by 2.
 
-  __ ld(a0, MemOperand(sp, kLastMatchInfoOffset));
-  __ JumpIfSmi(a0, &runtime);
-  __ GetObjectType(a0, a2, a2);
-  __ Branch(&runtime, ne, a2, Operand(JS_OBJECT_TYPE));
+  // Check that the last match info is a FixedArray.
+  __ ld(last_match_info_elements, MemOperand(sp, kLastMatchInfoOffset));
+  __ JumpIfSmi(last_match_info_elements, &runtime);
   // Check that the object has fast elements.
-  __ ld(last_match_info_elements,
-        FieldMemOperand(a0, JSArray::kElementsOffset));
   __ ld(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
   __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
   __ Branch(&runtime, ne, a0, Operand(at));
@@ -1639,7 +1636,7 @@
   // additional information.
   __ ld(a0,
         FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
-  __ Daddu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
+  __ Daddu(a2, a1, Operand(RegExpMatchInfo::kLastMatchOverhead));
 
   __ SmiUntag(at, a0);
   __ Branch(&runtime, gt, a2, Operand(at));
@@ -1649,28 +1646,20 @@
   // Store the capture count.
   __ SmiTag(a2, a1);  // To smi.
   __ sd(a2, FieldMemOperand(last_match_info_elements,
-                             RegExpImpl::kLastCaptureCountOffset));
+                            RegExpMatchInfo::kNumberOfCapturesOffset));
   // Store last subject and last input.
-  __ sd(subject,
-         FieldMemOperand(last_match_info_elements,
-                         RegExpImpl::kLastSubjectOffset));
+  __ sd(subject, FieldMemOperand(last_match_info_elements,
+                                 RegExpMatchInfo::kLastSubjectOffset));
   __ mov(a2, subject);
   __ RecordWriteField(last_match_info_elements,
-                      RegExpImpl::kLastSubjectOffset,
-                      subject,
-                      a7,
-                      kRAHasNotBeenSaved,
-                      kDontSaveFPRegs);
+                      RegExpMatchInfo::kLastSubjectOffset, subject, a7,
+                      kRAHasNotBeenSaved, kDontSaveFPRegs);
   __ mov(subject, a2);
-  __ sd(subject,
-         FieldMemOperand(last_match_info_elements,
-                         RegExpImpl::kLastInputOffset));
+  __ sd(subject, FieldMemOperand(last_match_info_elements,
+                                 RegExpMatchInfo::kLastInputOffset));
   __ RecordWriteField(last_match_info_elements,
-                      RegExpImpl::kLastInputOffset,
-                      subject,
-                      a7,
-                      kRAHasNotBeenSaved,
-                      kDontSaveFPRegs);
+                      RegExpMatchInfo::kLastInputOffset, subject, a7,
+                      kRAHasNotBeenSaved, kDontSaveFPRegs);
 
   // Get the static offsets vector filled by the native regexp code.
   ExternalReference address_of_static_offsets_vector =
@@ -1682,9 +1671,8 @@
   Label next_capture, done;
   // Capture register counter starts from number of capture registers and
   // counts down until wrapping after zero.
-  __ Daddu(a0,
-         last_match_info_elements,
-         Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
+  __ Daddu(a0, last_match_info_elements,
+           Operand(RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag));
   __ bind(&next_capture);
   __ Dsubu(a1, a1, Operand(1));
   __ Branch(&done, lt, a1, Operand(zero_reg));
@@ -1700,7 +1688,7 @@
   __ bind(&done);
 
   // Return last match info.
-  __ ld(v0, MemOperand(sp, kLastMatchInfoOffset));
+  __ mov(v0, last_match_info_elements);
   __ DropAndRet(4);
 
   // Do the runtime call to execute the regexp.
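
Context for these RegExp hunks: the stub now fills in a RegExpMatchInfo, a plain FixedArray with fixed header slots followed by the capture registers, instead of reaching through a JSArray's elements. A rough C++ model of the layout the offsets above refer to (field names follow the diff; the types are stand-ins, not V8's real object layout):

```cpp
#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

// Stand-in for the RegExpMatchInfo slots the stub writes directly:
// a fixed header followed by the capture registers.
struct RegExpMatchInfo {
  int number_of_captures;     // kNumberOfCapturesOffset (capture registers)
  std::string last_subject;   // kLastSubjectOffset
  std::string last_input;     // kLastInputOffset
  std::vector<int> captures;  // kFirstCaptureOffset onward
};

int main() {
  // One full match of "abc" at [0, 3): two capture registers.
  RegExpMatchInfo info{2, "abc", "abc", {0, 3}};
  assert(info.captures.size() ==
         static_cast<size_t>(info.number_of_captures));
}
```
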
@@ -1964,6 +1952,7 @@
 }
 
 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
+  // a0 - number of arguments
   // a1 - function
   // a3 - slot id
   // a2 - vector
@@ -1971,25 +1960,22 @@
   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, at);
   __ Branch(miss, ne, a1, Operand(at));
 
-  __ li(a0, Operand(arg_count()));
-
   // Increment the call count for monomorphic function calls.
   IncrementCallCount(masm, a2, a3);
 
   __ mov(a2, a4);
   __ mov(a3, a1);
-  ArrayConstructorStub stub(masm->isolate(), arg_count());
+  ArrayConstructorStub stub(masm->isolate());
   __ TailCallStub(&stub);
 }
 
 
 void CallICStub::Generate(MacroAssembler* masm) {
+  // a0 - number of arguments
   // a1 - function
   // a3 - slot id (Smi)
   // a2 - vector
   Label extra_checks_or_miss, call, call_function, call_count_incremented;
-  int argc = arg_count();
-  ParameterCount actual(argc);
 
   // The checks. First, does a1 match the recorded monomorphic target?
   __ dsrl(a4, a3, 32 - kPointerSizeLog2);
@@ -2023,9 +2009,7 @@
 
   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
                                                     tail_call_mode()),
-          RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
-          USE_DELAY_SLOT);
-  __ li(a0, Operand(argc));  // In delay slot.
+          RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
 
   __ bind(&extra_checks_or_miss);
   Label uninitialized, miss, not_allocation_site;
@@ -2067,9 +2051,7 @@
   __ bind(&call_count_incremented);
 
   __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
-          RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
-          USE_DELAY_SLOT);
-  __ li(a0, Operand(argc));  // In delay slot.
+          RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
 
   __ bind(&uninitialized);
 
@@ -2098,11 +2080,15 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     CreateWeakCellStub create_stub(masm->isolate());
+    __ SmiTag(a0);
+    __ Push(a0);
     __ Push(a2, a3);
     __ Push(cp, a1);
     __ CallStub(&create_stub);
     __ Pop(cp, a1);
     __ Pop(a2, a3);
+    __ Pop(a0);
+    __ SmiUntag(a0);
   }
 
   __ Branch(&call_function);
@@ -2119,6 +2105,10 @@
 void CallICStub::GenerateMiss(MacroAssembler* masm) {
   FrameScope scope(masm, StackFrame::INTERNAL);
 
+  // Preserve number of arguments as Smi.
+  __ SmiTag(a0);
+  __ Push(a0);
+
   // Push the receiver and the function and feedback info.
   __ Push(a1, a2, a3);
 
@@ -2127,6 +2117,10 @@
 
   // Move result to a1 and exit the internal frame.
   __ mov(a1, v0);
+
+  // Restore number of arguments.
+  __ Pop(a0);
+  __ SmiUntag(a0);
 }
 
 
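The SmiTag/Push before the call and Pop/SmiUntag after it (here and in GenerateMiss above) keep the raw argument count in a0 alive across the runtime call while making the pushed word a valid tagged value for the GC to scan. A minimal sketch of the 64-bit smi encoding this relies on, assuming V8's upper-word smi layout on mips64:

```cpp
#include <cassert>
#include <cstdint>

// 64-bit smi tagging as used on mips64: the 32-bit payload lives in the
// upper word, so the low bit (kSmiTag == 0) stays clear and the GC can
// tell a pushed smi from a heap pointer when it walks the stack.
int64_t SmiTag(int32_t value) {
  return static_cast<int64_t>(static_cast<uint64_t>(value) << 32);
}
int32_t SmiUntag(int64_t smi) { return static_cast<int32_t>(smi >> 32); }

int main() {
  int32_t argc = 3;                // raw argument count in a0
  int64_t slot = SmiTag(argc);     // word pushed on the stack
  assert((slot & 1) == 0);         // tag bit clear: scans as a smi
  assert(SmiUntag(slot) == argc);  // restored after the runtime call
}
```
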
@@ -3186,16 +3180,6 @@
   Label need_incremental;
   Label need_incremental_pop_scratch;
 
-  __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
-  __ ld(regs_.scratch1(),
-        MemOperand(regs_.scratch0(),
-                   MemoryChunk::kWriteBarrierCounterOffset));
-  __ Dsubu(regs_.scratch1(), regs_.scratch1(), Operand(1));
-  __ sd(regs_.scratch1(),
-         MemOperand(regs_.scratch0(),
-                    MemoryChunk::kWriteBarrierCounterOffset));
-  __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
-
   // Let's look at the color of the object:  If it is not black we don't have
   // to inform the incremental marker.
   __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
@@ -3279,21 +3263,6 @@
   __ Daddu(sp, sp, a1);
 }
 
-
-void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  LoadICStub stub(isolate());
-  stub.GenerateForTrampoline(masm);
-}
-
-
-void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  KeyedLoadICStub stub(isolate());
-  stub.GenerateForTrampoline(masm);
-}
-
-
 void CallICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(a2);
   CallICStub stub(isolate(), state());
@@ -3301,14 +3270,6 @@
 }
 
 
-void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-
-void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-
 static void HandleArrayCases(MacroAssembler* masm, Register feedback,
                              Register receiver_map, Register scratch1,
                              Register scratch2, bool is_polymorphic,
@@ -3395,182 +3356,12 @@
   __ Jump(t9);
 }
 
-
-void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // a1
-  Register name = LoadWithVectorDescriptor::NameRegister();          // a2
-  Register vector = LoadWithVectorDescriptor::VectorRegister();      // a3
-  Register slot = LoadWithVectorDescriptor::SlotRegister();          // a0
-  Register feedback = a4;
-  Register receiver_map = a5;
-  Register scratch1 = a6;
-
-  __ SmiScale(feedback, slot, kPointerSizeLog2);
-  __ Daddu(feedback, vector, Operand(feedback));
-  __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
-                        scratch1, &compare_map, &load_smi_map, &try_array);
-
-  // Is it a fixed array?
-  __ bind(&try_array);
-  __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
-  __ Branch(&not_array, ne, scratch1, Operand(at));
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, a7, true, &miss);
-
-  __ bind(&not_array);
-  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
-  __ Branch(&miss, ne, feedback, Operand(at));
-  masm->isolate()->load_stub_cache()->GenerateProbe(
-      masm, receiver, name, feedback, receiver_map, scratch1, a7);
-
-  __ bind(&miss);
-  LoadIC::GenerateMiss(masm);
-
-  __ bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ Branch(&compare_map);
-}
-
-
-void KeyedLoadICStub::Generate(MacroAssembler* masm) {
-  GenerateImpl(masm, false);
-}
-
-
-void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-
-void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // a1
-  Register key = LoadWithVectorDescriptor::NameRegister();           // a2
-  Register vector = LoadWithVectorDescriptor::VectorRegister();      // a3
-  Register slot = LoadWithVectorDescriptor::SlotRegister();          // a0
-  Register feedback = a4;
-  Register receiver_map = a5;
-  Register scratch1 = a6;
-
-  __ SmiScale(feedback, slot, kPointerSizeLog2);
-  __ Daddu(feedback, vector, Operand(feedback));
-  __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
-                        scratch1, &compare_map, &load_smi_map, &try_array);
-
-  __ bind(&try_array);
-  // Is it a fixed array?
-  __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
-  __ Branch(&not_array, ne, scratch1, Operand(at));
-  // We have a polymorphic element handler.
-  __ JumpIfNotSmi(key, &miss);
-
-  Label polymorphic, try_poly_name;
-  __ bind(&polymorphic);
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, a7, true, &miss);
-
-  __ bind(&not_array);
-  // Is it generic?
-  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
-  __ Branch(&try_poly_name, ne, feedback, Operand(at));
-  Handle<Code> megamorphic_stub =
-      KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
-  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
-  __ bind(&try_poly_name);
-  // We might have a name in feedback, and a fixed array in the next slot.
-  __ Branch(&miss, ne, key, Operand(feedback));
-  // If the name comparison succeeded, we know we have a fixed array with
-  // at least one map/handler pair.
-  __ SmiScale(feedback, slot, kPointerSizeLog2);
-  __ Daddu(feedback, vector, Operand(feedback));
-  __ ld(feedback,
-        FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, a7, false, &miss);
-
-  __ bind(&miss);
-  KeyedLoadIC::GenerateMiss(masm);
-
-  __ bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ Branch(&compare_map);
-}
-
-void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
-  StoreICStub stub(isolate(), state());
-  stub.GenerateForTrampoline(masm);
-}
-
 void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
   KeyedStoreICStub stub(isolate(), state());
   stub.GenerateForTrampoline(masm);
 }
 
-void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // a1
-  Register key = StoreWithVectorDescriptor::NameRegister();           // a2
-  Register vector = StoreWithVectorDescriptor::VectorRegister();      // a3
-  Register slot = StoreWithVectorDescriptor::SlotRegister();          // a4
-  DCHECK(StoreWithVectorDescriptor::ValueRegister().is(a0));          // a0
-  Register feedback = a5;
-  Register receiver_map = a6;
-  Register scratch1 = a7;
-
-  __ SmiScale(scratch1, slot, kPointerSizeLog2);
-  __ Daddu(feedback, vector, Operand(scratch1));
-  __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
-                        scratch1, &compare_map, &load_smi_map, &try_array);
-
-  // Is it a fixed array?
-  __ bind(&try_array);
-  __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
-  __ Branch(&not_array, ne, scratch1, Heap::kFixedArrayMapRootIndex);
-
-  Register scratch2 = t0;
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
-                   &miss);
-
-  __ bind(&not_array);
-  __ Branch(&miss, ne, feedback, Heap::kmegamorphic_symbolRootIndex);
-  masm->isolate()->store_stub_cache()->GenerateProbe(
-      masm, receiver, key, feedback, receiver_map, scratch1, scratch2);
-
-  __ bind(&miss);
-  StoreIC::GenerateMiss(masm);
-
-  __ bind(&load_smi_map);
-  __ Branch(USE_DELAY_SLOT, &compare_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);  // In delay slot.
-}
-
 void KeyedStoreICStub::Generate(MacroAssembler* masm) {
   GenerateImpl(masm, false);
 }
@@ -3913,29 +3704,18 @@
 void ArrayConstructorStub::GenerateDispatchToArrayStub(
     MacroAssembler* masm,
     AllocationSiteOverrideMode mode) {
-  if (argument_count() == ANY) {
-    Label not_zero_case, not_one_case;
-    __ And(at, a0, a0);
-    __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
-    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+  Label not_zero_case, not_one_case;
+  __ And(at, a0, a0);
+  __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
+  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
 
-    __ bind(&not_zero_case);
-    __ Branch(&not_one_case, gt, a0, Operand(1));
-    CreateArrayDispatchOneArgument(masm, mode);
+  __ bind(&not_zero_case);
+  __ Branch(&not_one_case, gt, a0, Operand(1));
+  CreateArrayDispatchOneArgument(masm, mode);
 
-    __ bind(&not_one_case);
-    ArrayNArgumentsConstructorStub stub(masm->isolate());
-    __ TailCallStub(&stub);
-  } else if (argument_count() == NONE) {
-    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-  } else if (argument_count() == ONE) {
-    CreateArrayDispatchOneArgument(masm, mode);
-  } else if (argument_count() == MORE_THAN_ONE) {
-    ArrayNArgumentsConstructorStub stub(masm->isolate());
-    __ TailCallStub(&stub);
-  } else {
-    UNREACHABLE();
-  }
+  __ bind(&not_one_case);
+  ArrayNArgumentsConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
 }
 
 
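With argument_count() removed from the stub key, the dispatch above always branches on the live count in a0: zero arguments, one argument, or the N-argument stub. The same control flow in plain C++ (the one-argument stub name is what CreateArrayDispatchOneArgument selects; treat it as illustrative):

```cpp
#include <cstdio>

// Sketch of GenerateDispatchToArrayStub's unconditional three-way branch
// on the runtime argument count.
void DispatchToArrayStub(int argc) {
  if (argc == 0) {
    std::puts("ArrayNoArgumentConstructorStub");
  } else if (argc == 1) {
    std::puts("ArraySingleArgumentConstructorStub");
  } else {
    std::puts("ArrayNArgumentsConstructorStub");
  }
}

int main() { DispatchToArrayStub(2); }
```
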
@@ -3988,23 +3768,10 @@
 
   // Subclassing.
   __ bind(&subclassing);
-  switch (argument_count()) {
-    case ANY:
-    case MORE_THAN_ONE:
-      __ Dlsa(at, sp, a0, kPointerSizeLog2);
-      __ sd(a1, MemOperand(at));
-      __ li(at, Operand(3));
-      __ Daddu(a0, a0, at);
-      break;
-    case NONE:
-      __ sd(a1, MemOperand(sp, 0 * kPointerSize));
-      __ li(a0, Operand(3));
-      break;
-    case ONE:
-      __ sd(a1, MemOperand(sp, 1 * kPointerSize));
-      __ li(a0, Operand(4));
-      break;
-  }
+  __ Dlsa(at, sp, a0, kPointerSizeLog2);
+  __ sd(a1, MemOperand(at));
+  __ li(at, Operand(3));
+  __ Daddu(a0, a0, at);
   __ Push(a3, a2);
   __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
 }
@@ -4267,7 +4034,7 @@
     __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
     __ sd(a1, FieldMemOperand(v0, JSArray::kPropertiesOffset));
     __ sd(a1, FieldMemOperand(v0, JSArray::kElementsOffset));
-    __ Move(a1, Smi::FromInt(0));
+    __ Move(a1, Smi::kZero);
     __ Ret(USE_DELAY_SLOT);
     __ sd(a1, FieldMemOperand(v0, JSArray::kLengthOffset));  // In delay slot
     STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
@@ -4434,7 +4201,7 @@
       FixedArray::kHeaderSize + 2 * kPointerSize;
   // If there are no mapped parameters, we do not need the parameter_map.
   Label param_map_size;
-  DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+  DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
   __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a6, Operand(zero_reg));
   __ mov(t1, zero_reg);  // In delay slot: param map size = 0 when a6 == 0.
   __ SmiScale(t1, a6, kPointerSizeLog2);
@@ -4500,13 +4267,13 @@
   // Initialize parameter map. If there are no mapped arguments, we're done.
   Label skip_parameter_map;
   Label skip3;
-  __ Branch(&skip3, ne, a6, Operand(Smi::FromInt(0)));
+  __ Branch(&skip3, ne, a6, Operand(Smi::kZero));
   // Move backing store address to a1, because it is
   // expected there when filling in the unmapped arguments.
   __ mov(a1, a4);
   __ bind(&skip3);
 
-  __ Branch(&skip_parameter_map, eq, a6, Operand(Smi::FromInt(0)));
+  __ Branch(&skip_parameter_map, eq, a6, Operand(Smi::kZero));
 
   __ LoadRoot(a5, Heap::kSloppyArgumentsElementsMapRootIndex);
   __ sd(a5, FieldMemOperand(a4, FixedArray::kMapOffset));
@@ -4553,7 +4320,7 @@
   __ sd(a7, MemOperand(t2));
   __ Daddu(t1, t1, Operand(Smi::FromInt(1)));
   __ bind(&parameters_test);
-  __ Branch(&parameters_loop, ne, a5, Operand(Smi::FromInt(0)));
+  __ Branch(&parameters_loop, ne, a5, Operand(Smi::kZero));
 
   // Restore t1 = argument count (tagged).
   __ ld(a5, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
@@ -4714,119 +4481,6 @@
 }
 
 
-void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
-  Register context_reg = cp;
-  Register slot_reg = a2;
-  Register value_reg = a0;
-  Register cell_reg = a4;
-  Register cell_value_reg = a5;
-  Register cell_details_reg = a6;
-  Label fast_heapobject_case, fast_smi_case, slow_case;
-
-  if (FLAG_debug_code) {
-    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-    __ Check(ne, kUnexpectedValue, value_reg, Operand(at));
-  }
-
-  // Go up context chain to the script context.
-  for (int i = 0; i < depth(); ++i) {
-    __ ld(cell_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
-    context_reg = cell_reg;
-  }
-
-  // Load the PropertyCell at the specified slot.
-  __ Dlsa(at, context_reg, slot_reg, kPointerSizeLog2);
-  __ ld(cell_reg, ContextMemOperand(at, 0));
-
-  // Load PropertyDetails for the cell (actually only the cell_type and kind).
-  __ ld(cell_details_reg,
-        FieldMemOperand(cell_reg, PropertyCell::kDetailsOffset));
-  __ SmiUntag(cell_details_reg);
-  __ And(cell_details_reg, cell_details_reg,
-         PropertyDetails::PropertyCellTypeField::kMask |
-             PropertyDetails::KindField::kMask |
-             PropertyDetails::kAttributesReadOnlyMask);
-
-  // Check if PropertyCell holds mutable data.
-  Label not_mutable_data;
-  __ Branch(&not_mutable_data, ne, cell_details_reg,
-            Operand(PropertyDetails::PropertyCellTypeField::encode(
-                        PropertyCellType::kMutable) |
-                    PropertyDetails::KindField::encode(kData)));
-  __ JumpIfSmi(value_reg, &fast_smi_case);
-  __ bind(&fast_heapobject_case);
-  __ sd(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
-  __ RecordWriteField(cell_reg, PropertyCell::kValueOffset, value_reg,
-                      cell_details_reg, kRAHasNotBeenSaved, kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  // RecordWriteField clobbers the value register, so we need to reload.
-  __ Ret(USE_DELAY_SLOT);
-  __ ld(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
-  __ bind(&not_mutable_data);
-
-  // Check if PropertyCell value matches the new value (relevant for Constant,
-  // ConstantType and Undefined cells).
-  Label not_same_value;
-  __ ld(cell_value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
-  __ Branch(&not_same_value, ne, value_reg, Operand(cell_value_reg));
-  // Make sure the PropertyCell is not marked READ_ONLY.
-  __ And(at, cell_details_reg, PropertyDetails::kAttributesReadOnlyMask);
-  __ Branch(&slow_case, ne, at, Operand(zero_reg));
-  if (FLAG_debug_code) {
-    Label done;
-    // This can only be true for Constant, ConstantType and Undefined cells,
-    // because we never store the_hole via this stub.
-    __ Branch(&done, eq, cell_details_reg,
-              Operand(PropertyDetails::PropertyCellTypeField::encode(
-                          PropertyCellType::kConstant) |
-                      PropertyDetails::KindField::encode(kData)));
-    __ Branch(&done, eq, cell_details_reg,
-              Operand(PropertyDetails::PropertyCellTypeField::encode(
-                          PropertyCellType::kConstantType) |
-                      PropertyDetails::KindField::encode(kData)));
-    __ Check(eq, kUnexpectedValue, cell_details_reg,
-             Operand(PropertyDetails::PropertyCellTypeField::encode(
-                         PropertyCellType::kUndefined) |
-                     PropertyDetails::KindField::encode(kData)));
-    __ bind(&done);
-  }
-  __ Ret();
-  __ bind(&not_same_value);
-
-  // Check if PropertyCell contains data with constant type (and is not
-  // READ_ONLY).
-  __ Branch(&slow_case, ne, cell_details_reg,
-            Operand(PropertyDetails::PropertyCellTypeField::encode(
-                        PropertyCellType::kConstantType) |
-                    PropertyDetails::KindField::encode(kData)));
-
-  // Now either both old and new values must be SMIs or both must be heap
-  // objects with same map.
-  Label value_is_heap_object;
-  __ JumpIfNotSmi(value_reg, &value_is_heap_object);
-  __ JumpIfNotSmi(cell_value_reg, &slow_case);
-  // Old and new values are SMIs, no need for a write barrier here.
-  __ bind(&fast_smi_case);
-  __ Ret(USE_DELAY_SLOT);
-  __ sd(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
-  __ bind(&value_is_heap_object);
-  __ JumpIfSmi(cell_value_reg, &slow_case);
-  Register cell_value_map_reg = cell_value_reg;
-  __ ld(cell_value_map_reg,
-        FieldMemOperand(cell_value_reg, HeapObject::kMapOffset));
-  __ Branch(&fast_heapobject_case, eq, cell_value_map_reg,
-            FieldMemOperand(value_reg, HeapObject::kMapOffset));
-
-  // Fallback to the runtime.
-  __ bind(&slow_case);
-  __ SmiTag(slot_reg);
-  __ Push(slot_reg, value_reg);
-  __ TailCallRuntime(is_strict(language_mode())
-                         ? Runtime::kStoreGlobalViaContext_Strict
-                         : Runtime::kStoreGlobalViaContext_Sloppy);
-}
-
-
 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
   int64_t offset = (ref0.address() - ref1.address());
   DCHECK(static_cast<int>(offset) == offset);
@@ -5100,7 +4754,7 @@
   __ sd(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
   __ sd(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
   // should_throw_on_error -> false
-  DCHECK(Smi::FromInt(0) == nullptr);
+  DCHECK(Smi::kZero == nullptr);
   __ sd(zero_reg,
         MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
   __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
diff --git a/src/mips64/interface-descriptors-mips64.cc b/src/mips64/interface-descriptors-mips64.cc
index e5b9c2e..c6a917f 100644
--- a/src/mips64/interface-descriptors-mips64.cc
+++ b/src/mips64/interface-descriptors-mips64.cc
@@ -29,9 +29,9 @@
 const Register LoadDescriptor::NameRegister() { return a2; }
 const Register LoadDescriptor::SlotRegister() { return a0; }
 
-
 const Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
 
+const Register LoadICProtoArrayDescriptor::HandlerRegister() { return a4; }
 
 const Register StoreDescriptor::ReceiverRegister() { return a1; }
 const Register StoreDescriptor::NameRegister() { return a2; }
@@ -44,10 +44,6 @@
 const Register StoreTransitionDescriptor::VectorRegister() { return a3; }
 const Register StoreTransitionDescriptor::MapRegister() { return a5; }
 
-const Register StoreGlobalViaContextDescriptor::SlotRegister() { return a2; }
-const Register StoreGlobalViaContextDescriptor::ValueRegister() { return a0; }
-
-
 const Register StringCompareDescriptor::LeftRegister() { return a1; }
 const Register StringCompareDescriptor::RightRegister() { return a0; }
 
@@ -153,7 +149,7 @@
 
 void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {a1, a3, a2};
+  Register registers[] = {a1, a0, a3, a2};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
@@ -209,13 +205,6 @@
 }
 
 
-void RegExpConstructResultDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {a2, a1, a0};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
 void TransitionElementsKindDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {a0, a1};
diff --git a/src/mips64/macro-assembler-mips64.cc b/src/mips64/macro-assembler-mips64.cc
index dd12f9b..a3ab4a8 100644
--- a/src/mips64/macro-assembler-mips64.cc
+++ b/src/mips64/macro-assembler-mips64.cc
@@ -517,85 +517,6 @@
 // Allocation support.
 
 
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
-                                            Register scratch,
-                                            Label* miss) {
-  Label same_contexts;
-  Register temporary = t8;
-
-  DCHECK(!holder_reg.is(scratch));
-  DCHECK(!holder_reg.is(at));
-  DCHECK(!scratch.is(at));
-
-  // Load current lexical context from the active StandardFrame, which
-  // may require crawling past STUB frames.
-  Label load_context;
-  Label has_context;
-  mov(at, fp);
-  bind(&load_context);
-  ld(scratch, MemOperand(at, CommonFrameConstants::kContextOrFrameTypeOffset));
-  // Passing temporary register, otherwise JumpIfNotSmi modifies register at.
-  JumpIfNotSmi(scratch, &has_context, temporary);
-  ld(at, MemOperand(at, CommonFrameConstants::kCallerFPOffset));
-  Branch(&load_context);
-  bind(&has_context);
-
-  // In debug mode, make sure the lexical context is set.
-#ifdef DEBUG
-  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
-      scratch, Operand(zero_reg));
-#endif
-
-  // Load the native context of the current context.
-  ld(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
-
-  // Check the context is a native context.
-  if (emit_debug_code()) {
-    push(holder_reg);  // Temporarily save holder on the stack.
-    // Read the first word and compare to the native_context_map.
-    ld(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
-    LoadRoot(at, Heap::kNativeContextMapRootIndex);
-    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
-          holder_reg, Operand(at));
-    pop(holder_reg);  // Restore holder.
-  }
-
-  // Check if both contexts are the same.
-  ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
-  Branch(&same_contexts, eq, scratch, Operand(at));
-
-  // Check the context is a native context.
-  if (emit_debug_code()) {
-    push(holder_reg);  // Temporarily save holder on the stack.
-    mov(holder_reg, at);  // Move at to its holding place.
-    LoadRoot(at, Heap::kNullValueRootIndex);
-    Check(ne, kJSGlobalProxyContextShouldNotBeNull,
-          holder_reg, Operand(at));
-
-    ld(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
-    LoadRoot(at, Heap::kNativeContextMapRootIndex);
-    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
-          holder_reg, Operand(at));
-    // Restore at is not needed. at is reloaded below.
-    pop(holder_reg);  // Restore holder.
-    // Restore at to holder's context.
-    ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
-  }
-
-  // Check that the security token in the calling global object is
-  // compatible with the security token in the receiving global
-  // object.
-  int token_offset = Context::kHeaderSize +
-                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
-
-  ld(scratch, FieldMemOperand(scratch, token_offset));
-  ld(at, FieldMemOperand(at, token_offset));
-  Branch(miss, ne, scratch, Operand(at));
-
-  bind(&same_contexts);
-}
-
-
 // Compute the hash code from the untagged key.  This must be kept in sync with
 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
 // code-stubs-hydrogen.cc
@@ -637,87 +558,6 @@
   And(reg0, reg0, Operand(0x3fffffff));
 }
 
-
-void MacroAssembler::LoadFromNumberDictionary(Label* miss,
-                                              Register elements,
-                                              Register key,
-                                              Register result,
-                                              Register reg0,
-                                              Register reg1,
-                                              Register reg2) {
-  // Register use:
-  //
-  // elements - holds the slow-case elements of the receiver on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // key      - holds the smi key on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  //
-  // result   - holds the result on exit if the load succeeded.
-  //            Allowed to be the same as 'key' or 'result'.
-  //            Unchanged on bailout so 'key' or 'result' can be used
-  //            in further computation.
-  //
-  // Scratch registers:
-  //
-  // reg0 - holds the untagged key on entry and holds the hash once computed.
-  //
-  // reg1 - Used to hold the capacity mask of the dictionary.
-  //
-  // reg2 - Used for the index into the dictionary.
-  // at   - Temporary (avoid MacroAssembler instructions also using 'at').
-  Label done;
-
-  GetNumberHash(reg0, reg1);
-
-  // Compute the capacity mask.
-  ld(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
-  SmiUntag(reg1, reg1);
-  Dsubu(reg1, reg1, Operand(1));
-
-  // Generate an unrolled loop that performs a few probes before giving up.
-  for (int i = 0; i < kNumberDictionaryProbes; i++) {
-    // Use reg2 for index calculations and keep the hash intact in reg0.
-    mov(reg2, reg0);
-    // Compute the masked index: (hash + i + i * i) & mask.
-    if (i > 0) {
-      Daddu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
-    }
-    and_(reg2, reg2, reg1);
-
-    // Scale the index by multiplying by the element size.
-    DCHECK(SeededNumberDictionary::kEntrySize == 3);
-    Dlsa(reg2, reg2, reg2, 1);  // reg2 = reg2 * 3.
-
-    // Check if the key is identical to the name.
-    Dlsa(reg2, elements, reg2, kPointerSizeLog2);
-
-    ld(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
-    if (i != kNumberDictionaryProbes - 1) {
-      Branch(&done, eq, key, Operand(at));
-    } else {
-      Branch(miss, ne, key, Operand(at));
-    }
-  }
-
-  bind(&done);
-  // Check that the value is a field property.
-  // reg2: elements + (index * kPointerSize).
-  const int kDetailsOffset =
-      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
-  ld(reg1, FieldMemOperand(reg2, kDetailsOffset));
-  DCHECK_EQ(DATA, 0);
-  And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
-  Branch(miss, ne, at, Operand(zero_reg));
-
-  // Get the value at the masked, scaled index and return.
-  const int kValueOffset =
-      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
-  ld(result, FieldMemOperand(reg2, kValueOffset));
-}
-
-
 // ---------------------------------------------------------------------------
 // Instruction macros.
 
@@ -1918,11 +1758,27 @@
   ext_(rt, rs, pos, size);
 }
 
+void MacroAssembler::ExtractBits(Register rt, Register rs, uint16_t pos,
+                                 uint16_t size) {
+  DCHECK(pos < 64);
+  DCHECK(size > 0 && size <= 64);
+  DCHECK(pos + size <= 64);
+  if (pos < 32) {
+    if (size <= 32) {
+      Dext(rt, rs, pos, size);
+    } else {
+      Dextm(rt, rs, pos, size);
+    }
+  } else if (pos < 64) {
+    DCHECK(size <= 32);
+    Dextu(rt, rs, pos, size);
+  }
+}
 
 void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos,
                           uint16_t size) {
   DCHECK(pos < 32);
-  DCHECK(pos + size < 33);
+  DCHECK(size > 0 && size <= 32);
   dext_(rt, rs, pos, size);
 }
 
@@ -1930,7 +1786,8 @@
 void MacroAssembler::Dextm(Register rt, Register rs, uint16_t pos,
                            uint16_t size) {
   DCHECK(pos < 32);
-  DCHECK(size <= 64);
+  DCHECK(size > 32 && size <= 64);
+  DCHECK((pos + size) > 32 && (pos + size) <= 64);
   dextm(rt, rs, pos, size);
 }
 
@@ -1938,7 +1795,8 @@
 void MacroAssembler::Dextu(Register rt, Register rs, uint16_t pos,
                            uint16_t size) {
   DCHECK(pos >= 32 && pos < 64);
-  DCHECK(size < 33);
+  DCHECK(size > 0 && size <= 32);
+  DCHECK((pos + size) > 32 && (pos + size) <= 64);
   dextu(rt, rs, pos, size);
 }
 
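ExtractBits exists because each MIPS64 extract instruction encodes pos and size in 5-bit fields with different biases, so the legal (pos, size) space is split across dext, dextm, and dextu; the tightened DCHECKs above spell out each instruction's range. A software model of the combined operation and the dispatch boundaries:

```cpp
#include <cassert>
#include <cstdint>

// Model of ExtractBits: dext covers pos < 32 && size <= 32, dextm covers
// pos < 32 && size > 32, dextu covers pos >= 32 && size <= 32. All three
// compute the same extraction; the split exists only because the hardware
// encodes pos/size in 5-bit fields with different biases.
uint64_t ExtractBits(uint64_t rs, unsigned pos, unsigned size) {
  assert(pos < 64 && size > 0 && size <= 64 && pos + size <= 64);
  uint64_t mask = (size == 64) ? ~uint64_t{0} : ((uint64_t{1} << size) - 1);
  return (rs >> pos) & mask;
}

int main() {
  assert(ExtractBits(0xff00, 8, 8) == 0xff);                           // dext
  assert(ExtractBits(~uint64_t{0}, 0, 40) == (uint64_t{1} << 40) - 1); // dextm
  assert(ExtractBits(uint64_t{0xabc} << 40, 40, 12) == 0xabc);         // dextu
}
```
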
@@ -4787,90 +4645,6 @@
   STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
 }
 
-
-void MacroAssembler::CopyBytes(Register src,
-                               Register dst,
-                               Register length,
-                               Register scratch) {
-  Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
-
-  // Align src before copying in word size chunks.
-  Branch(&byte_loop, le, length, Operand(kPointerSize));
-  bind(&align_loop_1);
-  And(scratch, src, kPointerSize - 1);
-  Branch(&word_loop, eq, scratch, Operand(zero_reg));
-  lbu(scratch, MemOperand(src));
-  Daddu(src, src, 1);
-  sb(scratch, MemOperand(dst));
-  Daddu(dst, dst, 1);
-  Dsubu(length, length, Operand(1));
-  Branch(&align_loop_1, ne, length, Operand(zero_reg));
-
-  // Copy bytes in word size chunks.
-  bind(&word_loop);
-  if (emit_debug_code()) {
-    And(scratch, src, kPointerSize - 1);
-    Assert(eq, kExpectingAlignmentForCopyBytes,
-        scratch, Operand(zero_reg));
-  }
-  Branch(&byte_loop, lt, length, Operand(kPointerSize));
-  ld(scratch, MemOperand(src));
-  Daddu(src, src, kPointerSize);
-
-  // TODO(kalmard) check if this can be optimized to use sw in most cases.
-  // Can't use unaligned access - copy byte by byte.
-  if (kArchEndian == kLittle) {
-    sb(scratch, MemOperand(dst, 0));
-    dsrl(scratch, scratch, 8);
-    sb(scratch, MemOperand(dst, 1));
-    dsrl(scratch, scratch, 8);
-    sb(scratch, MemOperand(dst, 2));
-    dsrl(scratch, scratch, 8);
-    sb(scratch, MemOperand(dst, 3));
-    dsrl(scratch, scratch, 8);
-    sb(scratch, MemOperand(dst, 4));
-    dsrl(scratch, scratch, 8);
-    sb(scratch, MemOperand(dst, 5));
-    dsrl(scratch, scratch, 8);
-    sb(scratch, MemOperand(dst, 6));
-    dsrl(scratch, scratch, 8);
-    sb(scratch, MemOperand(dst, 7));
-  } else {
-    sb(scratch, MemOperand(dst, 7));
-    dsrl(scratch, scratch, 8);
-    sb(scratch, MemOperand(dst, 6));
-    dsrl(scratch, scratch, 8);
-    sb(scratch, MemOperand(dst, 5));
-    dsrl(scratch, scratch, 8);
-    sb(scratch, MemOperand(dst, 4));
-    dsrl(scratch, scratch, 8);
-    sb(scratch, MemOperand(dst, 3));
-    dsrl(scratch, scratch, 8);
-    sb(scratch, MemOperand(dst, 2));
-    dsrl(scratch, scratch, 8);
-    sb(scratch, MemOperand(dst, 1));
-    dsrl(scratch, scratch, 8);
-    sb(scratch, MemOperand(dst, 0));
-  }
-  Daddu(dst, dst, 8);
-
-  Dsubu(length, length, Operand(kPointerSize));
-  Branch(&word_loop);
-
-  // Copy the last bytes if any left.
-  bind(&byte_loop);
-  Branch(&done, eq, length, Operand(zero_reg));
-  bind(&byte_loop_1);
-  lbu(scratch, MemOperand(src));
-  Daddu(src, src, 1);
-  sb(scratch, MemOperand(dst));
-  Daddu(dst, dst, 1);
-  Dsubu(length, length, Operand(1));
-  Branch(&byte_loop_1, ne, length, Operand(zero_reg));
-  bind(&done);
-}
-
-
 void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
                                                 Register end_address,
                                                 Register filler) {
@@ -4883,20 +4657,6 @@
   Branch(&loop, ult, current_address, Operand(end_address));
 }
 
-
-void MacroAssembler::CheckFastElements(Register map,
-                                       Register scratch,
-                                       Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  STATIC_ASSERT(FAST_ELEMENTS == 2);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
-  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
-  Branch(fail, hi, scratch,
-         Operand(Map::kMaximumBitField2FastHoleyElementValue));
-}
-
-
 void MacroAssembler::CheckFastObjectElements(Register map,
                                              Register scratch,
                                              Label* fail) {
@@ -5557,18 +5317,6 @@
   return has_frame_ || !stub->SometimesSetsUpAFrame();
 }
 
-
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
-  // If the hash field contains an array index pick it out. The assert checks
-  // that the constants for the maximum number of digits for an array index
-  // cached in the hash field and the number of bits reserved for it does not
-  // conflict.
-  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
-         (1 << String::kArrayIndexValueBits));
-  DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
-}
-
-
 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
                                                FPURegister result,
                                                Register scratch1,
@@ -6897,7 +6645,7 @@
   ld(at, FieldMemOperand(string, String::kLengthOffset));
   Check(lt, kIndexIsTooLarge, index, Operand(at));
 
-  DCHECK(Smi::FromInt(0) == 0);
+  DCHECK(Smi::kZero == 0);
   Check(ge, kIndexIsNegative, index, Operand(zero_reg));
 }
 
@@ -7162,7 +6910,7 @@
 
   // For all objects but the receiver, check that the cache is empty.
   EnumLength(a3, a1);
-  Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
+  Branch(call_runtime, ne, a3, Operand(Smi::kZero));
 
   bind(&start);
 
@@ -7232,13 +6980,14 @@
   ExternalReference new_space_allocation_top_adr =
       ExternalReference::new_space_allocation_top_address(isolate());
   const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
-  const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+  const int kMementoLastWordOffset =
+      kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
 
   // Bail out if the object is not in new space.
   JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
   // If the object is in new space, we need to check whether it is on the same
   // page as the current top.
-  Daddu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+  Daddu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
   li(at, Operand(new_space_allocation_top_adr));
   ld(at, MemOperand(at));
   Xor(scratch_reg, scratch_reg, Operand(at));
@@ -7247,7 +6996,7 @@
   // The object is on a different page than allocation top. Bail out if the
   // object sits on the page boundary as no memento can follow and we cannot
   // touch the memory following it.
-  Daddu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+  Daddu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
   Xor(scratch_reg, scratch_reg, Operand(receiver_reg));
   And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
   Branch(no_memento_found, ne, scratch_reg, Operand(zero_reg));
@@ -7256,10 +7005,10 @@
   // If top is on the same page as the current object, we need to check whether
   // we are below top.
   bind(&top_check);
-  Daddu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+  Daddu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
   li(at, Operand(new_space_allocation_top_adr));
   ld(at, MemOperand(at));
-  Branch(no_memento_found, gt, scratch_reg, Operand(at));
+  Branch(no_memento_found, ge, scratch_reg, Operand(at));
   // Memento map check.
   bind(&map_check);
   ld(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
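
The switch from the memento's end to its last word matters when a memento ends exactly on a page boundary: an address one past the end lands on the next page, so the Xor/And same-page test misclassifies it (the top check pairs with this by tightening gt to ge). A self-contained sketch of the boundary case, with illustrative constants rather than V8's real sizes:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kPageSize = uint64_t{1} << 19;  // assumed page size
  const uint64_t kPageMask = kPageSize - 1;      // Page::kPageAlignmentMask
  const uint64_t kMementoSize = 16;              // illustrative memento size
  const uint64_t kPointerSize = 8;
  // A receiver whose trailing memento ends exactly on a page boundary.
  uint64_t receiver = 2 * kPageSize - kMementoSize;
  uint64_t memento_end = receiver + kMementoSize;      // first byte of next page
  uint64_t memento_last = memento_end - kPointerSize;  // still on this page
  auto same_page = [&](uint64_t a, uint64_t b) {
    return ((a ^ b) & ~kPageMask) == 0;  // the Xor/And test from the code
  };
  assert(!same_page(receiver, memento_end));  // old probe: looks off-page
  assert(same_page(receiver, memento_last));  // new probe: classified right
}
```
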
diff --git a/src/mips64/macro-assembler-mips64.h b/src/mips64/macro-assembler-mips64.h
index 4f67d70..5a1cf27 100644
--- a/src/mips64/macro-assembler-mips64.h
+++ b/src/mips64/macro-assembler-mips64.h
@@ -527,24 +527,8 @@
   // ---------------------------------------------------------------------------
   // Inline caching support.
 
-  // Generate code for checking access rights - used for security checks
-  // on access to global objects across environments. The holder register
-  // is left untouched, whereas both scratch registers are clobbered.
-  void CheckAccessGlobalProxy(Register holder_reg,
-                              Register scratch,
-                              Label* miss);
-
   void GetNumberHash(Register reg0, Register scratch);
 
-  void LoadFromNumberDictionary(Label* miss,
-                                Register elements,
-                                Register key,
-                                Register result,
-                                Register reg0,
-                                Register reg1,
-                                Register reg2);
-
-
   inline void MarkCode(NopMarkerTypes type) {
     nop(type);
   }
@@ -889,6 +873,9 @@
   void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
   void Dins(Register rt, Register rs, uint16_t pos, uint16_t size);
   void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
+
+  void ExtractBits(Register rt, Register rs, uint16_t pos, uint16_t size);
+
   void Dext(Register rt, Register rs, uint16_t pos, uint16_t size);
   void Dextm(Register rt, Register rs, uint16_t pos, uint16_t size);
   void Dextu(Register rt, Register rs, uint16_t pos, uint16_t size);
@@ -1199,14 +1186,6 @@
   // Must preserve the result register.
   void PopStackHandler();
 
-  // Copies a number of bytes from src to dst. All registers are clobbered. On
-  // exit src and dst will point to the place just after where the last byte was
-  // read or written and length will be zero.
-  void CopyBytes(Register src,
-                 Register dst,
-                 Register length,
-                 Register scratch);
-
   // Initialize fields with filler values.  Fields from |current_address| up
   // to but not including |end_address| are overwritten with the value in
   // |filler|.  At the end of the loop, |current_address| takes the value of
   // |end_address|.
@@ -1238,12 +1217,6 @@
         FieldMemOperand(object_map, Map::kInstanceTypeOffset));
   }
 
-  // Check if a map for a JSObject indicates that the object has fast elements.
-  // Jump to the specified label if it does not.
-  void CheckFastElements(Register map,
-                         Register scratch,
-                         Label* fail);
-
   // Check if a map for a JSObject indicates that the object can have both smi
   // and HeapObject elements.  Jump to the specified label if it does not.
   void CheckFastObjectElements(Register map,
@@ -1334,13 +1307,6 @@
     return eq;
   }
 
-
-  // Picks out an array index from the hash field.
-  // Register use:
-  //   hash - holds the index's hash. Clobbered.
-  //   index - holds the overwritten index on exit.
-  void IndexFromHash(Register hash, Register index);
-
   // Get the number of least significant bits from a register.
   void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
   void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
diff --git a/src/mips64/simulator-mips64.cc b/src/mips64/simulator-mips64.cc
index 02387d0..4a8e007 100644
--- a/src/mips64/simulator-mips64.cc
+++ b/src/mips64/simulator-mips64.cc
@@ -4879,7 +4879,7 @@
     while (program_counter != end_sim_pc) {
       Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
       icount_++;
-      if (icount_ == static_cast<uint64_t>(::v8::internal::FLAG_stop_sim_at)) {
+      if (icount_ == static_cast<int64_t>(::v8::internal::FLAG_stop_sim_at)) {
         MipsDebugger dbg(this);
         dbg.Debug();
       } else {
diff --git a/src/objects-body-descriptors-inl.h b/src/objects-body-descriptors-inl.h
index 0252b64..f7a1a71 100644
--- a/src/objects-body-descriptors-inl.h
+++ b/src/objects-body-descriptors-inl.h
@@ -468,6 +468,8 @@
     case JS_VALUE_TYPE:
     case JS_DATE_TYPE:
     case JS_ARRAY_TYPE:
+    case JS_MODULE_NAMESPACE_TYPE:
+    case JS_FIXED_ARRAY_ITERATOR_TYPE:
     case JS_TYPED_ARRAY_TYPE:
     case JS_DATA_VIEW_TYPE:
     case JS_SET_TYPE:
@@ -475,6 +477,43 @@
     case JS_SET_ITERATOR_TYPE:
     case JS_MAP_ITERATOR_TYPE:
     case JS_STRING_ITERATOR_TYPE:
+
+    case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
+    case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
+    case JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE:
+    case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE:
+
     case JS_REGEXP_TYPE:
     case JS_GLOBAL_PROXY_TYPE:
     case JS_GLOBAL_OBJECT_TYPE:
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 3c43f23..2580bfb 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -140,6 +140,12 @@
     case JS_ARRAY_TYPE:
       JSArray::cast(this)->JSArrayVerify();
       break;
+    case JS_MODULE_NAMESPACE_TYPE:
+      JSModuleNamespace::cast(this)->JSModuleNamespaceVerify();
+      break;
+    case JS_FIXED_ARRAY_ITERATOR_TYPE:
+      JSFixedArrayIterator::cast(this)->JSFixedArrayIteratorVerify();
+      break;
     case JS_SET_TYPE:
       JSSet::cast(this)->JSSetVerify();
       break;
@@ -152,6 +158,44 @@
     case JS_MAP_ITERATOR_TYPE:
       JSMapIterator::cast(this)->JSMapIteratorVerify();
       break;
+    case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
+    case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
+    case JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE:
+    case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE:
+      JSArrayIterator::cast(this)->JSArrayIteratorVerify();
+      break;
+
     case JS_STRING_ITERATOR_TYPE:
       JSStringIterator::cast(this)->JSStringIteratorVerify();
       break;
@@ -339,8 +383,8 @@
   CHECK(!heap->InNewSpace(this));
   CHECK(FIRST_TYPE <= instance_type() && instance_type() <= LAST_TYPE);
   CHECK(instance_size() == kVariableSizeSentinel ||
-         (kPointerSize <= instance_size() &&
-          instance_size() < heap->Capacity()));
+        (kPointerSize <= instance_size() &&
+         static_cast<size_t>(instance_size()) < heap->Capacity()));
   CHECK(GetBackPointer()->IsUndefined(heap->isolate()) ||
         !Map::cast(GetBackPointer())->is_stable());
   VerifyHeapPointer(prototype());
@@ -677,7 +721,7 @@
       last_gc_pc = it.rinfo()->pc();
     }
   }
-  CHECK(raw_type_feedback_info() == Smi::FromInt(0) ||
+  CHECK(raw_type_feedback_info() == Smi::kZero ||
         raw_type_feedback_info()->IsSmi() == IsCodeStubOrIC());
 }
 
@@ -728,9 +772,33 @@
   CHECK(length()->IsNumber() || length()->IsUndefined(isolate));
   // If a GC was caused while constructing this array, the elements
   // pointer may point to a one pointer filler map.
-  if (ElementsAreSafeToExamine()) {
-    CHECK(elements()->IsUndefined(isolate) || elements()->IsFixedArray() ||
-          elements()->IsFixedDoubleArray());
+  if (!ElementsAreSafeToExamine()) return;
+  if (elements()->IsUndefined(isolate)) return;
+  CHECK(elements()->IsFixedArray() || elements()->IsFixedDoubleArray());
+  if (!length()->IsNumber()) return;
+  // Verify that the length and the elements backing store are in sync.
+  if (length()->IsSmi() && HasFastElements()) {
+    int size = Smi::cast(length())->value();
+    // Holey / Packed backing stores might have slack or might not have been
+    // properly initialized yet.
+    CHECK(size <= elements()->length() ||
+          elements() == isolate->heap()->empty_fixed_array());
+  } else {
+    CHECK(HasDictionaryElements());
+    uint32_t array_length;
+    CHECK(length()->ToArrayLength(&array_length));
+    if (array_length == 0xffffffff) {
+      CHECK(length()->ToArrayLength(&array_length));
+    }
+    if (array_length != 0) {
+      SeededNumberDictionary* dict = SeededNumberDictionary::cast(elements());
+      // The dictionary can never have more elements than the array length + 1.
+      // If the backing store grows the verification might be triggered with
+      // the old length in place.
+      uint32_t nof_elements = static_cast<uint32_t>(dict->NumberOfElements());
+      if (nof_elements != 0) nof_elements--;
+      CHECK_LE(nof_elements, array_length);
+    }
   }
 }
 
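The rewritten JSArrayVerify cross-checks length against the backing store: a fast store may carry slack beyond length (or still be the empty fixed array), while a dictionary store can hold at most length elements plus one transient entry during a grow. A toy model of that invariant, with plain data standing in for the heap objects:

```cpp
#include <cassert>
#include <cstdint>

// Plain-data stand-in for the JSArray fields the verifier inspects.
struct FakeArray {
  bool fast;                 // fast vs. dictionary elements (assumed flag)
  uint32_t length;           // JSArray length
  uint32_t store_capacity;   // fast: elements()->length(); 0 = empty store
  uint32_t dict_elements;    // dictionary: NumberOfElements()
};

void Verify(const FakeArray& a) {
  if (a.fast) {
    assert(a.length <= a.store_capacity || a.store_capacity == 0);
  } else if (a.length != 0) {
    uint32_t nof = a.dict_elements;
    if (nof != 0) nof--;     // tolerate one stale entry mid-grow
    assert(nof <= a.length);
  }
}

int main() {
  Verify({true, 3, 8, 0});    // packed store with slack
  Verify({false, 10, 0, 4});  // sparse dictionary array
}
```
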
@@ -782,6 +850,16 @@
   CHECK(table()->IsHashTable() || table()->IsUndefined(GetIsolate()));
 }
 
+void JSArrayIterator::JSArrayIteratorVerify() {
+  CHECK(IsJSArrayIterator());
+  JSObjectVerify();
+  CHECK(object()->IsJSReceiver() || object()->IsUndefined(GetIsolate()));
+
+  CHECK_GE(index()->Number(), 0);
+  CHECK_LE(index()->Number(), kMaxSafeInteger);
+  CHECK(object_map()->IsMap() || object_map()->IsUndefined(GetIsolate()));
+}
+
 void JSStringIterator::JSStringIteratorVerify() {
   CHECK(IsJSStringIterator());
   JSObjectVerify();
@@ -872,7 +950,7 @@
   VerifyPointer(buffer());
   Isolate* isolate = GetIsolate();
   CHECK(buffer()->IsJSArrayBuffer() || buffer()->IsUndefined(isolate) ||
-        buffer() == Smi::FromInt(0));
+        buffer() == Smi::kZero);
 
   VerifyPointer(raw_byte_offset());
   CHECK(raw_byte_offset()->IsSmi() || raw_byte_offset()->IsHeapNumber() ||
@@ -909,30 +987,89 @@
   value()->ObjectVerify();
 }
 
-void PromiseContainer::PromiseContainerVerify() {
-  CHECK(IsPromiseContainer());
-  thenable()->ObjectVerify();
-  then()->ObjectVerify();
-  resolve()->ObjectVerify();
-  reject()->ObjectVerify();
-  before_debug_event()->ObjectVerify();
-  after_debug_event()->ObjectVerify();
+void PromiseResolveThenableJobInfo::PromiseResolveThenableJobInfoVerify() {
+  Isolate* isolate = GetIsolate();
+  CHECK(IsPromiseResolveThenableJobInfo());
+  CHECK(thenable()->IsJSReceiver());
+  CHECK(then()->IsJSReceiver());
+  CHECK(resolve()->IsJSFunction());
+  CHECK(reject()->IsJSFunction());
+  CHECK(debug_id()->IsNumber() || debug_id()->IsUndefined(isolate));
+  CHECK(debug_name()->IsString() || debug_name()->IsUndefined(isolate));
+  CHECK(context()->IsContext());
+}
+
+void PromiseReactionJobInfo::PromiseReactionJobInfoVerify() {
+  Isolate* isolate = GetIsolate();
+  CHECK(IsPromiseReactionJobInfo());
+  CHECK(value()->IsObject());
+  CHECK(tasks()->IsJSArray() || tasks()->IsCallable());
+  CHECK(deferred()->IsJSObject() || deferred()->IsUndefined(isolate));
+  CHECK(debug_id()->IsNumber() || debug_id()->IsUndefined(isolate));
+  CHECK(debug_name()->IsString() || debug_name()->IsUndefined(isolate));
+  CHECK(context()->IsContext());
+}
+
+void JSModuleNamespace::JSModuleNamespaceVerify() {
+  CHECK(IsJSModuleNamespace());
+  VerifyPointer(module());
+}
+
+void JSFixedArrayIterator::JSFixedArrayIteratorVerify() {
+  CHECK(IsJSFixedArrayIterator());
+
+  VerifyPointer(array());
+  VerifyPointer(initial_next());
+  VerifySmiField(kIndexOffset);
+
+  CHECK_LE(index(), array()->length());
+}
+
+void ModuleInfoEntry::ModuleInfoEntryVerify() {
+  Isolate* isolate = GetIsolate();
+  CHECK(IsModuleInfoEntry());
+
+  CHECK(export_name()->IsUndefined(isolate) || export_name()->IsString());
+  CHECK(local_name()->IsUndefined(isolate) || local_name()->IsString());
+  CHECK(import_name()->IsUndefined(isolate) || import_name()->IsString());
+
+  VerifySmiField(kModuleRequestOffset);
+  VerifySmiField(kCellIndexOffset);
+  VerifySmiField(kBegPosOffset);
+  VerifySmiField(kEndPosOffset);
+
+  CHECK_IMPLIES(import_name()->IsString(), module_request() >= 0);
+  CHECK_IMPLIES(export_name()->IsString() && import_name()->IsString(),
+                local_name()->IsUndefined(isolate));
 }
 
 void Module::ModuleVerify() {
   CHECK(IsModule());
-  CHECK(code()->IsSharedFunctionInfo() || code()->IsJSFunction());
-  code()->ObjectVerify();
-  exports()->ObjectVerify();
-  requested_modules()->ObjectVerify();
-  VerifySmiField(kFlagsOffset);
-  embedder_data()->ObjectVerify();
-  CHECK(shared()->name()->IsSymbol());
-  // TODO(neis): Check more.
+
+  VerifyPointer(code());
+  VerifyPointer(exports());
+  VerifyPointer(module_namespace());
+  VerifyPointer(requested_modules());
+  VerifySmiField(kHashOffset);
+
+  CHECK((!instantiated() && code()->IsSharedFunctionInfo()) ||
+        (instantiated() && !evaluated() && code()->IsJSFunction()) ||
+        (instantiated() && evaluated() && code()->IsModuleInfo()));
+
+  CHECK(module_namespace()->IsUndefined(GetIsolate()) ||
+        module_namespace()->IsJSModuleNamespace());
+  if (module_namespace()->IsJSModuleNamespace()) {
+    CHECK_EQ(JSModuleNamespace::cast(module_namespace())->module(), this);
+  }
+
+  CHECK_EQ(requested_modules()->length(), info()->module_requests()->length());
+
+  CHECK_NE(hash(), 0);
 }
 
 void PrototypeInfo::PrototypeInfoVerify() {
   CHECK(IsPrototypeInfo());
+  CHECK(weak_cell()->IsWeakCell() || weak_cell()->IsUndefined(GetIsolate()));
   if (prototype_users()->IsWeakFixedArray()) {
     WeakFixedArray::cast(prototype_users())->FixedArrayVerify();
   } else {
@@ -941,6 +1078,13 @@
   CHECK(validity_cell()->IsCell() || validity_cell()->IsSmi());
 }
 
+void Tuple3::Tuple3Verify() {
+  CHECK(IsTuple3());
+  VerifyObjectField(kValue1Offset);
+  VerifyObjectField(kValue2Offset);
+  VerifyObjectField(kValue3Offset);
+}
+
 void ContextExtension::ContextExtensionVerify() {
   CHECK(IsContextExtension());
   VerifyObjectField(kScopeInfoOffset);
@@ -1013,6 +1157,7 @@
   VerifyPointer(instance_template());
   VerifyPointer(signature());
   VerifyPointer(access_check_info());
+  VerifyPointer(cached_property_name());
 }
 
 
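ModuleVerify now derives the module's lifecycle from the dynamic type of code(): a SharedFunctionInfo before instantiation, a JSFunction once instantiated, and a ModuleInfo after evaluation. A minimal model of that three-state consistency check (enum names assumed for illustration):

```cpp
#include <cassert>

// Toy model of the CHECK in Module::ModuleVerify: the (instantiated,
// evaluated) state must agree with what code() currently holds.
enum class CodeKind { kSharedFunctionInfo, kJSFunction, kModuleInfo };

bool StateConsistent(bool instantiated, bool evaluated, CodeKind code) {
  return (!instantiated && code == CodeKind::kSharedFunctionInfo) ||
         (instantiated && !evaluated && code == CodeKind::kJSFunction) ||
         (instantiated && evaluated && code == CodeKind::kModuleInfo);
}

int main() {
  assert(StateConsistent(false, false, CodeKind::kSharedFunctionInfo));
  assert(StateConsistent(true, true, CodeKind::kModuleInfo));
  assert(!StateConsistent(true, false, CodeKind::kModuleInfo));
}
```
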
diff --git a/src/objects-inl.h b/src/objects-inl.h
index af12615..1a8274c 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -305,7 +305,7 @@
                                      Handle<Object> object,
                                      Representation representation) {
   if (representation.IsSmi() && object->IsUninitialized(isolate)) {
-    return handle(Smi::FromInt(0), isolate);
+    return handle(Smi::kZero, isolate);
   }
   if (!representation.IsDouble()) return object;
   double value;
@@ -690,6 +690,12 @@
 
 bool HeapObject::IsJSProxy() const { return map()->IsJSProxyMap(); }
 
+bool HeapObject::IsJSArrayIterator() const {
+  InstanceType instance_type = map()->instance_type();
+  return (instance_type >= FIRST_ARRAY_ITERATOR_TYPE &&
+          instance_type <= LAST_ARRAY_ITERATOR_TYPE);
+}
+
 TYPE_CHECKER(JSSet, JS_SET_TYPE)
 TYPE_CHECKER(JSMap, JS_MAP_TYPE)
 TYPE_CHECKER(JSSetIterator, JS_SET_ITERATOR_TYPE)
@@ -702,6 +708,7 @@
 TYPE_CHECKER(WeakFixedArray, FIXED_ARRAY_TYPE)
 TYPE_CHECKER(TransitionArray, TRANSITION_ARRAY_TYPE)
 TYPE_CHECKER(JSStringIterator, JS_STRING_ITERATOR_TYPE)
+TYPE_CHECKER(JSFixedArrayIterator, JS_FIXED_ARRAY_ITERATOR_TYPE)
 
 bool HeapObject::IsJSWeakCollection() const {
   return IsJSWeakMap() || IsJSWeakSet();
@@ -715,6 +722,8 @@
 
 bool HeapObject::IsArrayList() const { return IsFixedArray(); }
 
+bool HeapObject::IsRegExpMatchInfo() const { return IsFixedArray(); }
+
 bool Object::IsLayoutDescriptor() const {
   return IsSmi() || IsFixedTypedArrayBase();
 }
@@ -794,10 +803,6 @@
   return map() == GetHeap()->scope_info_map();
 }
 
-bool HeapObject::IsModuleInfoEntry() const {
-  return map() == GetHeap()->module_info_entry_map();
-}
-
 bool HeapObject::IsModuleInfo() const {
   return map() == GetHeap()->module_info_map();
 }
@@ -1589,9 +1594,9 @@
 
 
 void AllocationSite::Initialize() {
-  set_transition_info(Smi::FromInt(0));
+  set_transition_info(Smi::kZero);
   SetElementsKind(GetInitialFastElementsKind());
-  set_nested_site(Smi::FromInt(0));
+  set_nested_site(Smi::kZero);
   set_pretenure_data(0);
   set_pretenure_create_count(0);
   set_dependent_code(DependentCode::cast(GetHeap()->empty_fixed_array()),
@@ -1665,18 +1670,6 @@
   return DONT_TRACK_ALLOCATION_SITE;
 }
 
-
-AllocationSiteMode AllocationSite::GetMode(ElementsKind from,
-                                           ElementsKind to) {
-  if (IsFastSmiElementsKind(from) &&
-      IsMoreGeneralElementsKindTransition(from, to)) {
-    return TRACK_ALLOCATION_SITE;
-  }
-
-  return DONT_TRACK_ALLOCATION_SITE;
-}
-
-
 inline bool AllocationSite::CanTrack(InstanceType type) {
   if (FLAG_allocation_site_pretenuring) {
     return type == JS_ARRAY_TYPE ||
@@ -2019,7 +2012,7 @@
   // initializing the root empty weak cell.
   DCHECK(GetHeap()->gc_state() == Heap::MARK_COMPACT ||
          this == GetHeap()->empty_weak_cell());
-  WRITE_FIELD(this, kValueOffset, Smi::FromInt(0));
+  WRITE_FIELD(this, kValueOffset, Smi::kZero);
 }
 
 
@@ -2034,9 +2027,7 @@
   CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kValueOffset, val, mode);
 }
 
-
-bool WeakCell::cleared() const { return value() == Smi::FromInt(0); }
-
+bool WeakCell::cleared() const { return value() == Smi::kZero; }
 
 Object* WeakCell::next() const { return READ_FIELD(this, kNextOffset); }
 
@@ -2116,6 +2107,8 @@
       return JSObject::kHeaderSize;
     case JS_STRING_ITERATOR_TYPE:
       return JSStringIterator::kSize;
+    case JS_FIXED_ARRAY_ITERATOR_TYPE:
+      return JSFixedArrayIterator::kHeaderSize;
     default:
       UNREACHABLE();
       return 0;
@@ -2364,9 +2357,8 @@
   CHECK(!obj->IsUndefined(isolate));
   return Handle<T>(T::cast(obj), isolate);
 }
-
-bool FixedArray::is_the_hole(int index) {
-  return get(index) == GetHeap()->the_hole_value();
+bool FixedArray::is_the_hole(Isolate* isolate, int index) {
+  return get(index)->IsTheHole(isolate);
 }
 
 void FixedArray::set(int index, Smi* value) {
@@ -2436,6 +2428,9 @@
   WRITE_UINT64_FIELD(this, offset, kHoleNanInt64);
 }
 
+bool FixedDoubleArray::is_the_hole(Isolate* isolate, int index) {
+  return is_the_hole(index);
+}
 
 bool FixedDoubleArray::is_the_hole(int index) {
   return get_representation(index) == kHoleNanInt64;
@@ -2469,7 +2464,7 @@
 
 
 void WeakFixedArray::Clear(int index) {
-  FixedArray::cast(this)->set(index + kFirstIndex, Smi::FromInt(0));
+  FixedArray::cast(this)->set(index + kFirstIndex, Smi::kZero);
 }
 
 
@@ -2534,6 +2529,48 @@
       ->set(kFirstIndex + index, undefined, SKIP_WRITE_BARRIER);
 }
 
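+// RegExpMatchInfo is a FixedArray holding the results of the last match: the
+// number of capture registers, the last subject and input, and the capture
+// registers themselves (start/end offset pairs).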
+int RegExpMatchInfo::NumberOfCaptureRegisters() {
+  DCHECK_GE(length(), kLastMatchOverhead);
+  Object* obj = get(kNumberOfCapturesIndex);
+  return Smi::cast(obj)->value();
+}
+
+void RegExpMatchInfo::SetNumberOfCaptureRegisters(int value) {
+  DCHECK_GE(length(), kLastMatchOverhead);
+  set(kNumberOfCapturesIndex, Smi::FromInt(value));
+}
+
+String* RegExpMatchInfo::LastSubject() {
+  DCHECK_GE(length(), kLastMatchOverhead);
+  Object* obj = get(kLastSubjectIndex);
+  return String::cast(obj);
+}
+
+void RegExpMatchInfo::SetLastSubject(String* value) {
+  DCHECK_GE(length(), kLastMatchOverhead);
+  set(kLastSubjectIndex, value);
+}
+
+Object* RegExpMatchInfo::LastInput() {
+  DCHECK_GE(length(), kLastMatchOverhead);
+  return get(kLastInputIndex);
+}
+
+void RegExpMatchInfo::SetLastInput(Object* value) {
+  DCHECK_GE(length(), kLastMatchOverhead);
+  set(kLastInputIndex, value);
+}
+
+int RegExpMatchInfo::Capture(int i) {
+  DCHECK_LT(i, NumberOfCaptureRegisters());
+  Object* obj = get(kFirstCaptureIndex + i);
+  return Smi::cast(obj)->value();
+}
+
+void RegExpMatchInfo::SetCapture(int i, int value) {
+  DCHECK_LT(i, NumberOfCaptureRegisters());
+  set(kFirstCaptureIndex + i, Smi::FromInt(value));
+}
 
 WriteBarrierMode HeapObject::GetWriteBarrierMode(
     const DisallowHeapAllocation& promise) {
@@ -2563,7 +2600,7 @@
   DCHECK(map() != GetHeap()->fixed_cow_array_map());
   DCHECK(index >= 0 && index < this->length());
   int offset = kHeaderSize + index * kPointerSize;
-  WRITE_FIELD(this, offset, value);
+  NOBARRIER_WRITE_FIELD(this, offset, value);
   CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
 }
 
@@ -2574,7 +2611,7 @@
   DCHECK(array->map() != array->GetHeap()->fixed_cow_array_map());
   DCHECK(index >= 0 && index < array->length());
   DCHECK(!array->GetHeap()->InNewSpace(value));
-  WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
+  NOBARRIER_WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
 }
 
 
@@ -2641,6 +2678,11 @@
   return (flags & kIsWasmFrame) != 0;
 }
 
+bool FrameArray::IsAsmJsWasmFrame(int frame_ix) const {
+  const int flags = Flags(frame_ix)->value();
+  return (flags & kIsAsmJsWasmFrame) != 0;
+}
+
 int FrameArray::FrameCount() const {
   const int frame_count = Smi::cast(get(kFrameCountIndex))->value();
   DCHECK_LE(0, frame_count);
@@ -3117,7 +3159,6 @@
 
 // static
 int HashTableBase::ComputeCapacity(int at_least_space_for) {
-  const int kMinCapacity = 4;
   int capacity = base::bits::RoundUpToPowerOfTwo32(at_least_space_for * 2);
   return Max(capacity, kMinCapacity);
 }
@@ -3279,6 +3320,8 @@
 CAST_ACCESSOR(JSMap)
 CAST_ACCESSOR(JSMapIterator)
 CAST_ACCESSOR(JSMessageObject)
+CAST_ACCESSOR(JSModuleNamespace)
+CAST_ACCESSOR(JSFixedArrayIterator)
 CAST_ACCESSOR(JSObject)
 CAST_ACCESSOR(JSProxy)
 CAST_ACCESSOR(JSReceiver)
@@ -3286,6 +3329,7 @@
 CAST_ACCESSOR(JSSet)
 CAST_ACCESSOR(JSSetIterator)
 CAST_ACCESSOR(JSStringIterator)
+CAST_ACCESSOR(JSArrayIterator)
 CAST_ACCESSOR(JSTypedArray)
 CAST_ACCESSOR(JSValue)
 CAST_ACCESSOR(JSWeakCollection)
@@ -3293,7 +3337,6 @@
 CAST_ACCESSOR(JSWeakSet)
 CAST_ACCESSOR(LayoutDescriptor)
 CAST_ACCESSOR(Map)
-CAST_ACCESSOR(ModuleInfoEntry)
 CAST_ACCESSOR(ModuleInfo)
 CAST_ACCESSOR(Name)
 CAST_ACCESSOR(NameDictionary)
@@ -3306,6 +3349,7 @@
 CAST_ACCESSOR(OrderedHashSet)
 CAST_ACCESSOR(PropertyCell)
 CAST_ACCESSOR(TemplateList)
+CAST_ACCESSOR(RegExpMatchInfo)
 CAST_ACCESSOR(ScopeInfo)
 CAST_ACCESSOR(SeededNumberDictionary)
 CAST_ACCESSOR(SeqOneByteString)
@@ -3329,6 +3373,24 @@
 CAST_ACCESSOR(WeakFixedArray)
 CAST_ACCESSOR(WeakHashTable)
 
+template <class T>
+PodArray<T>* PodArray<T>::cast(Object* object) {
+  SLOW_DCHECK(object->IsByteArray());
+  return reinterpret_cast<PodArray<T>*>(object);
+}
+template <class T>
+const PodArray<T>* PodArray<T>::cast(const Object* object) {
+  SLOW_DCHECK(object->IsByteArray());
+  return reinterpret_cast<const PodArray<T>*>(object);
+}
+
+// static
+template <class T>
+Handle<PodArray<T>> PodArray<T>::New(Isolate* isolate, int length,
+                                     PretenureFlag pretenure) {
+  return Handle<PodArray<T>>::cast(
+      isolate->factory()->NewByteArray(length * sizeof(T), pretenure));
+}
 
 // static
 template <class Traits>
@@ -3371,6 +3433,7 @@
 DEFINE_DEOPT_ELEMENT_ACCESSORS(OptimizationId, Smi)
 DEFINE_DEOPT_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
 DEFINE_DEOPT_ELEMENT_ACCESSORS(WeakCellCache, Object)
+DEFINE_DEOPT_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>)
 
 #undef DEFINE_DEOPT_ELEMENT_ACCESSORS
 
@@ -4059,24 +4122,20 @@
   return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
 }
 
-const byte* ByteArray::data() const {
-  return reinterpret_cast<const byte*>(FIELD_ADDR_CONST(this, kHeaderSize));
-}
-
 void ByteArray::set(int index, byte value) {
   DCHECK(index >= 0 && index < this->length());
   WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize, value);
 }
 
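+// The bounds checks below test length <= kMaxInt - index first, so that
+// index + length is only computed once it cannot overflow int.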
 void ByteArray::copy_in(int index, const byte* buffer, int length) {
-  DCHECK(index >= 0 && length >= 0 && index + length >= index &&
+  DCHECK(index >= 0 && length >= 0 && length <= kMaxInt - index &&
          index + length <= this->length());
   byte* dst_addr = FIELD_ADDR(this, kHeaderSize + index * kCharSize);
   memcpy(dst_addr, buffer, length);
 }
 
 void ByteArray::copy_out(int index, byte* buffer, int length) {
-  DCHECK(index >= 0 && length >= 0 && index + length >= index &&
+  DCHECK(index >= 0 && length >= 0 && length <= kMaxInt - index &&
          index + length <= this->length());
   const byte* src_addr = FIELD_ADDR(this, kHeaderSize + index * kCharSize);
   memcpy(buffer, src_addr, length);
@@ -4231,7 +4290,7 @@
 
 
 int FixedTypedArrayBase::DataSize(InstanceType type) {
-  if (base_pointer() == Smi::FromInt(0)) return 0;
+  if (base_pointer() == Smi::kZero) return 0;
   return length() * ElementSize(type);
 }
 
@@ -5005,8 +5064,8 @@
 inline bool Code::is_interpreter_trampoline_builtin() {
   Builtins* builtins = GetIsolate()->builtins();
   return this == *builtins->InterpreterEntryTrampoline() ||
-         this == *builtins->InterpreterEnterBytecodeDispatch() ||
-         this == *builtins->InterpreterMarkBaselineOnReturn();
+         this == *builtins->InterpreterEnterBytecodeAdvance() ||
+         this == *builtins->InterpreterEnterBytecodeDispatch();
 }
 
 inline bool Code::has_unwinding_info() const {
@@ -5654,12 +5713,20 @@
 
 ACCESSORS(Box, value, Object, kValueOffset)
 
-ACCESSORS(PromiseContainer, thenable, JSReceiver, kThenableOffset)
-ACCESSORS(PromiseContainer, then, JSReceiver, kThenOffset)
-ACCESSORS(PromiseContainer, resolve, JSFunction, kResolveOffset)
-ACCESSORS(PromiseContainer, reject, JSFunction, kRejectOffset)
-ACCESSORS(PromiseContainer, before_debug_event, Object, kBeforeDebugEventOffset)
-ACCESSORS(PromiseContainer, after_debug_event, Object, kAfterDebugEventOffset)
+ACCESSORS(PromiseResolveThenableJobInfo, thenable, JSReceiver, kThenableOffset)
+ACCESSORS(PromiseResolveThenableJobInfo, then, JSReceiver, kThenOffset)
+ACCESSORS(PromiseResolveThenableJobInfo, resolve, JSFunction, kResolveOffset)
+ACCESSORS(PromiseResolveThenableJobInfo, reject, JSFunction, kRejectOffset)
+ACCESSORS(PromiseResolveThenableJobInfo, debug_id, Object, kDebugIdOffset)
+ACCESSORS(PromiseResolveThenableJobInfo, debug_name, Object, kDebugNameOffset)
+ACCESSORS(PromiseResolveThenableJobInfo, context, Context, kContextOffset);
+
+ACCESSORS(PromiseReactionJobInfo, value, Object, kValueOffset);
+ACCESSORS(PromiseReactionJobInfo, tasks, Object, kTasksOffset);
+ACCESSORS(PromiseReactionJobInfo, deferred, Object, kDeferredOffset);
+ACCESSORS(PromiseReactionJobInfo, debug_id, Object, kDebugIdOffset);
+ACCESSORS(PromiseReactionJobInfo, debug_name, Object, kDebugNameOffset);
+ACCESSORS(PromiseReactionJobInfo, context, Context, kContextOffset);
 
 Map* PrototypeInfo::ObjectCreateMap() {
   return Map::cast(WeakCell::cast(object_create_map())->value());
@@ -5702,6 +5769,7 @@
   return nullptr;
 }
 
+ACCESSORS(PrototypeInfo, weak_cell, Object, kWeakCellOffset)
 ACCESSORS(PrototypeInfo, prototype_users, Object, kPrototypeUsersOffset)
 ACCESSORS(PrototypeInfo, object_create_map, Object, kObjectCreateMap)
 SMI_ACCESSORS(PrototypeInfo, registry_slot, kRegistrySlotOffset)
@@ -5709,27 +5777,46 @@
 SMI_ACCESSORS(PrototypeInfo, bit_field, kBitFieldOffset)
 BOOL_ACCESSORS(PrototypeInfo, bit_field, should_be_fast_map, kShouldBeFastBit)
 
+ACCESSORS(Tuple3, value1, Object, kValue1Offset)
+ACCESSORS(Tuple3, value2, Object, kValue2Offset)
+ACCESSORS(Tuple3, value3, Object, kValue3Offset)
+
 ACCESSORS(ContextExtension, scope_info, ScopeInfo, kScopeInfoOffset)
 ACCESSORS(ContextExtension, extension, Object, kExtensionOffset)
 
+ACCESSORS(JSModuleNamespace, module, Module, kModuleOffset)
+
+ACCESSORS(JSFixedArrayIterator, array, FixedArray, kArrayOffset)
+SMI_ACCESSORS(JSFixedArrayIterator, index, kIndexOffset)
+ACCESSORS(JSFixedArrayIterator, initial_next, JSFunction, kNextOffset)
+
 ACCESSORS(Module, code, Object, kCodeOffset)
 ACCESSORS(Module, exports, ObjectHashTable, kExportsOffset)
+ACCESSORS(Module, regular_exports, FixedArray, kRegularExportsOffset)
+ACCESSORS(Module, regular_imports, FixedArray, kRegularImportsOffset)
+ACCESSORS(Module, module_namespace, HeapObject, kModuleNamespaceOffset)
 ACCESSORS(Module, requested_modules, FixedArray, kRequestedModulesOffset)
-SMI_ACCESSORS(Module, flags, kFlagsOffset)
-BOOL_ACCESSORS(Module, flags, evaluated, kEvaluatedBit)
-ACCESSORS(Module, embedder_data, Object, kEmbedderDataOffset)
+SMI_ACCESSORS(Module, hash, kHashOffset)
 
-SharedFunctionInfo* Module::shared() const {
-  return code()->IsSharedFunctionInfo() ? SharedFunctionInfo::cast(code())
-                                        : JSFunction::cast(code())->shared();
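+// The module's lifecycle is read off code(): SharedFunctionInfo until
+// instantiation, JSFunction until evaluation, ModuleInfo afterwards.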
+bool Module::evaluated() const { return code()->IsModuleInfo(); }
+
+void Module::set_evaluated() {
+  DCHECK(instantiated());
+  DCHECK(!evaluated());
+  return set_code(
+      JSFunction::cast(code())->shared()->scope_info()->ModuleDescriptorInfo());
 }
 
+bool Module::instantiated() const { return !code()->IsSharedFunctionInfo(); }
+
 ModuleInfo* Module::info() const {
-  return shared()->scope_info()->ModuleDescriptorInfo();
+  if (evaluated()) return ModuleInfo::cast(code());
+  ScopeInfo* scope_info = instantiated()
+                              ? JSFunction::cast(code())->shared()->scope_info()
+                              : SharedFunctionInfo::cast(code())->scope_info();
+  return scope_info->ModuleDescriptorInfo();
 }
 
-uint32_t Module::Hash() const { return Symbol::cast(shared()->name())->Hash(); }
-
 ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
 ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
 
@@ -5781,6 +5868,8 @@
           kAccessCheckInfoOffset)
 ACCESSORS(FunctionTemplateInfo, shared_function_info, Object,
           kSharedFunctionInfoOffset)
+ACCESSORS(FunctionTemplateInfo, cached_property_name, Object,
+          kCachedPropertyNameOffset)
 
 SMI_ACCESSORS(FunctionTemplateInfo, flag, kFlagOffset)
 
@@ -5848,10 +5937,8 @@
 SMI_ACCESSORS(Script, flags, kFlagsOffset)
 ACCESSORS(Script, source_url, Object, kSourceUrlOffset)
 ACCESSORS(Script, source_mapping_url, Object, kSourceMappingUrlOffset)
-ACCESSORS_CHECKED(Script, wasm_object, JSObject, kEvalFromSharedOffset,
+ACCESSORS_CHECKED(Script, wasm_compiled_module, Object, kEvalFromSharedOffset,
                   this->type() == TYPE_WASM)
-SMI_ACCESSORS_CHECKED(Script, wasm_function_index, kEvalFromPositionOffset,
-                      this->type() == TYPE_WASM)
 
 Script::CompilationType Script::compilation_type() {
   return BooleanBit::get(flags(), kCompilationTypeBit) ?
@@ -5957,10 +6044,6 @@
                kAllowLazyCompilation)
 BOOL_ACCESSORS(SharedFunctionInfo,
                compiler_hints,
-               allows_lazy_compilation_without_context,
-               kAllowLazyCompilationWithoutContext)
-BOOL_ACCESSORS(SharedFunctionInfo,
-               compiler_hints,
                uses_arguments,
                kUsesArguments)
 BOOL_ACCESSORS(SharedFunctionInfo,
@@ -5973,6 +6056,8 @@
                kNeverCompiled)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_declaration,
                kIsDeclaration)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, marked_for_tier_up,
+               kMarkedForTierUp)
 
 #if V8_HOST_ARCH_32_BIT
 SMI_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
@@ -6189,6 +6274,10 @@
   if (is_compiled()) set_never_compiled(false);
 }
 
+bool SharedFunctionInfo::IsInterpreted() const {
+  return code()->is_interpreter_trampoline_builtin();
+}
+
 bool SharedFunctionInfo::HasBaselineCode() const {
   return code()->kind() == Code::FUNCTION;
 }
@@ -6417,12 +6506,12 @@
   return type != Script::TYPE_NORMAL;
 }
 
-
-bool SharedFunctionInfo::IsSubjectToDebugging() { return !IsBuiltin(); }
-
+bool SharedFunctionInfo::IsSubjectToDebugging() {
+  return !IsBuiltin() && !HasAsmWasmData();
+}
 
 bool SharedFunctionInfo::OptimizedCodeMapIsCleared() const {
-  return optimized_code_map() == GetHeap()->cleared_optimized_code_map();
+  return optimized_code_map() == GetHeap()->empty_fixed_array();
 }
 
 
@@ -6430,6 +6519,10 @@
   return code()->kind() == Code::OPTIMIZED_FUNCTION;
 }
 
+bool JSFunction::IsInterpreted() {
+  return code()->is_interpreter_trampoline_builtin();
+}
+
 bool JSFunction::IsMarkedForBaseline() {
   return code() ==
          GetIsolate()->builtins()->builtin(Builtins::kCompileBaseline);
@@ -6475,11 +6568,10 @@
 }
 
 AbstractCode* JSFunction::abstract_code() {
-  Code* code = this->code();
-  if (code->is_interpreter_trampoline_builtin()) {
+  if (IsInterpreted()) {
     return AbstractCode::cast(shared()->bytecode_array());
   } else {
-    return AbstractCode::cast(code);
+    return AbstractCode::cast(code());
   }
 }
 
@@ -6674,6 +6766,8 @@
   return continuation() == kGeneratorExecuting;
 }
 
+TYPE_CHECKER(JSModuleNamespace, JS_MODULE_NAMESPACE_TYPE)
+
 ACCESSORS(JSValue, value, Object, kValueOffset)
 
 
@@ -6921,7 +7015,7 @@
 
 
 Object* JSArrayBufferView::byte_offset() const {
-  if (WasNeutered()) return Smi::FromInt(0);
+  if (WasNeutered()) return Smi::kZero;
   return Object::cast(READ_FIELD(this, kByteOffsetOffset));
 }
 
@@ -6933,7 +7027,7 @@
 
 
 Object* JSArrayBufferView::byte_length() const {
-  if (WasNeutered()) return Smi::FromInt(0);
+  if (WasNeutered()) return Smi::kZero;
   return Object::cast(READ_FIELD(this, kByteLengthOffset));
 }
 
@@ -6957,7 +7051,7 @@
 
 
 Object* JSTypedArray::length() const {
-  if (WasNeutered()) return Smi::FromInt(0);
+  if (WasNeutered()) return Smi::kZero;
   return Object::cast(READ_FIELD(this, kLengthOffset));
 }
 
@@ -7035,6 +7129,18 @@
   FixedArray::cast(data())->set(index, value);
 }
 
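+// lastIndex lives in an in-object field at a fixed offset past the JSRegExp
+// header, so it can be accessed without a property lookup. The raw
+// WRITE_FIELD skips the write barrier, which is safe because the stored
+// value is always a Smi.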
+void JSRegExp::SetLastIndex(int index) {
+  static const int offset =
+      kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
+  Smi* value = Smi::FromInt(index);
+  WRITE_FIELD(this, offset, value);
+}
+
+Object* JSRegExp::LastIndex() {
+  static const int offset =
+      kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
+  return READ_FIELD(this, offset);
+}
 
 ElementsKind JSObject::GetElementsKind() {
   ElementsKind kind = map()->elements_kind();
@@ -7484,7 +7590,7 @@
 
 
 bool JSReceiver::HasFastProperties() {
-  DCHECK(properties()->IsDictionary() == map()->is_dictionary_map());
+  DCHECK_EQ(properties()->IsDictionary(), map()->is_dictionary_map());
   return !properties()->IsDictionary();
 }
 
@@ -7586,6 +7692,11 @@
   return iter.GetCurrent() != global;
 }
 
+inline int JSGlobalProxy::SizeWithInternalFields(int internal_field_count) {
+  DCHECK_GE(internal_field_count, 0);
+  return kSize + internal_field_count * kPointerSize;
+}
+
 Smi* JSReceiver::GetOrCreateIdentityHash(Isolate* isolate,
                                          Handle<JSReceiver> object) {
   return object->IsJSProxy() ? JSProxy::GetOrCreateIdentityHash(
@@ -7632,6 +7743,14 @@
   set_flag(BooleanBit::set(flag(), kSpecialDataProperty, value));
 }
 
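+// When replace_on_access is set, reading the accessor once replaces it with
+// a plain data property holding the result (see
+// Object::GetPropertyWithAccessor in objects.cc).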
+bool AccessorInfo::replace_on_access() {
+  return BooleanBit::get(flag(), kReplaceOnAccess);
+}
+
+void AccessorInfo::set_replace_on_access(bool value) {
+  set_flag(BooleanBit::set(flag(), kReplaceOnAccess, value));
+}
+
 bool AccessorInfo::is_sloppy() { return BooleanBit::get(flag(), kIsSloppy); }
 
 void AccessorInfo::set_is_sloppy(bool value) {
@@ -7709,7 +7828,7 @@
 void Dictionary<Derived, Shape, Key>::SetEntry(int entry,
                                                Handle<Object> key,
                                                Handle<Object> value) {
-  this->SetEntry(entry, key, value, PropertyDetails(Smi::FromInt(0)));
+  this->SetEntry(entry, key, value, PropertyDetails(Smi::kZero));
 }
 
 
@@ -7775,7 +7894,7 @@
 }
 
 Map* UnseededNumberDictionaryShape::GetMap(Isolate* isolate) {
-  return *isolate->factory()->unseeded_number_dictionary_map();
+  return isolate->heap()->unseeded_number_dictionary_map();
 }
 
 uint32_t SeededNumberDictionaryShape::SeededHash(uint32_t key, uint32_t seed) {
@@ -7946,15 +8065,13 @@
 FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(SCOPE_INFO_FIELD_ACCESSORS)
 #undef SCOPE_INFO_FIELD_ACCESSORS
 
-Object* ModuleInfoEntry::export_name() const { return get(kExportNameIndex); }
-
-Object* ModuleInfoEntry::local_name() const { return get(kLocalNameIndex); }
-
-Object* ModuleInfoEntry::import_name() const { return get(kImportNameIndex); }
-
-Object* ModuleInfoEntry::module_request() const {
-  return get(kModuleRequestIndex);
-}
+ACCESSORS(ModuleInfoEntry, export_name, Object, kExportNameOffset)
+ACCESSORS(ModuleInfoEntry, local_name, Object, kLocalNameOffset)
+ACCESSORS(ModuleInfoEntry, import_name, Object, kImportNameOffset)
+SMI_ACCESSORS(ModuleInfoEntry, module_request, kModuleRequestOffset)
+SMI_ACCESSORS(ModuleInfoEntry, cell_index, kCellIndexOffset)
+SMI_ACCESSORS(ModuleInfoEntry, beg_pos, kBegPosOffset)
+SMI_ACCESSORS(ModuleInfoEntry, end_pos, kEndPosOffset)
 
 FixedArray* ModuleInfo::module_requests() const {
   return FixedArray::cast(get(kModuleRequestsIndex));
@@ -8101,9 +8218,9 @@
 
 
 void TypeFeedbackInfo::initialize_storage() {
-  WRITE_FIELD(this, kStorage1Offset, Smi::FromInt(0));
-  WRITE_FIELD(this, kStorage2Offset, Smi::FromInt(0));
-  WRITE_FIELD(this, kStorage3Offset, Smi::FromInt(0));
+  WRITE_FIELD(this, kStorage1Offset, Smi::kZero);
+  WRITE_FIELD(this, kStorage2Offset, Smi::kZero);
+  WRITE_FIELD(this, kStorage3Offset, Smi::kZero);
 }
 
 
@@ -8273,6 +8390,10 @@
 ACCESSORS(JSIteratorResult, value, Object, kValueOffset)
 ACCESSORS(JSIteratorResult, done, Object, kDoneOffset)
 
+ACCESSORS(JSArrayIterator, object, Object, kIteratedObjectOffset)
+ACCESSORS(JSArrayIterator, index, Object, kNextIndexOffset)
+ACCESSORS(JSArrayIterator, object_map, Object, kIteratedObjectMapOffset)
+
 ACCESSORS(JSStringIterator, string, String, kStringOffset)
 SMI_ACCESSORS(JSStringIterator, index, kNextIndexOffset)
 
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 9054371..83e00b9 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -66,11 +66,12 @@
       break;
     case HEAP_NUMBER_TYPE:
       HeapNumber::cast(this)->HeapNumberPrint(os);
+      os << "\n";
       break;
     case MUTABLE_HEAP_NUMBER_TYPE:
       os << "<mutable ";
       HeapNumber::cast(this)->HeapNumberPrint(os);
-      os << ">";
+      os << ">\n";
       break;
     case SIMD128_VALUE_TYPE:
       Simd128Value::cast(this)->Simd128ValuePrint(os);
@@ -102,6 +103,44 @@
     TYPED_ARRAYS(PRINT_FIXED_TYPED_ARRAY)
 #undef PRINT_FIXED_TYPED_ARRAY
 
+    case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
+    case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
+    case JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE:
+    case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE:
+      JSArrayIterator::cast(this)->JSArrayIteratorPrint(os);
+      break;
+
     case FILLER_TYPE:
       os << "filler";
       break;
@@ -166,6 +205,9 @@
     case JS_WEAK_SET_TYPE:
       JSWeakSet::cast(this)->JSWeakSetPrint(os);
       break;
+    case JS_MODULE_NAMESPACE_TYPE:
+      JSModuleNamespace::cast(this)->JSModuleNamespacePrint(os);
+      break;
     case FOREIGN_TYPE:
       Foreign::cast(this)->ForeignPrint(os);
       break;
@@ -190,6 +232,9 @@
     case JS_TYPED_ARRAY_TYPE:
       JSTypedArray::cast(this)->JSTypedArrayPrint(os);
       break;
+    case JS_FIXED_ARRAY_ITERATOR_TYPE:
+      JSFixedArrayIterator::cast(this)->JSFixedArrayIteratorPrint(os);
+      break;
     case JS_DATA_VIEW_TYPE:
       JSDataView::cast(this)->JSDataViewPrint(os);
       break;
@@ -946,6 +991,34 @@
   JSObjectPrintBody(os, this, !WasNeutered());
 }
 
+void JSArrayIterator::JSArrayIteratorPrint(std::ostream& os) {  // NOLINT
+  JSObjectPrintHeader(os, this, "JSArrayIterator");
+
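+  // Array iterator instance types are ordered by kind: key iterators first,
+  // then key/value iterators, then value iterators.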
+  InstanceType instance_type = map()->instance_type();
+  std::string type;
+  if (instance_type <= LAST_ARRAY_KEY_ITERATOR_TYPE) {
+    type = "keys";
+  } else if (instance_type <= LAST_ARRAY_KEY_VALUE_ITERATOR_TYPE) {
+    type = "entries";
+  } else {
+    type = "values";
+  }
+
+  os << "\n - type = " << type;
+  os << "\n - object = " << Brief(object());
+  os << "\n - index = " << Brief(index());
+
+  JSObjectPrintBody(os, this);
+}
+
+void JSFixedArrayIterator::JSFixedArrayIteratorPrint(
+    std::ostream& os) {  // NOLINT
+  JSObjectPrintHeader(os, this, "JSFixedArrayIterator");
+  os << "\n - array = " << Brief(array());
+  os << "\n - index = " << index();
+  os << "\n - initial_next = " << Brief(initial_next());
+  JSObjectPrintBody(os, this);
+}
 
 void JSDataView::JSDataViewPrint(std::ostream& os) {  // NOLINT
   JSObjectPrintHeader(os, this, "JSDataView");
@@ -1031,18 +1104,18 @@
 
 
 void JSGlobalProxy::JSGlobalProxyPrint(std::ostream& os) {  // NOLINT
-  os << "global_proxy ";
-  JSObjectPrint(os);
-  os << "native context : " << Brief(native_context());
-  os << "\n";
+  JSObjectPrintHeader(os, this, "JSGlobalProxy");
+  os << "\n - native context = " << Brief(native_context());
+  os << "\n - hash = " << Brief(hash());
+  JSObjectPrintBody(os, this);
 }
 
 
 void JSGlobalObject::JSGlobalObjectPrint(std::ostream& os) {  // NOLINT
-  os << "global ";
-  JSObjectPrint(os);
-  os << "native context : " << Brief(native_context());
-  os << "\n";
+  JSObjectPrintHeader(os, this, "JSGlobalObject");
+  os << "\n - native context = " << Brief(native_context());
+  os << "\n - global proxy = " << Brief(global_proxy());
+  JSObjectPrintBody(os, this);
 }
 
 
@@ -1147,14 +1220,40 @@
   os << "\n";
 }
 
-void PromiseContainer::PromiseContainerPrint(std::ostream& os) {  // NOLINT
-  HeapObject::PrintHeader(os, "PromiseContainer");
+void PromiseResolveThenableJobInfo::PromiseResolveThenableJobInfoPrint(
+    std::ostream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "PromiseResolveThenableJobInfo");
   os << "\n - thenable: " << Brief(thenable());
   os << "\n - then: " << Brief(then());
   os << "\n - resolve: " << Brief(resolve());
   os << "\n - reject: " << Brief(reject());
-  os << "\n - before debug event: " << Brief(before_debug_event());
-  os << "\n - after debug event: " << Brief(after_debug_event());
+  os << "\n - debug id: " << Brief(debug_id());
+  os << "\n - debug name: " << Brief(debug_name());
+  os << "\n - context: " << Brief(context());
+  os << "\n";
+}
+
+void PromiseReactionJobInfo::PromiseReactionJobInfoPrint(
+    std::ostream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "PromiseReactionJobInfo");
+  os << "\n - value: " << Brief(value());
+  os << "\n - tasks: " << Brief(tasks());
+  os << "\n - deferred: " << Brief(deferred());
+  os << "\n - debug id: " << Brief(debug_id());
+  os << "\n - debug name: " << Brief(debug_name());
+  os << "\n - reaction context: " << Brief(context());
+  os << "\n";
+}
+
+void ModuleInfoEntry::ModuleInfoEntryPrint(std::ostream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "ModuleInfoEntry");
+  os << "\n - export_name: " << Brief(export_name());
+  os << "\n - local_name: " << Brief(local_name());
+  os << "\n - import_name: " << Brief(import_name());
+  os << "\n - module_request: " << module_request();
+  os << "\n - cell_index: " << cell_index();
+  os << "\n - beg_pos: " << beg_pos();
+  os << "\n - end_pos: " << end_pos();
   os << "\n";
 }
 
@@ -1164,15 +1263,30 @@
   os << "\n - exports: " << Brief(exports());
   os << "\n - requested_modules: " << Brief(requested_modules());
   os << "\n - evaluated: " << evaluated();
-  os << "\n - embedder_data: " << Brief(embedder_data());
+  os << "\n";
+}
+
+void JSModuleNamespace::JSModuleNamespacePrint(std::ostream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "JSModuleNamespace");
+  os << "\n - module: " << Brief(module());
   os << "\n";
 }
 
 void PrototypeInfo::PrototypeInfoPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "PrototypeInfo");
+  os << "\n - weak cell: " << Brief(weak_cell());
   os << "\n - prototype users: " << Brief(prototype_users());
   os << "\n - registry slot: " << registry_slot();
   os << "\n - validity cell: " << Brief(validity_cell());
+  os << "\n - object create map: " << Brief(object_create_map());
+  os << "\n";
+}
+
+void Tuple3::Tuple3Print(std::ostream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "Tuple3");
+  os << "\n - value1: " << Brief(value1());
+  os << "\n - value2: " << Brief(value2());
+  os << "\n - value3: " << Brief(value3());
   os << "\n";
 }
 
@@ -1238,6 +1352,7 @@
   os << "\n - instance_template: " << Brief(instance_template());
   os << "\n - signature: " << Brief(signature());
   os << "\n - access_check_info: " << Brief(access_check_info());
+  os << "\n - cached_property_name: " << Brief(cached_property_name());
   os << "\n - hidden_prototype: " << (hidden_prototype() ? "true" : "false");
   os << "\n - undetectable: " << (undetectable() ? "true" : "false");
   os << "\n - need_access_check: " << (needs_access_check() ? "true" : "false");
diff --git a/src/objects.cc b/src/objects.cc
index 44271db..e711a21 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -62,6 +62,7 @@
 #include "src/string-stream.h"
 #include "src/utils.h"
 #include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
 #include "src/zone/zone.h"
 
 #ifdef ENABLE_DISASSEMBLER
@@ -1199,7 +1200,7 @@
 Handle<TemplateList> TemplateList::New(Isolate* isolate, int size) {
   Handle<FixedArray> list =
       isolate->factory()->NewFixedArray(kLengthIndex + size);
-  list->set(kLengthIndex, Smi::FromInt(0));
+  list->set(kLengthIndex, Smi::kZero);
   return Handle<TemplateList>::cast(list);
 }
 
@@ -1352,8 +1353,19 @@
     Handle<Object> result = args.Call(call_fun, name);
     RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
     if (result.is_null()) return isolate->factory()->undefined_value();
-    // Rebox handle before return.
-    return handle(*result, isolate);
+    Handle<Object> reboxed_result = handle(*result, isolate);
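+    // Replace-on-access: reconfigure the accessor into a data property
+    // holding |result| so that subsequent reads bypass the callback.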
+    if (info->replace_on_access() && receiver->IsJSReceiver()) {
+      args.Call(reinterpret_cast<GenericNamedPropertySetterCallback>(
+                    &Accessors::ReconfigureToDataProperty),
+                name, result);
+      RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+    }
+    return reboxed_result;
+  }
+
+  // AccessorPair with 'cached' private property.
+  if (it->TryLookupCachedProperty()) {
+    return Object::GetProperty(it);
   }
 
   // Regular accessor.
@@ -1417,12 +1429,20 @@
       return Nothing<bool>();
     }
 
-    v8::AccessorNameSetterCallback call_fun =
-        v8::ToCData<v8::AccessorNameSetterCallback>(info->setter());
-    // TODO(verwaest): We should not get here anymore once all AccessorInfos are
-    // marked as special_data_property. They cannot both be writable and not
-    // have a setter.
-    if (call_fun == nullptr) return Just(true);
+    // The actual type of call_fun is either v8::AccessorNameSetterCallback or
+    // i::Accessors::AccessorNameBooleanSetterCallback, depending on whether the
+    // AccessorInfo was created by the API or internally (see accessors.cc).
+    // Here we handle both cases using GenericNamedPropertySetterCallback and
+    // its Call method.
+    GenericNamedPropertySetterCallback call_fun =
+        v8::ToCData<GenericNamedPropertySetterCallback>(info->setter());
+
+    if (call_fun == nullptr) {
+      // TODO(verwaest): We should not get here anymore once all AccessorInfos
+      // are marked as special_data_property. They cannot both be writable and
+      // not have a setter.
+      return Just(true);
+    }
 
     if (info->is_sloppy() && !receiver->IsJSReceiver()) {
       ASSIGN_RETURN_ON_EXCEPTION_VALUE(
@@ -1432,9 +1452,15 @@
 
     PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder,
                                    should_throw);
-    args.Call(call_fun, name, value);
+    Handle<Object> result = args.Call(call_fun, name, value);
+    // In the case of AccessorNameSetterCallback, we know that the result value
+    // cannot have been set, so the result of Call will be null.  In the case of
+    // AccessorNameBooleanSetterCallback, the result will either be null
+    // (signalling an exception) or a boolean Oddball.
     RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
-    return Just(true);
+    if (result.is_null()) return Just(true);
+    DCHECK(result->BooleanValue() || should_throw == DONT_THROW);
+    return Just(result->BooleanValue());
   }
 
   // Regular accessor.
@@ -1911,7 +1937,7 @@
   }
 }
 
-Map* Object::GetRootMap(Isolate* isolate) {
+Map* Object::GetPrototypeChainRootMap(Isolate* isolate) {
   DisallowHeapAllocation no_alloc;
   if (IsSmi()) {
     Context* native_context = isolate->context()->native_context();
@@ -1921,11 +1947,15 @@
   // The object is either a number, a string, a symbol, a boolean, a SIMD value,
   // a real JS object, or a Harmony proxy.
   HeapObject* heap_object = HeapObject::cast(this);
-  if (heap_object->IsJSReceiver()) {
-    return heap_object->map();
+  return heap_object->map()->GetPrototypeChainRootMap(isolate);
+}
+
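+// For a JSReceiver map, the root of the prototype chain is the map itself;
+// for primitive maps it is derived from the corresponding constructor
+// function (e.g. Number, String) in the native context.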
+Map* Map::GetPrototypeChainRootMap(Isolate* isolate) {
+  DisallowHeapAllocation no_alloc;
+  if (IsJSReceiverMap()) {
+    return this;
   }
-  int constructor_function_index =
-      heap_object->map()->GetConstructorFunctionIndex();
+  int constructor_function_index = GetConstructorFunctionIndex();
   if (constructor_function_index != Map::kNoConstructorFunctionIndex) {
     Context* native_context = isolate->context()->native_context();
     JSFunction* constructor_function =
@@ -2152,6 +2182,8 @@
   return os;
 }
 
+// Declaration of the static Smi::kZero constant.
+Smi* const Smi::kZero(nullptr);
 
 void Smi::SmiPrint(std::ostream& os) const {  // NOLINT
   os << value();
@@ -2184,8 +2216,12 @@
 
 Handle<String> String::SlowFlatten(Handle<ConsString> cons,
                                    PretenureFlag pretenure) {
-  DCHECK(AllowHeapAllocation::IsAllowed());
   DCHECK(cons->second()->length() != 0);
+
+  // TurboFan can create cons strings with empty first parts.
+  if (cons->first()->length() == 0) return handle(cons->second());
+
+  DCHECK(AllowHeapAllocation::IsAllowed());
   Isolate* isolate = cons->GetIsolate();
   int length = cons->length();
   PretenureFlag tenure = isolate->heap()->InNewSpace(*cons) ? pretenure
@@ -3171,11 +3207,11 @@
 void JSObject::UpdatePrototypeUserRegistration(Handle<Map> old_map,
                                                Handle<Map> new_map,
                                                Isolate* isolate) {
-  if (!old_map->is_prototype_map()) return;
+  DCHECK(old_map->is_prototype_map());
   DCHECK(new_map->is_prototype_map());
   bool was_registered = JSObject::UnregisterPrototypeUser(old_map, isolate);
   new_map->set_prototype_info(old_map->prototype_info());
-  old_map->set_prototype_info(Smi::FromInt(0));
+  old_map->set_prototype_info(Smi::kZero);
   if (FLAG_trace_prototype_users) {
     PrintF("Moving prototype_info %p from map %p to map %p.\n",
            reinterpret_cast<void*>(new_map->prototype_info()),
@@ -3331,7 +3367,7 @@
         value = handle(object->RawFastPropertyAt(index), isolate);
         if (!old_representation.IsDouble() && representation.IsDouble()) {
           if (old_representation.IsNone()) {
-            value = handle(Smi::FromInt(0), isolate);
+            value = handle(Smi::kZero, isolate);
           }
           value = Object::NewStorageFor(isolate, value, representation);
         } else if (old_representation.IsDouble() &&
@@ -3430,7 +3466,8 @@
   if (expected_additional_properties > 0) {
     property_count += expected_additional_properties;
   } else {
-    property_count += 2;  // Make space for two more properties.
+    // Make space for a few extra properties.
+    property_count += NameDictionary::kInitialCapacity;
   }
   Handle<NameDictionary> dictionary =
       NameDictionary::New(isolate, property_count);
@@ -3520,7 +3557,7 @@
 
     for (int i = 0; i < inobject_properties; i++) {
       FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
-      object->RawFastPropertyAtPut(index, Smi::FromInt(0));
+      object->RawFastPropertyAtPut(index, Smi::kZero);
     }
   }
 
@@ -3537,22 +3574,26 @@
 
 }  // namespace
 
+// static
+void JSObject::NotifyMapChange(Handle<Map> old_map, Handle<Map> new_map,
+                               Isolate* isolate) {
+  if (!old_map->is_prototype_map()) return;
+
+  InvalidatePrototypeChains(*old_map);
+
+  // If the map was registered with its prototype before, ensure that it
+  // registers with its new prototype now. This preserves the invariant that
+  // when a map on a prototype chain is registered with its prototype, then
+  // all prototypes further up the chain are also registered with their
+  // respective prototypes.
+  UpdatePrototypeUserRegistration(old_map, new_map, isolate);
+}
+
 void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
                             int expected_additional_properties) {
   if (object->map() == *new_map) return;
   Handle<Map> old_map(object->map());
-  if (old_map->is_prototype_map()) {
-    // If this object is a prototype (the callee will check), invalidate any
-    // prototype chains involving it.
-    InvalidatePrototypeChains(object->map());
-
-    // If the map was registered with its prototype before, ensure that it
-    // registers with its new prototype now. This preserves the invariant that
-    // when a map on a prototype chain is registered with its prototype, then
-    // all prototypes further up the chain are also registered with their
-    // respective prototypes.
-    UpdatePrototypeUserRegistration(old_map, new_map, new_map->GetIsolate());
-  }
+  NotifyMapChange(old_map, new_map, new_map->GetIsolate());
 
   if (old_map->is_dictionary_map()) {
     // For slow-to-fast migrations JSObject::MigrateSlowToFast()
@@ -3799,7 +3840,7 @@
   PropertyDetails details = instance_descriptors()->GetDetails(descriptor);
   if (details.type() != DATA) return;
 
-  Zone zone(GetIsolate()->allocator());
+  Zone zone(GetIsolate()->allocator(), ZONE_NAME);
   ZoneQueue<Map*> backlog(&zone);
   backlog.push(this);
 
@@ -3894,7 +3935,7 @@
   field_owner->UpdateFieldType(modify_index, name, new_representation,
                                wrapped_type);
   field_owner->dependent_code()->DeoptimizeDependentCodeGroup(
-      isolate, DependentCode::kFieldTypeGroup);
+      isolate, DependentCode::kFieldOwnerGroup);
 
   if (FLAG_trace_generalization) {
     map->PrintGeneralization(
@@ -4636,21 +4677,10 @@
                                     value, it->GetReceiver(), language_mode);
 
       case LookupIterator::INTERCEPTOR: {
-        Handle<Map> store_target_map;
-        if (it->GetReceiver()->IsJSObject()) {
-          store_target_map = handle(it->GetStoreTarget()->map(), it->isolate());
-        }
         if (it->HolderIsReceiverOrHiddenPrototype()) {
           Maybe<bool> result =
               JSObject::SetPropertyWithInterceptor(it, should_throw, value);
           if (result.IsNothing() || result.FromJust()) return result;
-          // Interceptor modified the store target but failed to set the
-          // property.
-          Utils::ApiCheck(store_target_map.is_null() ||
-                              *store_target_map == it->GetStoreTarget()->map(),
-                          it->IsElement() ? "v8::IndexedPropertySetterCallback"
-                                          : "v8::NamedPropertySetterCallback",
-                          "Interceptor silently changed store target.");
         } else {
           Maybe<PropertyAttributes> maybe_attributes =
               JSObject::GetPropertyAttributesWithInterceptor(it);
@@ -4658,13 +4688,6 @@
           if ((maybe_attributes.FromJust() & READ_ONLY) != 0) {
             return WriteToReadOnlyProperty(it, value, should_throw);
           }
-          // Interceptor modified the store target but failed to set the
-          // property.
-          Utils::ApiCheck(store_target_map.is_null() ||
-                              *store_target_map == it->GetStoreTarget()->map(),
-                          it->IsElement() ? "v8::IndexedPropertySetterCallback"
-                                          : "v8::NamedPropertySetterCallback",
-                          "Interceptor silently changed store target.");
           if (maybe_attributes.FromJust() == ABSENT) break;
           *found = false;
           return Nothing<bool>();
@@ -5786,17 +5809,10 @@
             it->TransitionToAccessorPair(accessors, attributes);
           }
 
-          Maybe<bool> result =
-              JSObject::SetPropertyWithAccessor(it, value, should_throw);
-
-          if (current_attributes == attributes || result.IsNothing()) {
-            return result;
-          }
-
-        } else {
-          it->ReconfigureDataProperty(value, attributes);
+          return JSObject::SetPropertyWithAccessor(it, value, should_throw);
         }
 
+        it->ReconfigureDataProperty(value, attributes);
         return Just(true);
       }
       case LookupIterator::INTEGER_INDEXED_EXOTIC:
@@ -5989,7 +6005,7 @@
   Handle<Map> new_map = Map::CopyDropDescriptors(old_map);
   new_map->set_dictionary_map(false);
 
-  UpdatePrototypeUserRegistration(old_map, new_map, isolate);
+  NotifyMapChange(old_map, new_map, isolate);
 
 #if TRACE_MAPS
   if (FLAG_trace_maps) {
@@ -6174,27 +6190,13 @@
 }
 
 
-static Smi* GenerateIdentityHash(Isolate* isolate) {
-  int hash_value;
-  int attempts = 0;
-  do {
-    // Generate a random 32-bit hash value but limit range to fit
-    // within a smi.
-    hash_value = isolate->random_number_generator()->NextInt() & Smi::kMaxValue;
-    attempts++;
-  } while (hash_value == 0 && attempts < 30);
-  hash_value = hash_value != 0 ? hash_value : 1;  // never return 0
-
-  return Smi::FromInt(hash_value);
-}
-
 template <typename ProxyType>
 static Smi* GetOrCreateIdentityHashHelper(Isolate* isolate,
                                           Handle<ProxyType> proxy) {
   Object* maybe_hash = proxy->hash();
   if (maybe_hash->IsSmi()) return Smi::cast(maybe_hash);
 
-  Smi* hash = GenerateIdentityHash(isolate);
+  Smi* hash = Smi::FromInt(isolate->GenerateIdentityHash(Smi::kMaxValue));
   proxy->set_hash(hash);
   return hash;
 }
@@ -6224,7 +6226,7 @@
     if (maybe_hash->IsSmi()) return Smi::cast(maybe_hash);
   }
 
-  Smi* hash = GenerateIdentityHash(isolate);
+  Smi* hash = Smi::FromInt(isolate->GenerateIdentityHash(Smi::kMaxValue));
   CHECK(AddDataProperty(&it, handle(hash, isolate), NONE, THROW_ON_ERROR,
                         CERTAINLY_NOT_STORE_FROM_KEYED)
             .IsJust());
@@ -6561,7 +6563,7 @@
     return JSProxy::DefineOwnProperty(isolate, Handle<JSProxy>::cast(object),
                                       key, desc, should_throw);
   }
-  // TODO(jkummerow): Support Modules (ES6 9.4.6.6)
+  // TODO(neis): Special case for JSModuleNamespace?
 
   // OrdinaryDefineOwnProperty, by virtue of calling
   // DefineOwnPropertyIgnoreAttributes, can handle arguments (ES6 9.4.4.2)
@@ -6596,29 +6598,12 @@
 
   // Handle interceptor
   if (it.state() == LookupIterator::INTERCEPTOR) {
-    Handle<Map> store_target_map;
-    if (it.GetReceiver()->IsJSObject()) {
-      store_target_map = handle(it.GetStoreTarget()->map(), it.isolate());
-    }
     if (it.HolderIsReceiverOrHiddenPrototype()) {
       Maybe<bool> result = DefinePropertyWithInterceptorInternal(
           &it, it.GetInterceptor(), should_throw, *desc);
       if (result.IsNothing() || result.FromJust()) {
         return result;
       }
-      // Interceptor modified the store target but failed to set the
-      // property.
-      if (!store_target_map.is_null() &&
-          *store_target_map != it.GetStoreTarget()->map()) {
-        it.isolate()->PushStackTraceAndDie(
-            0xabababaa, v8::ToCData<void*>(it.GetInterceptor()->definer()),
-            nullptr, 0xabababab);
-      }
-      Utils::ApiCheck(store_target_map.is_null() ||
-                          *store_target_map == it.GetStoreTarget()->map(),
-                      it.IsElement() ? "v8::IndexedPropertyDefinerCallback"
-                                     : "v8::NamedPropertyDefinerCallback",
-                      "Interceptor silently changed store target.");
     }
   }
 
@@ -7981,12 +7966,31 @@
   return object->map()->is_extensible();
 }
 
+namespace {
 
 template <typename Dictionary>
-static void ApplyAttributesToDictionary(Dictionary* dictionary,
-                                        const PropertyAttributes attributes) {
+void DictionaryDetailsAtPut(Isolate* isolate, Handle<Dictionary> dictionary,
+                            int entry, PropertyDetails details) {
+  dictionary->DetailsAtPut(entry, details);
+}
+
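+// GlobalDictionary values are PropertyCells, so attribute changes must go
+// through PropertyCell::PrepareForValue rather than a raw DetailsAtPut;
+// holes (deleted properties) are skipped.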
+template <>
+void DictionaryDetailsAtPut<GlobalDictionary>(
+    Isolate* isolate, Handle<GlobalDictionary> dictionary, int entry,
+    PropertyDetails details) {
+  Object* value = dictionary->ValueAt(entry);
+  DCHECK(value->IsPropertyCell());
+  value = PropertyCell::cast(value)->value();
+  if (value->IsTheHole(isolate)) return;
+  PropertyCell::PrepareForValue(dictionary, entry, handle(value, isolate),
+                                details);
+}
+
+template <typename Dictionary>
+void ApplyAttributesToDictionary(Isolate* isolate,
+                                 Handle<Dictionary> dictionary,
+                                 const PropertyAttributes attributes) {
   int capacity = dictionary->Capacity();
-  Isolate* isolate = dictionary->GetIsolate();
   for (int i = 0; i < capacity; i++) {
     Object* k = dictionary->KeyAt(i);
     if (dictionary->IsKey(isolate, k) &&
@@ -8001,11 +8005,12 @@
       }
       details = details.CopyAddAttributes(
           static_cast<PropertyAttributes>(attrs));
-      dictionary->DetailsAtPut(i, details);
+      DictionaryDetailsAtPut<Dictionary>(isolate, dictionary, i, details);
     }
   }
 }
 
+}  // namespace
 
 template <PropertyAttributes attrs>
 Maybe<bool> JSObject::PreventExtensionsWithTransition(
@@ -8094,9 +8099,13 @@
 
     if (attrs != NONE) {
       if (object->IsJSGlobalObject()) {
-        ApplyAttributesToDictionary(object->global_dictionary(), attrs);
+        Handle<GlobalDictionary> dictionary(object->global_dictionary(),
+                                            isolate);
+        ApplyAttributesToDictionary(isolate, dictionary, attrs);
       } else {
-        ApplyAttributesToDictionary(object->property_dictionary(), attrs);
+        Handle<NameDictionary> dictionary(object->property_dictionary(),
+                                          isolate);
+        ApplyAttributesToDictionary(isolate, dictionary, attrs);
       }
     }
   }
@@ -8120,11 +8129,12 @@
   }
 
   if (object->elements() != isolate->heap()->empty_slow_element_dictionary()) {
-    SeededNumberDictionary* dictionary = object->element_dictionary();
+    Handle<SeededNumberDictionary> dictionary(object->element_dictionary(),
+                                              isolate);
     // Make sure we never go back to the fast case
-    object->RequireSlowElements(dictionary);
+    object->RequireSlowElements(*dictionary);
     if (attrs != NONE) {
-      ApplyAttributesToDictionary(dictionary, attrs);
+      ApplyAttributesToDictionary(isolate, dictionary, attrs);
     }
   }
 
@@ -8466,8 +8476,9 @@
       int length = object->IsJSArray()
                        ? Smi::cast(JSArray::cast(object)->length())->value()
                        : elements->length();
+      Isolate* isolate = GetIsolate();
       for (int i = 0; i < length; i++) {
-        if (!elements->is_the_hole(i)) return true;
+        if (!elements->is_the_hole(isolate, i)) return true;
       }
       return false;
     }
@@ -8907,7 +8918,7 @@
         // For prototype maps, the PrototypeInfo is not copied.
         DCHECK(memcmp(fresh->address(), new_map->address(),
                       kTransitionsOrPrototypeInfoOffset) == 0);
-        DCHECK(fresh->raw_transitions() == Smi::FromInt(0));
+        DCHECK(fresh->raw_transitions() == Smi::kZero);
         STATIC_ASSERT(kDescriptorsOffset ==
                       kTransitionsOrPrototypeInfoOffset + kPointerSize);
         DCHECK(memcmp(HeapObject::RawField(*fresh, kDescriptorsOffset),
@@ -10264,7 +10275,7 @@
     }
   }
   while (index < result->length()) {
-    result->set(index, Smi::FromInt(0));
+    result->set(index, Smi::kZero);
     index++;
   }
   return Handle<WeakFixedArray>::cast(result);
@@ -10329,6 +10340,15 @@
   return ret;
 }
 
+Handle<RegExpMatchInfo> RegExpMatchInfo::ReserveCaptures(
+    Handle<RegExpMatchInfo> match_info, int capture_count) {
+  DCHECK_GE(match_info->length(), kLastMatchOverhead);
+  const int required_length = kFirstCaptureIndex + capture_count;
+  Handle<FixedArray> result =
+      EnsureSpaceInFixedArray(match_info, required_length);
+  return Handle<RegExpMatchInfo>::cast(result);
+}
+
 // static
 Handle<FrameArray> FrameArray::AppendJSFrame(Handle<FrameArray> in,
                                              Handle<Object> receiver,
@@ -10349,14 +10369,14 @@
 
 // static
 Handle<FrameArray> FrameArray::AppendWasmFrame(Handle<FrameArray> in,
-                                               Handle<Object> wasm_object,
+                                               Handle<Object> wasm_instance,
                                                int wasm_function_index,
                                                Handle<AbstractCode> code,
                                                int offset, int flags) {
   const int frame_count = in->FrameCount();
   const int new_length = LengthFor(frame_count + 1);
   Handle<FrameArray> array = EnsureSpace(in, new_length);
-  array->SetWasmObject(frame_count, *wasm_object);
+  array->SetWasmInstance(frame_count, *wasm_instance);
   array->SetWasmFunctionIndex(frame_count, Smi::FromInt(wasm_function_index));
   array->SetCode(frame_count, *code);
   array->SetOffset(frame_count, Smi::FromInt(offset));
@@ -10387,15 +10407,11 @@
       factory->NewFixedArray(LengthFor(size), pretenure);
 
   result->set(kDescriptorLengthIndex, Smi::FromInt(number_of_descriptors));
-  result->set(kEnumCacheIndex, Smi::FromInt(0));
+  result->set(kEnumCacheIndex, Smi::kZero);
   return Handle<DescriptorArray>::cast(result);
 }
 
-
-void DescriptorArray::ClearEnumCache() {
-  set(kEnumCacheIndex, Smi::FromInt(0));
-}
-
+void DescriptorArray::ClearEnumCache() { set(kEnumCacheIndex, Smi::kZero); }
 
 void DescriptorArray::Replace(int index, Descriptor* descriptor) {
   descriptor->SetSortedKeyIndex(GetSortedKeyIndex(index));
@@ -10418,9 +10434,9 @@
     bridge_storage = FixedArray::cast(descriptors->get(kEnumCacheIndex));
   }
   bridge_storage->set(kEnumCacheBridgeCacheIndex, *new_cache);
-  bridge_storage->set(kEnumCacheBridgeIndicesCacheIndex,
-                      new_index_cache.is_null() ? Object::cast(Smi::FromInt(0))
-                                                : *new_index_cache);
+  bridge_storage->set(
+      kEnumCacheBridgeIndicesCacheIndex,
+      new_index_cache.is_null() ? Object::cast(Smi::kZero) : *new_index_cache);
   if (needs_new_enum_cache) {
     descriptors->set(kEnumCacheIndex, bridge_storage);
   }
@@ -10536,6 +10552,14 @@
   return Handle<DeoptimizationOutputData>::cast(result);
 }
 
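+// Index -1 denotes the outermost function; other indices refer to inlined
+// functions stored in the literal array.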
+SharedFunctionInfo* DeoptimizationInputData::GetInlinedFunction(int index) {
+  if (index == -1) {
+    return SharedFunctionInfo::cast(this->SharedFunctionInfo());
+  } else {
+    return SharedFunctionInfo::cast(LiteralArray()->get(index));
+  }
+}
+
 const int LiteralsArray::kFeedbackVectorOffset =
     LiteralsArray::OffsetOfElementAt(LiteralsArray::kVectorIndex);
 
@@ -10722,7 +10746,7 @@
   // Fast case: short integer or some sorts of junk values.
   if (subject->IsSeqOneByteString()) {
     int len = subject->length();
-    if (len == 0) return handle(Smi::FromInt(0), isolate);
+    if (len == 0) return handle(Smi::kZero, isolate);
 
     DisallowHeapAllocation no_gc;
     uint8_t const* data = Handle<SeqOneByteString>::cast(subject)->GetChars();
@@ -11604,41 +11628,170 @@
   return result;
 }
 
-int String::IndexOf(Isolate* isolate, Handle<String> sub, Handle<String> pat,
-                    int start_index) {
+Object* String::IndexOf(Isolate* isolate, Handle<Object> receiver,
+                        Handle<Object> search, Handle<Object> position) {
+  if (receiver->IsNull(isolate) || receiver->IsUndefined(isolate)) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  "String.prototype.indexOf")));
+  }
+  Handle<String> receiver_string;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver_string,
+                                     Object::ToString(isolate, receiver));
+
+  Handle<String> search_string;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, search_string,
+                                     Object::ToString(isolate, search));
+
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
+                                     Object::ToInteger(isolate, position));
+
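+  // Clamp the position to [0, receiver length] while still a double, so the
+  // narrowing cast to uint32_t below cannot overflow.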
+  double index = std::max(position->Number(), 0.0);
+  index = std::min(index, static_cast<double>(receiver_string->length()));
+
+  return Smi::FromInt(String::IndexOf(isolate, receiver_string, search_string,
+                                      static_cast<uint32_t>(index)));
+}
+
+namespace {
+
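+// Dispatches on the receiver's encoding so that String::IndexOf below only
+// has to branch on the search string's encoding.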
+template <typename T>
+int SearchString(Isolate* isolate, String::FlatContent receiver_content,
+                 Vector<T> pat_vector, int start_index) {
+  if (receiver_content.IsOneByte()) {
+    return SearchString(isolate, receiver_content.ToOneByteVector(), pat_vector,
+                        start_index);
+  }
+  return SearchString(isolate, receiver_content.ToUC16Vector(), pat_vector,
+                      start_index);
+}
+
+}  // namespace
+
+int String::IndexOf(Isolate* isolate, Handle<String> receiver,
+                    Handle<String> search, int start_index) {
   DCHECK(0 <= start_index);
-  DCHECK(start_index <= sub->length());
+  DCHECK(start_index <= receiver->length());
 
-  int pattern_length = pat->length();
-  if (pattern_length == 0) return start_index;
+  uint32_t search_length = search->length();
+  if (search_length == 0) return start_index;
 
-  int subject_length = sub->length();
-  if (start_index + pattern_length > subject_length) return -1;
+  uint32_t receiver_length = receiver->length();
+  if (start_index + search_length > receiver_length) return -1;
 
-  sub = String::Flatten(sub);
-  pat = String::Flatten(pat);
+  receiver = String::Flatten(receiver);
+  search = String::Flatten(search);
 
   DisallowHeapAllocation no_gc;  // ensure vectors stay valid
   // Extract flattened substrings of cons strings before getting encoding.
-  String::FlatContent seq_sub = sub->GetFlatContent();
-  String::FlatContent seq_pat = pat->GetFlatContent();
+  String::FlatContent receiver_content = receiver->GetFlatContent();
+  String::FlatContent search_content = search->GetFlatContent();
 
   // dispatch on type of strings
-  if (seq_pat.IsOneByte()) {
-    Vector<const uint8_t> pat_vector = seq_pat.ToOneByteVector();
-    if (seq_sub.IsOneByte()) {
-      return SearchString(isolate, seq_sub.ToOneByteVector(), pat_vector,
-                          start_index);
+  if (search_content.IsOneByte()) {
+    Vector<const uint8_t> pat_vector = search_content.ToOneByteVector();
+    return SearchString<const uint8_t>(isolate, receiver_content, pat_vector,
+                                       start_index);
+  }
+  Vector<const uc16> pat_vector = search_content.ToUC16Vector();
+  return SearchString<const uc16>(isolate, receiver_content, pat_vector,
+                                  start_index);
+}
+
+MaybeHandle<String> String::GetSubstitution(Isolate* isolate, Match* match,
+                                            Handle<String> replacement) {
+  Factory* factory = isolate->factory();
+
+  const int replacement_length = replacement->length();
+  const int captures_length = match->CaptureCount();
+
+  replacement = String::Flatten(replacement);
+
+  Handle<String> dollar_string =
+      factory->LookupSingleCharacterStringFromCode('$');
+  int next = String::IndexOf(isolate, replacement, dollar_string, 0);
+  if (next < 0) {
+    return replacement;
+  }
+
+  IncrementalStringBuilder builder(isolate);
+
+  if (next > 0) {
+    builder.AppendString(factory->NewSubString(replacement, 0, next));
+  }
+
+  while (true) {
+    int pos = next + 1;
+    if (pos < replacement_length) {
+      const uint16_t peek = replacement->Get(pos);
+      if (peek == '$') {  // $$
+        pos++;
+        builder.AppendCharacter('$');
+      } else if (peek == '&') {  // $& - match
+        pos++;
+        builder.AppendString(match->GetMatch());
+      } else if (peek == '`') {  // $` - prefix
+        pos++;
+        builder.AppendString(match->GetPrefix());
+      } else if (peek == '\'') {  // $' - suffix
+        pos++;
+        builder.AppendString(match->GetSuffix());
+      } else if (peek >= '0' && peek <= '9') {
+        // Valid indices are $1 .. $9, $01 .. $09 and $10 .. $99
+        int scaled_index = (peek - '0');
+        int advance = 1;
+
+        if (pos + 1 < replacement_length) {
+          const uint16_t next_peek = replacement->Get(pos + 1);
+          if (next_peek >= '0' && next_peek <= '9') {
+            const int new_scaled_index = scaled_index * 10 + (next_peek - '0');
+            if (new_scaled_index < captures_length) {
+              scaled_index = new_scaled_index;
+              advance = 2;
+            }
+          }
+        }
+
+        if (scaled_index != 0 && scaled_index < captures_length) {
+          bool capture_exists;
+          Handle<String> capture;
+          ASSIGN_RETURN_ON_EXCEPTION(
+              isolate, capture,
+              match->GetCapture(scaled_index, &capture_exists), String);
+          if (capture_exists) builder.AppendString(capture);
+          pos += advance;
+        } else {
+          builder.AppendCharacter('$');
+        }
+      } else {
+        builder.AppendCharacter('$');
+      }
+    } else {
+      builder.AppendCharacter('$');
     }
-    return SearchString(isolate, seq_sub.ToUC16Vector(), pat_vector,
-                        start_index);
+
+    // Go to the next $ in the replacement.
+    next = String::IndexOf(isolate, replacement, dollar_string, pos);
+
+    // Return if there are no more $ characters in the replacement. If we
+    // haven't reached the end, we need to append the suffix.
+    if (next < 0) {
+      if (pos < replacement_length) {
+        builder.AppendString(
+            factory->NewSubString(replacement, pos, replacement_length));
+      }
+      return builder.Finish();
+    }
+
+    // Append substring between the previous and the next $ character.
+    if (next > pos) {
+      builder.AppendString(factory->NewSubString(replacement, pos, next));
+    }
   }
-  Vector<const uc16> pat_vector = seq_pat.ToUC16Vector();
-  if (seq_sub.IsOneByte()) {
-    return SearchString(isolate, seq_sub.ToOneByteVector(), pat_vector,
-                        start_index);
-  }
-  return SearchString(isolate, seq_sub.ToUC16Vector(), pat_vector, start_index);
+
+  UNREACHABLE();
+  return MaybeHandle<String>();
 }
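The loop above scans the replacement text for `$`-escapes and expands them against the current match. As a compact reference for the semantics, here is a standalone model in plain C++ (not the V8 API); numbered capture references ($1..$9, $01..$99) are omitted for brevity.

```cpp
#include <string>

std::string ExpandDollars(const std::string& prefix, const std::string& match,
                          const std::string& suffix,
                          const std::string& replacement) {
  std::string out;
  for (size_t i = 0; i < replacement.size(); ++i) {
    if (replacement[i] != '$' || i + 1 == replacement.size()) {
      out += replacement[i];  // ordinary character, or a trailing lone '$'
      continue;
    }
    switch (replacement[++i]) {
      case '$':  out += '$';    break;  // "$$"  -> literal "$"
      case '&':  out += match;  break;  // "$&"  -> the matched substring
      case '`':  out += prefix; break;  // "$`"  -> text before the match
      case '\'': out += suffix; break;  // "$'"  -> text after the match
      default:   out += '$'; out += replacement[i]; break;  // left as-is
    }
  }
  return out;
}
// Matching "bc" in "abcd": ExpandDollars("a", "bc", "d", "[$`|$&|$']")
// returns "[a|bc|d]".
```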
 
 namespace {  // for String.Prototype.lastIndexOf
@@ -12082,6 +12235,9 @@
   set_code_no_write_barrier(
       isolate->builtins()->builtin(Builtins::kCompileBaseline));
   // No write barrier required, since the builtin is part of the root set.
+  if (FLAG_mark_shared_functions_for_tier_up) {
+    shared()->set_marked_for_tier_up(true);
+  }
 }
 
 void JSFunction::MarkForOptimization() {
@@ -12092,6 +12248,9 @@
   set_code_no_write_barrier(
       isolate->builtins()->builtin(Builtins::kCompileOptimized));
   // No write barrier required, since the builtin is part of the root set.
+  if (FLAG_mark_shared_functions_for_tier_up) {
+    shared()->set_marked_for_tier_up(true);
+  }
 }
 
 
@@ -12112,9 +12271,15 @@
     ShortPrint();
     PrintF(" for concurrent recompilation.\n");
   }
+
   set_code_no_write_barrier(
       isolate->builtins()->builtin(Builtins::kCompileOptimizedConcurrent));
   // No write barrier required, since the builtin is part of the root set.
+  if (FLAG_mark_shared_functions_for_tier_up) {
+    // TODO(leszeks): The compilation isn't concurrent if we trigger it using
+    // this bit.
+    shared()->set_marked_for_tier_up(true);
+  }
 }
 
 // static
@@ -12142,22 +12307,6 @@
   return literals;
 }
 
-void SharedFunctionInfo::AddSharedCodeToOptimizedCodeMap(
-    Handle<SharedFunctionInfo> shared, Handle<Code> code) {
-  Isolate* isolate = shared->GetIsolate();
-  if (isolate->serializer_enabled()) return;
-  DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
-  // Empty code maps are unsupported.
-  if (!shared->OptimizedCodeMapIsCleared()) {
-    Handle<WeakCell> cell = isolate->factory()->NewWeakCell(code);
-    // A collection may have occured and cleared the optimized code map in the
-    // allocation above.
-    if (!shared->OptimizedCodeMapIsCleared()) {
-      shared->optimized_code_map()->set(kSharedCodeIndex, *cell);
-    }
-  }
-}
-
 // static
 void SharedFunctionInfo::AddToOptimizedCodeMap(
     Handle<SharedFunctionInfo> shared, Handle<Context> native_context,
@@ -12174,13 +12323,11 @@
 
   if (shared->OptimizedCodeMapIsCleared()) {
     new_code_map = isolate->factory()->NewFixedArray(kInitialLength, TENURED);
-    new_code_map->set(kSharedCodeIndex, *isolate->factory()->empty_weak_cell(),
-                      SKIP_WRITE_BARRIER);
     entry = kEntriesStart;
   } else {
     Handle<FixedArray> old_code_map(shared->optimized_code_map(), isolate);
     entry = shared->SearchOptimizedCodeMapEntry(*native_context, osr_ast_id);
-    if (entry > kSharedCodeIndex) {
+    if (entry >= kEntriesStart) {
       // Just set the code and literals of the entry.
       if (!code.is_null()) {
         Handle<WeakCell> code_cell =
@@ -12250,8 +12397,8 @@
 
 
 void SharedFunctionInfo::ClearOptimizedCodeMap() {
-  FixedArray* cleared_map = GetHeap()->cleared_optimized_code_map();
-  set_optimized_code_map(cleared_map, SKIP_WRITE_BARRIER);
+  FixedArray* empty_fixed_array = GetHeap()->empty_fixed_array();
+  set_optimized_code_map(empty_fixed_array, SKIP_WRITE_BARRIER);
 }
 
 
@@ -12301,23 +12448,11 @@
     }
     dst += kEntryLength;
   }
-  if (WeakCell::cast(code_map->get(kSharedCodeIndex))->value() ==
-      optimized_code) {
-    // Evict context-independent code as well.
-    code_map->set(kSharedCodeIndex, heap->empty_weak_cell(),
-                  SKIP_WRITE_BARRIER);
-    if (FLAG_trace_opt) {
-      PrintF("[evicting entry from optimizing code map (%s) for ", reason);
-      ShortPrint();
-      PrintF(" (context-independent code)]\n");
-    }
-  }
   if (dst != length) {
     // Always trim even when array is cleared because of heap verifier.
     heap->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(code_map,
                                                            length - dst);
-    if (code_map->length() == kEntriesStart &&
-        WeakCell::cast(code_map->get(kSharedCodeIndex))->cleared()) {
+    if (code_map->length() == kEntriesStart) {
       ClearOptimizedCodeMap();
     }
   }
@@ -12331,8 +12466,7 @@
   // Always trim even when array is cleared because of heap verifier.
   GetHeap()->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(code_map,
                                                               shrink_by);
-  if (code_map->length() == kEntriesStart &&
-      WeakCell::cast(code_map->get(kSharedCodeIndex))->cleared()) {
+  if (code_map->length() == kEntriesStart) {
     ClearOptimizedCodeMap();
   }
 }
@@ -12554,7 +12688,7 @@
 
 
 static void InvalidatePrototypeChainsInternal(Map* map) {
-  if (!map->is_prototype_map()) return;
+  DCHECK(map->is_prototype_map());
   if (FLAG_trace_prototype_users) {
     PrintF("Invalidating prototype map %p 's cell\n",
            reinterpret_cast<void*>(map));
@@ -12624,7 +12758,8 @@
 // static
 Handle<Cell> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
                                                         Isolate* isolate) {
-  Handle<Object> maybe_prototype(map->prototype(), isolate);
+  Handle<Object> maybe_prototype(
+      map->GetPrototypeChainRootMap(isolate)->prototype(), isolate);
   if (!maybe_prototype->IsJSObject()) return Handle<Cell>::null();
   Handle<JSObject> prototype = Handle<JSObject>::cast(maybe_prototype);
   // Ensure the prototype is registered with its own prototypes so its cell
@@ -12648,6 +12783,24 @@
   return cell;
 }
 
+// static
+Handle<WeakCell> Map::GetOrCreatePrototypeWeakCell(Handle<JSObject> prototype,
+                                                   Isolate* isolate) {
+  DCHECK(!prototype.is_null());
+  Handle<PrototypeInfo> proto_info =
+      GetOrCreatePrototypeInfo(prototype, isolate);
+  Object* maybe_cell = proto_info->weak_cell();
+  // Return existing cell if it's already created.
+  if (maybe_cell->IsWeakCell()) {
+    Handle<WeakCell> cell(WeakCell::cast(maybe_cell), isolate);
+    DCHECK(!cell->cleared());
+    return cell;
+  }
+  // Otherwise create a new cell.
+  Handle<WeakCell> cell = isolate->factory()->NewWeakCell(prototype);
+  proto_info->set_weak_cell(*cell);
+  return cell;
+}
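A hedged note on the helper just added: the weak cell is cached on the prototype's PrototypeInfo, so repeated compilations share one cell per prototype. A minimal usage sketch, with `prototype` and `isolate` assumed in scope:

```cpp
// Sketch only: the cell references the prototype weakly, so embedding it in
// optimized code does not keep the prototype alive.
Handle<WeakCell> cell = Map::GetOrCreatePrototypeWeakCell(prototype, isolate);
DCHECK_EQ(*prototype, cell->value());  // the strong handle keeps it uncleared here
```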
 
 // static
 void Map::SetPrototype(Handle<Map> map, Handle<Object> prototype,
@@ -13236,93 +13389,133 @@
 void Script::InitLineEnds(Handle<Script> script) {
   Isolate* isolate = script->GetIsolate();
   if (!script->line_ends()->IsUndefined(isolate)) return;
+  DCHECK_NE(Script::TYPE_WASM, script->type());
 
-  if (!script->source()->IsString()) {
-    DCHECK(script->source()->IsUndefined(isolate));
-    Handle<FixedArray> empty = isolate->factory()->NewFixedArray(0);
-    script->set_line_ends(*empty);
-    DCHECK(script->line_ends()->IsFixedArray());
-    return;
+  Object* src_obj = script->source();
+  if (!src_obj->IsString()) {
+    DCHECK(src_obj->IsUndefined(isolate));
+    script->set_line_ends(isolate->heap()->empty_fixed_array());
+  } else {
+    DCHECK(src_obj->IsString());
+    Handle<String> src(String::cast(src_obj), isolate);
+    Handle<FixedArray> array = String::CalculateLineEnds(src, true);
+    script->set_line_ends(*array);
   }
 
-  Handle<String> src(String::cast(script->source()), isolate);
-
-  Handle<FixedArray> array = String::CalculateLineEnds(src, true);
-
-  if (*array != isolate->heap()->empty_fixed_array()) {
-    array->set_map(isolate->heap()->fixed_cow_array_map());
-  }
-
-  script->set_line_ends(*array);
   DCHECK(script->line_ends()->IsFixedArray());
 }
 
+bool Script::GetPositionInfo(Handle<Script> script, int position,
+                             PositionInfo* info, OffsetFlag offset_flag) {
+  // For wasm, we do not create an artificial line_ends array, but do the
+  // translation directly.
+  if (script->type() == Script::TYPE_WASM) {
+    Handle<WasmCompiledModule> compiled_module(
+        WasmCompiledModule::cast(script->wasm_compiled_module()));
+    DCHECK_LE(0, position);
+    return wasm::GetPositionInfo(compiled_module,
+                                 static_cast<uint32_t>(position), info);
+  }
+
+  InitLineEnds(script);
+  return script->GetPositionInfo(position, info, offset_flag);
+}
+
+namespace {
+bool GetPositionInfoSlow(const Script* script, int position,
+                         Script::PositionInfo* info) {
+  if (!script->source()->IsString()) return false;
+  if (position < 0) position = 0;
+
+  String* source_string = String::cast(script->source());
+  int line = 0;
+  int line_start = 0;
+  int len = source_string->length();
+  for (int pos = 0; pos <= len; ++pos) {
+    if (pos == len || source_string->Get(pos) == '\n') {
+      if (position <= pos) {
+        info->line = line;
+        info->column = position - line_start;
+        info->line_start = line_start;
+        info->line_end = pos;
+        return true;
+      }
+      line++;
+      line_start = pos + 1;
+    }
+  }
+  return false;
+}
+}  // namespace
+
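The slow path above derives positions without a line-ends table. A self-contained model of the same scan over a plain string (illustrative, outside V8 types):

```cpp
#include <string>

struct Pos { int line, column, line_start, line_end; };

// Mirrors GetPositionInfoSlow: the end of the source is treated like a final
// '\n' so a position on the last (unterminated) line is still resolved.
bool SlowPositionInfo(const std::string& src, int position, Pos* info) {
  if (position < 0) position = 0;
  int line = 0, line_start = 0, len = static_cast<int>(src.size());
  for (int pos = 0; pos <= len; ++pos) {
    if (pos == len || src[pos] == '\n') {
      if (position <= pos) {
        *info = {line, position - line_start, line_start, pos};
        return true;
      }
      line++;
      line_start = pos + 1;
    }
  }
  return false;  // position lies beyond the end of the source
}
// For src = "ab\ncd", position 3 -> line 1, column 0 (the 'c').
```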
 #define SMI_VALUE(x) (Smi::cast(x)->value())
 bool Script::GetPositionInfo(int position, PositionInfo* info,
-                             OffsetFlag offset_flag) {
-  Handle<Script> script(this);
-  InitLineEnds(script);
-
+                             OffsetFlag offset_flag) const {
   DisallowHeapAllocation no_allocation;
 
-  DCHECK(script->line_ends()->IsFixedArray());
-  FixedArray* ends = FixedArray::cast(script->line_ends());
-
-  const int ends_len = ends->length();
-  if (ends_len == 0) return false;
-
-  // Return early on invalid positions. Negative positions behave as if 0 was
-  // passed, and positions beyond the end of the script return as failure.
-  if (position < 0) {
-    position = 0;
-  } else if (position > SMI_VALUE(ends->get(ends_len - 1))) {
-    return false;
-  }
-
-  // Determine line number by doing a binary search on the line ends array.
-  if (SMI_VALUE(ends->get(0)) >= position) {
-    info->line = 0;
-    info->line_start = 0;
-    info->column = position;
+  if (line_ends()->IsUndefined(GetIsolate())) {
+    // Slow mode: we do not have line_ends. We have to iterate through source.
+    if (!GetPositionInfoSlow(this, position, info)) return false;
   } else {
-    int left = 0;
-    int right = ends_len - 1;
+    DCHECK(line_ends()->IsFixedArray());
+    FixedArray* ends = FixedArray::cast(line_ends());
 
-    while (right > 0) {
-      DCHECK_LE(left, right);
-      const int mid = (left + right) / 2;
-      if (position > SMI_VALUE(ends->get(mid))) {
-        left = mid + 1;
-      } else if (position <= SMI_VALUE(ends->get(mid - 1))) {
-        right = mid - 1;
-      } else {
-        info->line = mid;
-        break;
-      }
+    const int ends_len = ends->length();
+    if (ends_len == 0) return false;
+
+    // Return early on invalid positions. Negative positions behave as if 0 was
+    // passed, and positions beyond the end of the script return as failure.
+    if (position < 0) {
+      position = 0;
+    } else if (position > SMI_VALUE(ends->get(ends_len - 1))) {
+      return false;
     }
-    DCHECK(SMI_VALUE(ends->get(info->line)) >= position &&
-           SMI_VALUE(ends->get(info->line - 1)) < position);
-    info->line_start = SMI_VALUE(ends->get(info->line - 1)) + 1;
-    info->column = position - info->line_start;
-  }
 
-  // Line end is position of the linebreak character.
-  info->line_end = SMI_VALUE(ends->get(info->line));
-  if (info->line_end > 0) {
-    DCHECK(script->source()->IsString());
-    Handle<String> src(String::cast(script->source()));
-    if (src->length() >= info->line_end &&
-        src->Get(info->line_end - 1) == '\r') {
-      info->line_end--;
+    // Determine line number by doing a binary search on the line ends array.
+    if (SMI_VALUE(ends->get(0)) >= position) {
+      info->line = 0;
+      info->line_start = 0;
+      info->column = position;
+    } else {
+      int left = 0;
+      int right = ends_len - 1;
+
+      while (right > 0) {
+        DCHECK_LE(left, right);
+        const int mid = (left + right) / 2;
+        if (position > SMI_VALUE(ends->get(mid))) {
+          left = mid + 1;
+        } else if (position <= SMI_VALUE(ends->get(mid - 1))) {
+          right = mid - 1;
+        } else {
+          info->line = mid;
+          break;
+        }
+      }
+      DCHECK(SMI_VALUE(ends->get(info->line)) >= position &&
+             SMI_VALUE(ends->get(info->line - 1)) < position);
+      info->line_start = SMI_VALUE(ends->get(info->line - 1)) + 1;
+      info->column = position - info->line_start;
+    }
+
+    // Line end is position of the linebreak character.
+    info->line_end = SMI_VALUE(ends->get(info->line));
+    if (info->line_end > 0) {
+      DCHECK(source()->IsString());
+      String* src = String::cast(source());
+      if (src->length() >= info->line_end &&
+          src->Get(info->line_end - 1) == '\r') {
+        info->line_end--;
+      }
     }
   }
 
   // Add offsets if requested.
   if (offset_flag == WITH_OFFSET) {
     if (info->line == 0) {
-      info->column += script->column_offset();
+      info->column += column_offset();
     }
-    info->line += script->line_offset();
+    info->line += line_offset();
   }
 
   return true;
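The fast path's binary search maintains the invariant `ends[line - 1] < position <= ends[line]`. For reference, an equivalent formulation over a plain vector of line-end offsets (sketch, standard library only):

```cpp
#include <algorithm>
#include <vector>

// Returns the first line whose recorded end offset is >= position, which is
// exactly the line the loop above selects.
int LineForPosition(const std::vector<int>& ends, int position) {
  auto it = std::lower_bound(ends.begin(), ends.end(), position);
  return static_cast<int>(it - ends.begin());
}
```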
@@ -13331,49 +13524,28 @@
 
 int Script::GetColumnNumber(Handle<Script> script, int code_pos) {
   PositionInfo info;
-  if (!script->GetPositionInfo(code_pos, &info, WITH_OFFSET)) {
-    return -1;
-  }
-
+  GetPositionInfo(script, code_pos, &info, WITH_OFFSET);
   return info.column;
 }
 
-int Script::GetLineNumberWithArray(int code_pos) {
+int Script::GetColumnNumber(int code_pos) const {
   PositionInfo info;
-  if (!GetPositionInfo(code_pos, &info, WITH_OFFSET)) {
-    return -1;
-  }
+  GetPositionInfo(code_pos, &info, WITH_OFFSET);
+  return info.column;
+}
 
+int Script::GetLineNumber(Handle<Script> script, int code_pos) {
+  PositionInfo info;
+  GetPositionInfo(script, code_pos, &info, WITH_OFFSET);
   return info.line;
 }
 
-
-int Script::GetLineNumber(Handle<Script> script, int code_pos) {
-  InitLineEnds(script);
-  return script->GetLineNumberWithArray(code_pos);
+int Script::GetLineNumber(int code_pos) const {
+  PositionInfo info;
+  GetPositionInfo(code_pos, &info, WITH_OFFSET);
+  return info.line;
 }
 
-
-int Script::GetLineNumber(int code_pos) {
-  DisallowHeapAllocation no_allocation;
-  if (!line_ends()->IsUndefined(GetIsolate())) {
-    return GetLineNumberWithArray(code_pos);
-  }
-
-  // Slow mode: we do not have line_ends. We have to iterate through source.
-  if (!source()->IsString()) return -1;
-
-  String* source_string = String::cast(source());
-  int line = 0;
-  int len = source_string->length();
-  for (int pos = 0; pos < len; pos++) {
-    if (pos == code_pos) break;
-    if (source_string->Get(pos) == '\n') line++;
-  }
-  return line;
-}
-
-
 Handle<Object> Script::GetNameOrSourceURL(Handle<Script> script) {
   Isolate* isolate = script->GetIsolate();
 
@@ -13748,7 +13920,7 @@
     Handle<SharedFunctionInfo> shared_info, FunctionLiteral* lit) {
   // When adding fields here, make sure DeclarationScope::AnalyzePartially is
   // updated accordingly.
-  shared_info->set_length(lit->scope()->arity());
+  shared_info->set_length(lit->function_length());
   shared_info->set_internal_formal_parameter_count(lit->parameter_count());
   shared_info->set_function_token_position(lit->function_token_position());
   shared_info->set_start_position(lit->start_position());
@@ -13758,8 +13930,6 @@
   shared_info->set_is_anonymous_expression(lit->is_anonymous_expression());
   shared_info->set_inferred_name(*lit->inferred_name());
   shared_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
-  shared_info->set_allows_lazy_compilation_without_context(
-      lit->AllowsLazyCompilationWithoutContext());
   shared_info->set_language_mode(lit->language_mode());
   shared_info->set_uses_arguments(lit->scope()->arguments() != NULL);
   shared_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
@@ -13818,7 +13988,7 @@
     }
     set_opt_count(0);
     set_deopt_count(0);
-  } else if (code()->is_interpreter_trampoline_builtin()) {
+  } else if (IsInterpreted()) {
     set_profiler_ticks(0);
     if (optimization_disabled() && opt_count() >= FLAG_max_opt_count) {
       // Re-enable optimizations if they were disabled due to opt_count limit.
@@ -13845,11 +14015,6 @@
         return i;
       }
     }
-    Object* shared_code =
-        WeakCell::cast(optimized_code_map->get(kSharedCodeIndex))->value();
-    if (shared_code->IsCode() && osr_ast_id.IsNone()) {
-      return kSharedCodeIndex;
-    }
   }
   return -1;
 }
@@ -13863,8 +14028,6 @@
       optimized_code_map->set(i + kCachedCodeOffset, empty_weak_cell,
                               SKIP_WRITE_BARRIER);
     }
-    optimized_code_map->set(kSharedCodeIndex, empty_weak_cell,
-                            SKIP_WRITE_BARRIER);
   }
 }
 
@@ -13874,24 +14037,14 @@
   int entry = SearchOptimizedCodeMapEntry(native_context, osr_ast_id);
   if (entry != kNotFound) {
     FixedArray* code_map = optimized_code_map();
-    if (entry == kSharedCodeIndex) {
-      // We know the weak cell isn't cleared because we made sure of it in
-      // SearchOptimizedCodeMapEntry and performed no allocations since that
-      // call.
-      result = {
-          Code::cast(WeakCell::cast(code_map->get(kSharedCodeIndex))->value()),
-          nullptr};
-    } else {
-      DCHECK_LE(entry + kEntryLength, code_map->length());
-      WeakCell* cell = WeakCell::cast(code_map->get(entry + kCachedCodeOffset));
-      WeakCell* literals_cell =
-          WeakCell::cast(code_map->get(entry + kLiteralsOffset));
+    DCHECK_LE(entry + kEntryLength, code_map->length());
+    WeakCell* cell = WeakCell::cast(code_map->get(entry + kCachedCodeOffset));
+    WeakCell* literals_cell =
+        WeakCell::cast(code_map->get(entry + kLiteralsOffset));
 
-      result = {cell->cleared() ? nullptr : Code::cast(cell->value()),
-                literals_cell->cleared()
-                    ? nullptr
-                    : LiteralsArray::cast(literals_cell->value())};
-    }
+    result = {cell->cleared() ? nullptr : Code::cast(cell->value()),
+              literals_cell->cleared() ? nullptr : LiteralsArray::cast(
+                                                       literals_cell->value())};
   }
   return result;
 }
@@ -14148,7 +14301,7 @@
   for (SourcePositionTableIterator iterator(source_position_table());
        !iterator.done() && iterator.code_offset() <= offset;
        iterator.Advance()) {
-    position = iterator.source_position();
+    position = iterator.source_position().ScriptOffset();
   }
   return position;
 }
@@ -14161,7 +14314,7 @@
   for (SourcePositionTableIterator it(source_position_table()); !it.done();
        it.Advance()) {
     if (it.is_statement()) {
-      int p = it.source_position();
+      int p = it.source_position().ScriptOffset();
       if (statement_position < p && p <= position) {
         statement_position = p;
       }
@@ -14379,14 +14532,15 @@
 void Code::PrintDeoptLocation(FILE* out, Address pc) {
   Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(this, pc);
   class SourcePosition pos = info.position;
-  if (info.deopt_reason != DeoptimizeReason::kNoReason || !pos.IsUnknown()) {
+  if (info.deopt_reason != DeoptimizeReason::kNoReason || pos.IsKnown()) {
     if (FLAG_hydrogen_track_positions) {
-      PrintF(out, "            ;;; deoptimize at %d_%d: %s\n",
-             pos.inlining_id(), pos.position(),
-             DeoptimizeReasonToString(info.deopt_reason));
+      PrintF(out, "            ;;; deoptimize at %d_%d: %s\n", pos.InliningId(),
+             pos.ScriptOffset(), DeoptimizeReasonToString(info.deopt_reason));
     } else {
-      PrintF(out, "            ;;; deoptimize at %d: %s\n", pos.raw(),
-             DeoptimizeReasonToString(info.deopt_reason));
+      PrintF(out, "            ;;; deoptimize at ");
+      OFStream outstr(out);
+      pos.Print(outstr, this);
+      PrintF(out, ", %s\n", DeoptimizeReasonToString(info.deopt_reason));
     }
   }
 }
@@ -14450,6 +14604,42 @@
   return NULL;
 }
 
+#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
+
+const char* Code::ICState2String(InlineCacheState state) {
+  switch (state) {
+    case UNINITIALIZED:
+      return "UNINITIALIZED";
+    case PREMONOMORPHIC:
+      return "PREMONOMORPHIC";
+    case MONOMORPHIC:
+      return "MONOMORPHIC";
+    case RECOMPUTE_HANDLER:
+      return "RECOMPUTE_HANDLER";
+    case POLYMORPHIC:
+      return "POLYMORPHIC";
+    case MEGAMORPHIC:
+      return "MEGAMORPHIC";
+    case GENERIC:
+      return "GENERIC";
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+void Code::PrintExtraICState(std::ostream& os,  // NOLINT
+                             Kind kind, ExtraICState extra) {
+  os << "extra_ic_state = ";
+  if ((kind == STORE_IC || kind == KEYED_STORE_IC) &&
+      is_strict(static_cast<LanguageMode>(extra))) {
+    os << "STRICT\n";
+  } else {
+    os << extra << "\n";
+  }
+}
+
+#endif  // defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
+
 #ifdef ENABLE_DISASSEMBLER
 
 void DeoptimizationInputData::DeoptimizationInputDataPrint(
@@ -14706,34 +14896,6 @@
 }
 
 
-const char* Code::ICState2String(InlineCacheState state) {
-  switch (state) {
-    case UNINITIALIZED: return "UNINITIALIZED";
-    case PREMONOMORPHIC: return "PREMONOMORPHIC";
-    case MONOMORPHIC: return "MONOMORPHIC";
-    case RECOMPUTE_HANDLER:
-      return "RECOMPUTE_HANDLER";
-    case POLYMORPHIC: return "POLYMORPHIC";
-    case MEGAMORPHIC: return "MEGAMORPHIC";
-    case GENERIC: return "GENERIC";
-  }
-  UNREACHABLE();
-  return NULL;
-}
-
-
-void Code::PrintExtraICState(std::ostream& os,  // NOLINT
-                             Kind kind, ExtraICState extra) {
-  os << "extra_ic_state = ";
-  if ((kind == STORE_IC || kind == KEYED_STORE_IC) &&
-      is_strict(static_cast<LanguageMode>(extra))) {
-    os << "STRICT\n";
-  } else {
-    os << extra << "\n";
-  }
-}
-
-
 void Code::Disassemble(const char* name, std::ostream& os) {  // NOLINT
   os << "kind = " << Kind2String(kind()) << "\n";
   if (IsCodeStubOrIC()) {
@@ -14817,8 +14979,8 @@
     os << "Source positions:\n pc offset  position\n";
     for (; !it.done(); it.Advance()) {
       os << std::setw(10) << it.code_offset() << std::setw(10)
-         << it.source_position() << (it.is_statement() ? "  statement" : "")
-         << "\n";
+         << it.source_position().ScriptOffset()
+         << (it.is_statement() ? "  statement" : "") << "\n";
     }
     os << "\n";
   }
@@ -14920,7 +15082,7 @@
   while (!iterator.done()) {
     if (!source_positions.done() &&
         iterator.current_offset() == source_positions.code_offset()) {
-      os << std::setw(5) << source_positions.source_position();
+      os << std::setw(5) << source_positions.source_position().ScriptOffset();
       os << (source_positions.is_statement() ? " S> " : " E> ");
       source_positions.Advance();
     } else {
@@ -15266,8 +15428,8 @@
       return "prototype-check";
     case kPropertyCellChangedGroup:
       return "property-cell-changed";
-    case kFieldTypeGroup:
-      return "field-type";
+    case kFieldOwnerGroup:
+      return "field-owner";
     case kInitialMapChangedGroup:
       return "initial-map-changed";
     case kAllocationSiteTenuringChangedGroup:
@@ -15426,7 +15588,7 @@
   // Nothing to do if prototype is already set.
   if (map->prototype() == *value) return Just(true);
 
-  bool immutable_proto = object->map()->is_immutable_proto();
+  bool immutable_proto = map->is_immutable_proto();
   if (immutable_proto) {
     RETURN_FAILURE(
         isolate, should_throw,
@@ -15788,6 +15950,14 @@
   return result;
 }
 
+AllocationSiteMode AllocationSite::GetMode(ElementsKind from, ElementsKind to) {
+  if (IsFastSmiElementsKind(from) &&
+      IsMoreGeneralElementsKindTransition(from, to)) {
+    return TRACK_ALLOCATION_SITE;
+  }
+
+  return DONT_TRACK_ALLOCATION_SITE;
+}
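Spot checks of the predicate above (illustrative only; `DCHECK_EQ` is used here purely for demonstration): tracking is requested only when leaving a fast-SMI kind for a strictly more general kind.

```cpp
// Generalizing out of SMI elements is tracked; anything else is not.
DCHECK_EQ(TRACK_ALLOCATION_SITE,
          AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS));
DCHECK_EQ(DONT_TRACK_ALLOCATION_SITE,
          AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS));
DCHECK_EQ(DONT_TRACK_ALLOCATION_SITE,
          AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_SMI_ELEMENTS));
```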
 
 const char* AllocationSite::PretenureDecisionName(PretenureDecision decision) {
   switch (decision) {
@@ -15912,12 +16082,13 @@
 
 template <typename BackingStore>
 static int FastHoleyElementsUsage(JSObject* object, BackingStore* store) {
+  Isolate* isolate = store->GetIsolate();
   int limit = object->IsJSArray()
                   ? Smi::cast(JSArray::cast(object)->length())->value()
                   : store->length();
   int used = 0;
   for (int i = 0; i < limit; ++i) {
-    if (!store->is_the_hole(i)) ++used;
+    if (!store->is_the_hole(isolate, i)) ++used;
   }
   return used;
 }
@@ -16540,15 +16711,14 @@
   if (constructor->IsJSFunction() &&
       JSFunction::cast(constructor)->initial_map() == map) {
     // If we still have the original map, set in-object properties directly.
-    regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
-                                  Smi::FromInt(0), SKIP_WRITE_BARRIER);
+    regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex, Smi::kZero,
+                                  SKIP_WRITE_BARRIER);
   } else {
     // Map has changed, so use generic, but slower, method.
-    RETURN_ON_EXCEPTION(
-        isolate,
-        JSReceiver::SetProperty(regexp, factory->last_index_string(),
-                                Handle<Smi>(Smi::FromInt(0), isolate), STRICT),
-        JSRegExp);
+    RETURN_ON_EXCEPTION(isolate, JSReceiver::SetProperty(
+                                     regexp, factory->lastIndex_string(),
+                                     Handle<Smi>(Smi::kZero, isolate), STRICT),
+                        JSRegExp);
   }
 
   return regexp;
@@ -16679,7 +16849,8 @@
     MinimumCapacity capacity_option,
     PretenureFlag pretenure) {
   DCHECK(0 <= at_least_space_for);
-  DCHECK(!capacity_option || base::bits::IsPowerOfTwo32(at_least_space_for));
+  DCHECK_IMPLIES(capacity_option == USE_CUSTOM_MINIMUM_CAPACITY,
+                 base::bits::IsPowerOfTwo32(at_least_space_for));
 
   int capacity = (capacity_option == USE_CUSTOM_MINIMUM_CAPACITY)
                      ? at_least_space_for
@@ -17264,7 +17435,7 @@
     limit = elements_length;
   }
   if (limit == 0) {
-    return handle(Smi::FromInt(0), isolate);
+    return handle(Smi::kZero, isolate);
   }
 
   uint32_t result = 0;
@@ -18244,6 +18415,9 @@
   return Lookup(isolate, key, Smi::cast(hash)->value());
 }
 
+Object* ObjectHashTable::ValueAt(int entry) {
+  return get(EntryToValueIndex(entry));
+}
 
 Object* ObjectHashTable::Lookup(Handle<Object> key, int32_t hash) {
   return Lookup(GetIsolate(), key, hash);
@@ -18687,7 +18861,7 @@
     MoveNext();
     return Smi::cast(kind());
   }
-  return Smi::FromInt(0);
+  return Smi::kZero;
 }
 
 
@@ -19250,7 +19424,8 @@
 
   Script::PositionInfo info;
   const Script::OffsetFlag offset_flag = Script::WITH_OFFSET;
-  if (!the_script->GetPositionInfo(start_position(), &info, offset_flag)) {
+  if (!Script::GetPositionInfo(the_script, start_position(), &info,
+                               offset_flag)) {
     return Message::kNoLineNumberInfo;
   }
 
@@ -19264,7 +19439,8 @@
 
   Script::PositionInfo info;
   const Script::OffsetFlag offset_flag = Script::WITH_OFFSET;
-  if (!the_script->GetPositionInfo(start_position(), &info, offset_flag)) {
+  if (!Script::GetPositionInfo(the_script, start_position(), &info,
+                               offset_flag)) {
     return -1;
   }
 
@@ -19281,7 +19457,8 @@
 
   Script::PositionInfo info;
   const Script::OffsetFlag offset_flag = Script::WITH_OFFSET;
-  if (!the_script->GetPositionInfo(start_position(), &info, offset_flag)) {
+  if (!Script::GetPositionInfo(the_script, start_position(), &info,
+                               offset_flag)) {
     return isolate->factory()->empty_string();
   }
 
@@ -19293,7 +19470,7 @@
   CHECK(is_neuterable());
   CHECK(is_external());
   set_backing_store(NULL);
-  set_byte_length(Smi::FromInt(0));
+  set_byte_length(Smi::kZero);
   set_was_neutered(true);
 }
 
@@ -19304,7 +19481,7 @@
   DCHECK(array_buffer->GetInternalFieldCount() ==
          v8::ArrayBuffer::kInternalFieldCount);
   for (int i = 0; i < v8::ArrayBuffer::kInternalFieldCount; i++) {
-    array_buffer->SetInternalField(i, Smi::FromInt(0));
+    array_buffer->SetInternalField(i, Smi::kZero);
   }
   array_buffer->set_bit_field(0);
   array_buffer->set_is_external(is_external);
@@ -19591,11 +19768,29 @@
   return false;
 }
 
+MaybeHandle<Object> JSModuleNamespace::GetExport(Handle<String> name) {
+  Isolate* isolate = name->GetIsolate();
+
+  Handle<Object> object(module()->exports()->Lookup(name), isolate);
+  if (object->IsTheHole(isolate)) {
+    return isolate->factory()->undefined_value();
+  }
+
+  Handle<Object> value(Handle<Cell>::cast(object)->value(), isolate);
+  if (value->IsTheHole(isolate)) {
+    THROW_NEW_ERROR(
+        isolate, NewReferenceError(MessageTemplate::kNotDefined, name), Object);
+  }
+
+  return value;
+}
+
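The lookup above distinguishes three cases; a sketch of the contract, with the `ns` and `name` handles assumed to exist:

```cpp
// - name not exported at all        -> undefined (not an error)
// - exported but the cell still
//   holds the hole (e.g. let in TDZ) -> throws ReferenceError kNotDefined
// - exported and initialized         -> the cell's current value
MaybeHandle<Object> value = ns->GetExport(name);
if (value.is_null()) { /* a ReferenceError is pending on the isolate */ }
```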
 namespace {
 
-template <typename T>
-struct HandleValueHash {
-  V8_INLINE size_t operator()(Handle<T> handle) const { return handle->Hash(); }
+struct ModuleHandleHash {
+  V8_INLINE size_t operator()(Handle<Module> module) const {
+    return module->hash();
+  }
 };
 
 struct ModuleHandleEqual {
@@ -19604,6 +19799,12 @@
   }
 };
 
+struct StringHandleHash {
+  V8_INLINE size_t operator()(Handle<String> string) const {
+    return string->Hash();
+  }
+};
+
 struct StringHandleEqual {
   V8_INLINE bool operator()(Handle<String> lhs, Handle<String> rhs) const {
     return lhs->Equals(*rhs);
@@ -19611,32 +19812,57 @@
 };
 
 class UnorderedStringSet
-    : public std::unordered_set<Handle<String>, HandleValueHash<String>,
+    : public std::unordered_set<Handle<String>, StringHandleHash,
                                 StringHandleEqual,
                                 zone_allocator<Handle<String>>> {
  public:
   explicit UnorderedStringSet(Zone* zone)
-      : std::unordered_set<Handle<String>, HandleValueHash<String>,
-                           StringHandleEqual, zone_allocator<Handle<String>>>(
-            2 /* bucket count */, HandleValueHash<String>(),
-            StringHandleEqual(), zone_allocator<Handle<String>>(zone)) {}
+      : std::unordered_set<Handle<String>, StringHandleHash, StringHandleEqual,
+                           zone_allocator<Handle<String>>>(
+            2 /* bucket count */, StringHandleHash(), StringHandleEqual(),
+            zone_allocator<Handle<String>>(zone)) {}
+};
+
+class UnorderedModuleSet
+    : public std::unordered_set<Handle<Module>, ModuleHandleHash,
+                                ModuleHandleEqual,
+                                zone_allocator<Handle<Module>>> {
+ public:
+  explicit UnorderedModuleSet(Zone* zone)
+      : std::unordered_set<Handle<Module>, ModuleHandleHash, ModuleHandleEqual,
+                           zone_allocator<Handle<Module>>>(
+            2 /* bucket count */, ModuleHandleHash(), ModuleHandleEqual(),
+            zone_allocator<Handle<Module>>(zone)) {}
+};
+
+class UnorderedStringMap
+    : public std::unordered_map<
+          Handle<String>, Handle<Object>, StringHandleHash, StringHandleEqual,
+          zone_allocator<std::pair<const Handle<String>, Handle<Object>>>> {
+ public:
+  explicit UnorderedStringMap(Zone* zone)
+      : std::unordered_map<
+            Handle<String>, Handle<Object>, StringHandleHash, StringHandleEqual,
+            zone_allocator<std::pair<const Handle<String>, Handle<Object>>>>(
+            2 /* bucket count */, StringHandleHash(), StringHandleEqual(),
+            zone_allocator<std::pair<const Handle<String>, Handle<Object>>>(
+                zone)) {}
 };
 
 }  // anonymous namespace
 
 class Module::ResolveSet
     : public std::unordered_map<
-          Handle<Module>, UnorderedStringSet*, HandleValueHash<Module>,
+          Handle<Module>, UnorderedStringSet*, ModuleHandleHash,
           ModuleHandleEqual, zone_allocator<std::pair<const Handle<Module>,
                                                       UnorderedStringSet*>>> {
  public:
   explicit ResolveSet(Zone* zone)
       : std::unordered_map<Handle<Module>, UnorderedStringSet*,
-                           HandleValueHash<Module>, ModuleHandleEqual,
+                           ModuleHandleHash, ModuleHandleEqual,
                            zone_allocator<std::pair<const Handle<Module>,
                                                     UnorderedStringSet*>>>(
-            2 /* bucket count */, HandleValueHash<Module>(),
-            ModuleHandleEqual(),
+            2 /* bucket count */, ModuleHandleHash(), ModuleHandleEqual(),
             zone_allocator<
                 std::pair<const Handle<Module>, UnorderedStringSet*>>(zone)),
         zone_(zone) {}
@@ -19647,6 +19873,22 @@
   Zone* zone_;
 };
 
+namespace {
+
+int ExportIndex(int cell_index) {
+  DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
+            ModuleDescriptor::kExport);
+  return cell_index - 1;
+}
+
+int ImportIndex(int cell_index) {
+  DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
+            ModuleDescriptor::kImport);
+  return -cell_index - 1;
+}
+
+}  // anonymous namespace
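These helpers assume the cell-index encoding from ModuleDescriptor: regular exports are numbered 1, 2, 3, ..., regular imports -1, -2, -3, ..., and 0 is invalid. For example:

```cpp
int export_slot = ExportIndex(1);   // == 0, first slot of regular_exports()
int import_slot = ImportIndex(-2);  // == 1, second slot of regular_imports()
```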
+
 void Module::CreateIndirectExport(Handle<Module> module, Handle<String> name,
                                   Handle<ModuleInfoEntry> entry) {
   Isolate* isolate = module->GetIsolate();
@@ -19656,11 +19898,15 @@
   module->set_exports(*exports);
 }
 
-void Module::CreateExport(Handle<Module> module, Handle<FixedArray> names) {
+void Module::CreateExport(Handle<Module> module, int cell_index,
+                          Handle<FixedArray> names) {
   DCHECK_LT(0, names->length());
   Isolate* isolate = module->GetIsolate();
+
   Handle<Cell> cell =
       isolate->factory()->NewCell(isolate->factory()->undefined_value());
+  module->regular_exports()->set(ExportIndex(cell_index), *cell);
+
   Handle<ObjectHashTable> exports(module->exports(), isolate);
   for (int i = 0, n = names->length(); i < n; ++i) {
     Handle<String> name(String::cast(names->get(i)), isolate);
@@ -19670,44 +19916,49 @@
   module->set_exports(*exports);
 }
 
-void Module::StoreExport(Handle<Module> module, Handle<String> name,
-                         Handle<Object> value) {
-  Handle<Cell> cell(Cell::cast(module->exports()->Lookup(name)));
-  cell->set_value(*value);
-}
-
-Handle<Object> Module::LoadExport(Handle<Module> module, Handle<String> name) {
+Handle<Object> Module::LoadVariable(Handle<Module> module, int cell_index) {
   Isolate* isolate = module->GetIsolate();
-  Handle<Object> object(module->exports()->Lookup(name), isolate);
-
-  // TODO(neis): Namespace imports are not yet implemented.  Trying to use this
-  // feature may crash here.
-  if (!object->IsCell()) UNIMPLEMENTED();
-
+  Handle<Object> object;
+  switch (ModuleDescriptor::GetCellIndexKind(cell_index)) {
+    case ModuleDescriptor::kImport:
+      object = handle(module->regular_imports()->get(ImportIndex(cell_index)),
+                      isolate);
+      break;
+    case ModuleDescriptor::kExport:
+      object = handle(module->regular_exports()->get(ExportIndex(cell_index)),
+                      isolate);
+      break;
+    case ModuleDescriptor::kInvalid:
+      UNREACHABLE();
+      break;
+  }
   return handle(Handle<Cell>::cast(object)->value(), isolate);
 }
 
-Handle<Object> Module::LoadImport(Handle<Module> module, Handle<String> name,
-                                  int module_request) {
+void Module::StoreVariable(Handle<Module> module, int cell_index,
+                           Handle<Object> value) {
   Isolate* isolate = module->GetIsolate();
-  Handle<Module> requested_module(
-      Module::cast(module->requested_modules()->get(module_request)), isolate);
-  return Module::LoadExport(requested_module, name);
+  DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
+            ModuleDescriptor::kExport);
+  Handle<Object> object(module->regular_exports()->get(ExportIndex(cell_index)),
+                        isolate);
+  Handle<Cell>::cast(object)->set_value(*value);
 }
 
 MaybeHandle<Cell> Module::ResolveImport(Handle<Module> module,
                                         Handle<String> name, int module_request,
-                                        bool must_resolve,
+                                        MessageLocation loc, bool must_resolve,
                                         Module::ResolveSet* resolve_set) {
   Isolate* isolate = module->GetIsolate();
   Handle<Module> requested_module(
       Module::cast(module->requested_modules()->get(module_request)), isolate);
-  return Module::ResolveExport(requested_module, name, must_resolve,
+  return Module::ResolveExport(requested_module, name, loc, must_resolve,
                                resolve_set);
 }
 
 MaybeHandle<Cell> Module::ResolveExport(Handle<Module> module,
-                                        Handle<String> name, bool must_resolve,
+                                        Handle<String> name,
+                                        MessageLocation loc, bool must_resolve,
                                         Module::ResolveSet* resolve_set) {
   Isolate* isolate = module->GetIsolate();
   Handle<Object> object(module->exports()->Lookup(name), isolate);
@@ -19729,10 +19980,10 @@
     } else if (name_set->count(name)) {
       // Cycle detected.
       if (must_resolve) {
-        THROW_NEW_ERROR(
-            isolate,
-            NewSyntaxError(MessageTemplate::kCyclicModuleDependency, name),
-            Cell);
+        return isolate->Throw<Cell>(
+            isolate->factory()->NewSyntaxError(
+                MessageTemplate::kCyclicModuleDependency, name),
+            &loc);
       }
       return MaybeHandle<Cell>();
     }
@@ -19742,11 +19993,15 @@
   if (object->IsModuleInfoEntry()) {
     // Not yet resolved indirect export.
     Handle<ModuleInfoEntry> entry = Handle<ModuleInfoEntry>::cast(object);
-    int module_request = Smi::cast(entry->module_request())->value();
     Handle<String> import_name(String::cast(entry->import_name()), isolate);
+    Handle<Script> script(
+        Script::cast(JSFunction::cast(module->code())->shared()->script()),
+        isolate);
+    MessageLocation new_loc(script, entry->beg_pos(), entry->end_pos());
 
     Handle<Cell> cell;
-    if (!ResolveImport(module, import_name, module_request, true, resolve_set)
+    if (!ResolveImport(module, import_name, entry->module_request(), new_loc,
+                       true, resolve_set)
              .ToHandle(&cell)) {
       DCHECK(isolate->has_pending_exception());
       return MaybeHandle<Cell>();
@@ -19763,13 +20018,13 @@
   }
 
   DCHECK(object->IsTheHole(isolate));
-  return Module::ResolveExportUsingStarExports(module, name, must_resolve,
+  return Module::ResolveExportUsingStarExports(module, name, loc, must_resolve,
                                                resolve_set);
 }
 
 MaybeHandle<Cell> Module::ResolveExportUsingStarExports(
-    Handle<Module> module, Handle<String> name, bool must_resolve,
-    Module::ResolveSet* resolve_set) {
+    Handle<Module> module, Handle<String> name, MessageLocation loc,
+    bool must_resolve, Module::ResolveSet* resolve_set) {
   Isolate* isolate = module->GetIsolate();
   if (!name->Equals(isolate->heap()->default_string())) {
     // Go through all star exports looking for the given name.  If multiple star
@@ -19783,16 +20038,22 @@
       if (!entry->export_name()->IsUndefined(isolate)) {
         continue;  // Indirect export.
       }
-      int module_request = Smi::cast(entry->module_request())->value();
+
+      Handle<Script> script(
+          Script::cast(JSFunction::cast(module->code())->shared()->script()),
+          isolate);
+      MessageLocation new_loc(script, entry->beg_pos(), entry->end_pos());
 
       Handle<Cell> cell;
-      if (ResolveImport(module, name, module_request, false, resolve_set)
+      if (ResolveImport(module, name, entry->module_request(), new_loc, false,
+                        resolve_set)
               .ToHandle(&cell)) {
         if (unique_cell.is_null()) unique_cell = cell;
         if (*unique_cell != *cell) {
-          THROW_NEW_ERROR(
-              isolate, NewSyntaxError(MessageTemplate::kAmbiguousExport, name),
-              Cell);
+          return isolate->Throw<Cell>(
+              isolate->factory()->NewSyntaxError(
+                  MessageTemplate::kAmbiguousExport, name),
+              &loc);
         }
       } else if (isolate->has_pending_exception()) {
         return MaybeHandle<Cell>();
@@ -19811,18 +20072,16 @@
 
   // Unresolvable.
   if (must_resolve) {
-    THROW_NEW_ERROR(isolate,
-                    NewSyntaxError(MessageTemplate::kUnresolvableExport, name),
-                    Cell);
+    return isolate->Throw<Cell>(isolate->factory()->NewSyntaxError(
+                                    MessageTemplate::kUnresolvableExport, name),
+                                &loc);
   }
   return MaybeHandle<Cell>();
 }
 
 bool Module::Instantiate(Handle<Module> module, v8::Local<v8::Context> context,
-                         v8::Module::ResolveCallback callback,
-                         v8::Local<v8::Value> callback_data) {
-  // Already instantiated.
-  if (module->code()->IsJSFunction()) return true;
+                         v8::Module::ResolveCallback callback) {
+  if (module->instantiated()) return true;
 
   Isolate* isolate = module->GetIsolate();
   Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(module->code()),
@@ -19832,16 +20091,18 @@
           shared,
           handle(Utils::OpenHandle(*context)->native_context(), isolate));
   module->set_code(*function);
+  DCHECK(module->instantiated());
 
   Handle<ModuleInfo> module_info(shared->scope_info()->ModuleDescriptorInfo(),
                                  isolate);
 
   // Set up local exports.
-  Handle<FixedArray> regular_exports(module_info->regular_exports(), isolate);
-  for (int i = 0, n = regular_exports->length(); i < n; i += 2) {
-    Handle<FixedArray> export_names(
-        FixedArray::cast(regular_exports->get(i + 1)), isolate);
-    CreateExport(module, export_names);
+  // TODO(neis): Create regular_exports array here instead of in factory method?
+  for (int i = 0, n = module_info->RegularExportCount(); i < n; ++i) {
+    int cell_index = module_info->RegularExportCellIndex(i);
+    Handle<FixedArray> export_names(module_info->RegularExportExportNames(i),
+                                    isolate);
+    CreateExport(module, cell_index, export_names);
   }
 
   // Partially set up indirect exports.
@@ -19866,7 +20127,7 @@
     // persist a module_map across multiple top-level module loads, as
     // the current module is left in a "half-instantiated" state.
     if (!callback(context, v8::Utils::ToLocal(specifier),
-                  v8::Utils::ToLocal(module), callback_data)
+                  v8::Utils::ToLocal(module))
              .ToLocal(&api_requested_module)) {
       // TODO(adamk): Give this a better error message. But this is a
       // misuse of the API anyway.
@@ -19875,12 +20136,12 @@
     }
     Handle<Module> requested_module = Utils::OpenHandle(*api_requested_module);
     module->requested_modules()->set(i, *requested_module);
-    if (!Instantiate(requested_module, context, callback, callback_data)) {
+    if (!Instantiate(requested_module, context, callback)) {
       return false;
     }
   }
 
-  Zone zone(isolate->allocator());
+  Zone zone(isolate->allocator(), ZONE_NAME);
 
   // Resolve imports.
   Handle<FixedArray> regular_imports(module_info->regular_imports(), isolate);
@@ -19888,12 +20149,18 @@
     Handle<ModuleInfoEntry> entry(
         ModuleInfoEntry::cast(regular_imports->get(i)), isolate);
     Handle<String> name(String::cast(entry->import_name()), isolate);
-    int module_request = Smi::cast(entry->module_request())->value();
+    Handle<Script> script(
+        Script::cast(JSFunction::cast(module->code())->shared()->script()),
+        isolate);
+    MessageLocation loc(script, entry->beg_pos(), entry->end_pos());
     ResolveSet resolve_set(&zone);
-    if (ResolveImport(module, name, module_request, true, &resolve_set)
-            .is_null()) {
+    Handle<Cell> cell;
+    if (!ResolveImport(module, name, entry->module_request(), loc, true,
+                       &resolve_set)
+             .ToHandle(&cell)) {
       return false;
     }
+    module->regular_imports()->set(ImportIndex(entry->cell_index()), *cell);
   }
 
   // Resolve indirect exports.
@@ -19902,8 +20169,13 @@
         ModuleInfoEntry::cast(special_exports->get(i)), isolate);
     Handle<Object> name(entry->export_name(), isolate);
     if (name->IsUndefined(isolate)) continue;  // Star export.
+    Handle<Script> script(
+        Script::cast(JSFunction::cast(module->code())->shared()->script()),
+        isolate);
+    MessageLocation loc(script, entry->beg_pos(), entry->end_pos());
     ResolveSet resolve_set(&zone);
-    if (ResolveExport(module, Handle<String>::cast(name), true, &resolve_set)
+    if (ResolveExport(module, Handle<String>::cast(name), loc, true,
+                      &resolve_set)
             .is_null()) {
       return false;
     }
@@ -19913,16 +20185,15 @@
 }
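Taken together with Evaluate below, the intended call sequence is instantiate-then-evaluate; a hedged sketch of a caller, with the handles assumed to exist:

```cpp
// Sketch only: Instantiate resolves imports/exports via `callback`;
// Evaluate then runs the module body at most once.
if (Module::Instantiate(module, context, callback)) {
  MaybeHandle<Object> completion = Module::Evaluate(module);
  // Empty on exception; undefined if the module was already evaluated.
}
```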
 
 MaybeHandle<Object> Module::Evaluate(Handle<Module> module) {
-  DCHECK(module->code()->IsJSFunction());  // Instantiated.
-
-  Isolate* isolate = module->GetIsolate();
+  DCHECK(module->instantiated());
 
   // Each module can only be evaluated once.
+  Isolate* isolate = module->GetIsolate();
   if (module->evaluated()) return isolate->factory()->undefined_value();
-  module->set_evaluated(true);
+  Handle<JSFunction> function(JSFunction::cast(module->code()), isolate);
+  module->set_evaluated();
 
   // Initialization.
-  Handle<JSFunction> function(JSFunction::cast(module->code()), isolate);
   DCHECK_EQ(MODULE_SCOPE, function->shared()->scope_info()->scope_type());
   Handle<Object> receiver = isolate->factory()->undefined_value();
   Handle<Object> argv[] = {module};
@@ -19945,5 +20216,192 @@
   return Execution::Call(isolate, resume, generator, 0, nullptr);
 }
 
+namespace {
+
+void FetchStarExports(Handle<Module> module, Zone* zone,
+                      UnorderedModuleSet* visited) {
+  DCHECK(module->instantiated());
+
+  bool cycle = !visited->insert(module).second;
+  if (cycle) return;
+
+  Isolate* isolate = module->GetIsolate();
+  Handle<ObjectHashTable> exports(module->exports(), isolate);
+  UnorderedStringMap more_exports(zone);
+
+  // TODO(neis): Only allocate more_exports if there are star exports.
+  // Maybe split special_exports into indirect_exports and star_exports.
+
+  Handle<FixedArray> special_exports(module->info()->special_exports(),
+                                     isolate);
+  for (int i = 0, n = special_exports->length(); i < n; ++i) {
+    Handle<ModuleInfoEntry> entry(
+        ModuleInfoEntry::cast(special_exports->get(i)), isolate);
+    if (!entry->export_name()->IsUndefined(isolate)) {
+      continue;  // Indirect export.
+    }
+
+    Handle<Module> requested_module(
+        Module::cast(module->requested_modules()->get(entry->module_request())),
+        isolate);
+
+    // Recurse.
+    FetchStarExports(requested_module, zone, visited);
+
+    // Collect all of [requested_module]'s exports that must be added to
+    // [module]'s exports (i.e. to [exports]).  We record these in
+    // [more_exports].  Ambiguities (conflicting exports) are marked by mapping
+    // the name to undefined instead of a Cell.
+    Handle<ObjectHashTable> requested_exports(requested_module->exports(),
+                                              isolate);
+    for (int i = 0, n = requested_exports->Capacity(); i < n; ++i) {
+      Handle<Object> key(requested_exports->KeyAt(i), isolate);
+      if (!requested_exports->IsKey(isolate, *key)) continue;
+      Handle<String> name = Handle<String>::cast(key);
+
+      if (name->Equals(isolate->heap()->default_string())) continue;
+      if (!exports->Lookup(name)->IsTheHole(isolate)) continue;
+
+      Handle<Cell> cell(Cell::cast(requested_exports->ValueAt(i)), isolate);
+      auto insert_result = more_exports.insert(std::make_pair(name, cell));
+      if (!insert_result.second) {
+        auto it = insert_result.first;
+        if (*it->second == *cell || it->second->IsUndefined(isolate)) {
+          // We already recorded this mapping before, or the name is already
+          // known to be ambiguous.  In either case, there's nothing to do.
+        } else {
+          DCHECK(it->second->IsCell());
+          // Different star exports provide different cells for this name, hence
+          // mark the name as ambiguous.
+          it->second = isolate->factory()->undefined_value();
+        }
+      }
+    }
+  }
+
+  // Copy [more_exports] into [exports].
+  for (const auto& elem : more_exports) {
+    if (elem.second->IsUndefined(isolate)) continue;  // Ambiguous export.
+    DCHECK(!elem.first->Equals(isolate->heap()->default_string()));
+    DCHECK(elem.second->IsCell());
+    exports = ObjectHashTable::Put(exports, elem.first, elem.second);
+  }
+  module->set_exports(*exports);
+}
+
+}  // anonymous namespace
+
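The insertion logic above encodes ambiguity by overwriting a name's entry with undefined. A standalone model of the rule in plain C++, where `nullptr` stands in for undefined:

```cpp
#include <map>
#include <string>

using Cell = const void*;

// A name re-exported by several `export *` sources survives only if every
// source supplies the same cell; any disagreement marks it ambiguous.
void RecordStarExport(std::map<std::string, Cell>* more_exports,
                      const std::string& name, Cell cell) {
  auto result = more_exports->insert({name, cell});
  if (!result.second && result.first->second != cell) {
    result.first->second = nullptr;  // ambiguous: dropped when copying back
  }
}
```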
+Handle<JSModuleNamespace> Module::GetModuleNamespace(Handle<Module> module,
+                                                     int module_request) {
+  Isolate* isolate = module->GetIsolate();
+  Handle<Module> requested_module(
+      Module::cast(module->requested_modules()->get(module_request)), isolate);
+  return Module::GetModuleNamespace(requested_module);
+}
+
+Handle<JSModuleNamespace> Module::GetModuleNamespace(Handle<Module> module) {
+  Isolate* isolate = module->GetIsolate();
+
+  Handle<HeapObject> object(module->module_namespace(), isolate);
+  if (!object->IsUndefined(isolate)) {
+    // Namespace object already exists.
+    return Handle<JSModuleNamespace>::cast(object);
+  }
+
+  // Create the namespace object (initially empty).
+  Handle<JSModuleNamespace> ns = isolate->factory()->NewJSModuleNamespace();
+  ns->set_module(*module);
+  module->set_module_namespace(*ns);
+
+  // Collect the export names.
+  Zone zone(isolate->allocator(), ZONE_NAME);
+  UnorderedModuleSet visited(&zone);
+  FetchStarExports(module, &zone, &visited);
+  Handle<ObjectHashTable> exports(module->exports(), isolate);
+  ZoneVector<Handle<String>> names(&zone);
+  names.reserve(exports->NumberOfElements());
+  for (int i = 0, n = exports->Capacity(); i < n; ++i) {
+    Handle<Object> key(exports->KeyAt(i), isolate);
+    if (!exports->IsKey(isolate, *key)) continue;
+    DCHECK(exports->ValueAt(i)->IsCell());
+    names.push_back(Handle<String>::cast(key));
+  }
+  DCHECK_EQ(static_cast<int>(names.size()), exports->NumberOfElements());
+
+  // Sort them alphabetically.
+  struct {
+    bool operator()(Handle<String> a, Handle<String> b) {
+      return String::Compare(a, b) == ComparisonResult::kLessThan;
+    }
+  } StringLess;
+  std::sort(names.begin(), names.end(), StringLess);
+
+  // Create the corresponding properties in the namespace object.
+  PropertyAttributes attr = DONT_DELETE;
+  for (const auto& name : names) {
+    JSObject::SetAccessor(
+        ns, Accessors::ModuleNamespaceEntryInfo(isolate, name, attr))
+        .Check();
+  }
+  JSObject::PreventExtensions(ns, THROW_ON_ERROR).ToChecked();
+
+  return ns;
+}
+
+MaybeHandle<Name> FunctionTemplateInfo::TryGetCachedPropertyName(
+    Isolate* isolate, Handle<Object> getter) {
+  if (getter->IsFunctionTemplateInfo()) {
+    Handle<FunctionTemplateInfo> fti =
+        Handle<FunctionTemplateInfo>::cast(getter);
+    // Check if the accessor uses a cached property.
+    if (!fti->cached_property_name()->IsTheHole(isolate)) {
+      return handle(Name::cast(fti->cached_property_name()));
+    }
+  }
+  return MaybeHandle<Name>();
+}
+
+// static
+ElementsKind JSArrayIterator::ElementsKindForInstanceType(InstanceType type) {
+  DCHECK_GE(type, FIRST_ARRAY_ITERATOR_TYPE);
+  DCHECK_LE(type, LAST_ARRAY_ITERATOR_TYPE);
+
+  if (type <= LAST_ARRAY_KEY_ITERATOR_TYPE) {
+    // Should be ignored for key iterators.
+    return FAST_ELEMENTS;
+  } else {
+    ElementsKind kind;
+    if (type < FIRST_ARRAY_VALUE_ITERATOR_TYPE) {
+      // Convert `type` from an entries iterator to the corresponding value
+      // iterator.
+      type = static_cast<InstanceType>(type +
+                                       (FIRST_ARRAY_VALUE_ITERATOR_TYPE -
+                                        FIRST_ARRAY_KEY_VALUE_ITERATOR_TYPE));
+      DCHECK_GE(type, FIRST_ARRAY_VALUE_ITERATOR_TYPE);
+      DCHECK_LE(type, LAST_ARRAY_ITERATOR_TYPE);
+    }
+
+    if (type <= JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE) {
+      kind =
+          static_cast<ElementsKind>(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND +
+                                    (type - FIRST_ARRAY_VALUE_ITERATOR_TYPE));
+      DCHECK_LE(kind, LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
+    } else if (type < JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE) {
+      kind = static_cast<ElementsKind>(
+          FIRST_FAST_ELEMENTS_KIND +
+          (type - JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE));
+      DCHECK_LE(kind, LAST_FAST_ELEMENTS_KIND);
+    } else {
+      // For any slow element cases, the actual elements kind is not known.
+      // Simply return a slow elements kind in this case. Users of this
+      // function must not depend on this.
+      return DICTIONARY_ELEMENTS;
+    }
+    DCHECK_LE(kind, LAST_ELEMENTS_KIND);
+    return kind;
+  }
+}
+
 }  // namespace internal
 }  // namespace v8
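
Note on ElementsKindForInstanceType above: it relies on the array-iterator
instance types and the ElementsKind values being declared in matching order,
so a kind can be derived by pure offset arithmetic. For example, an INT32
key-value iterator type is first shifted into the value-iterator range and
then mapped to INT32_ELEMENTS by its distance from
FIRST_ARRAY_VALUE_ITERATOR_TYPE. A minimal sketch of the technique, with
hypothetical enums standing in for the real ones:

    // Two parallel ranges in one enum, plus a parallel kind enum. The
    // declaration orders must match for the arithmetic to be valid.
    enum Type { kAKeyValue, kBKeyValue, kAValue, kBValue };
    enum Kind { kKindA, kKindB };

    Kind KindForType(Type t) {
      if (t < kAValue) {
        // Shift from the key-value range into the value range.
        t = static_cast<Type>(t + (kAValue - kAKeyValue));
      }
      // Map by offset within the value range.
      return static_cast<Kind>(kKindA + (t - kAValue));
    }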
diff --git a/src/objects.h b/src/objects.h
index fcc1f94..747a4f0 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -71,6 +71,8 @@
 //         - JSValue
 //           - JSDate
 //         - JSMessageObject
+//         - JSModuleNamespace
+//         - JSFixedArrayIterator
 //       - JSProxy
 //     - FixedArrayBase
 //       - ByteArray
@@ -95,7 +97,6 @@
 //         - TemplateList
 //         - TransitionArray
 //         - ScopeInfo
-//         - ModuleInfoEntry
 //         - ModuleInfo
 //         - ScriptContextTable
 //         - WeakFixedArray
@@ -142,6 +143,8 @@
 //     - Struct
 //       - Box
 //       - AccessorInfo
+//       - PromiseResolveThenableJobInfo
+//       - PromiseReactionJobInfo
 //       - AccessorPair
 //       - AccessCheckInfo
 //       - InterceptorInfo
@@ -155,6 +158,7 @@
 //       - CodeCache
 //       - PrototypeInfo
 //       - Module
+//       - ModuleInfoEntry
 //     - WeakCell
 //
 // Formats of Object*:
@@ -164,6 +168,8 @@
 namespace v8 {
 namespace internal {
 
+struct InliningPosition;
+
 enum KeyedAccessStoreMode {
   STANDARD_STORE,
   STORE_TRANSITION_TO_OBJECT,
@@ -397,10 +403,13 @@
   V(TYPE_FEEDBACK_INFO_TYPE)                                    \
   V(ALIASED_ARGUMENTS_ENTRY_TYPE)                               \
   V(BOX_TYPE)                                                   \
-  V(PROMISE_CONTAINER_TYPE)                                     \
+  V(PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE)                     \
+  V(PROMISE_REACTION_JOB_INFO_TYPE)                             \
   V(PROTOTYPE_INFO_TYPE)                                        \
+  V(TUPLE3_TYPE)                                                \
   V(CONTEXT_EXTENSION_TYPE)                                     \
   V(MODULE_TYPE)                                                \
+  V(MODULE_INFO_ENTRY_TYPE)                                     \
                                                                 \
   V(FIXED_ARRAY_TYPE)                                           \
   V(FIXED_DOUBLE_ARRAY_TYPE)                                    \
@@ -416,6 +425,8 @@
   V(JS_ARGUMENTS_TYPE)                                          \
   V(JS_CONTEXT_EXTENSION_OBJECT_TYPE)                           \
   V(JS_GENERATOR_OBJECT_TYPE)                                   \
+  V(JS_MODULE_NAMESPACE_TYPE)                                   \
+  V(JS_FIXED_ARRAY_ITERATOR_TYPE)                               \
   V(JS_GLOBAL_OBJECT_TYPE)                                      \
   V(JS_GLOBAL_PROXY_TYPE)                                       \
   V(JS_API_OBJECT_TYPE)                                         \
@@ -436,6 +447,46 @@
   V(JS_ERROR_TYPE)                                              \
   V(JS_STRING_ITERATOR_TYPE)                                    \
                                                                 \
+  V(JS_TYPED_ARRAY_KEY_ITERATOR_TYPE)                           \
+  V(JS_FAST_ARRAY_KEY_ITERATOR_TYPE)                            \
+  V(JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE)                         \
+                                                                \
+  V(JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE)                      \
+  V(JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE)                     \
+  V(JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE)                     \
+  V(JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE)                    \
+  V(JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE)                     \
+  V(JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE)                    \
+  V(JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE)                   \
+  V(JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE)                   \
+  V(JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE)             \
+                                                                \
+  V(JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE)                  \
+  V(JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE)            \
+  V(JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE)                      \
+  V(JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE)                \
+  V(JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE)               \
+  V(JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE)         \
+  V(JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE)                   \
+                                                                \
+  V(JS_INT8_ARRAY_VALUE_ITERATOR_TYPE)                          \
+  V(JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE)                         \
+  V(JS_INT16_ARRAY_VALUE_ITERATOR_TYPE)                         \
+  V(JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE)                        \
+  V(JS_INT32_ARRAY_VALUE_ITERATOR_TYPE)                         \
+  V(JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE)                        \
+  V(JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE)                       \
+  V(JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE)                       \
+  V(JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE)                 \
+                                                                \
+  V(JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE)                      \
+  V(JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE)                \
+  V(JS_FAST_ARRAY_VALUE_ITERATOR_TYPE)                          \
+  V(JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE)                    \
+  V(JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE)                   \
+  V(JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE)             \
+  V(JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE)                       \
+                                                                \
   V(JS_BOUND_FUNCTION_TYPE)                                     \
   V(JS_FUNCTION_TYPE)                                           \
   V(DEBUG_INFO_TYPE)                                            \
@@ -503,7 +554,10 @@
 // manually.
 #define STRUCT_LIST(V)                                                       \
   V(BOX, Box, box)                                                           \
-  V(PROMISE_CONTAINER, PromiseContainer, promise_container)                  \
+  V(PROMISE_RESOLVE_THENABLE_JOB_INFO, PromiseResolveThenableJobInfo,        \
+    promise_resolve_thenable_job_info)                                       \
+  V(PROMISE_REACTION_JOB_INFO, PromiseReactionJobInfo,                       \
+    promise_reaction_job_info)                                               \
   V(ACCESSOR_INFO, AccessorInfo, accessor_info)                              \
   V(ACCESSOR_PAIR, AccessorPair, accessor_pair)                              \
   V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info)                   \
@@ -519,7 +573,9 @@
   V(DEBUG_INFO, DebugInfo, debug_info)                                       \
   V(BREAK_POINT_INFO, BreakPointInfo, break_point_info)                      \
   V(PROTOTYPE_INFO, PrototypeInfo, prototype_info)                           \
+  V(TUPLE3, Tuple3, tuple3)                                                  \
   V(MODULE, Module, module)                                                  \
+  V(MODULE_INFO_ENTRY, ModuleInfoEntry, module_info_entry)                   \
   V(CONTEXT_EXTENSION, ContextExtension, context_extension)
 
 // We use the full 8 bits of the instance_type field to encode heap object
@@ -685,7 +741,8 @@
   TYPE_FEEDBACK_INFO_TYPE,
   ALIASED_ARGUMENTS_ENTRY_TYPE,
   BOX_TYPE,
-  PROMISE_CONTAINER_TYPE,
+  PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE,
+  PROMISE_REACTION_JOB_INFO_TYPE,
   DEBUG_INFO_TYPE,
   BREAK_POINT_INFO_TYPE,
   FIXED_ARRAY_TYPE,
@@ -695,8 +752,10 @@
   TRANSITION_ARRAY_TYPE,
   PROPERTY_CELL_TYPE,
   PROTOTYPE_INFO_TYPE,
+  TUPLE3_TYPE,
   CONTEXT_EXTENSION_TYPE,
   MODULE_TYPE,
+  MODULE_INFO_ENTRY_TYPE,
 
   // All the following types are subtypes of JSReceiver, which corresponds to
   // objects in the JS sense. The first and the last type in this range are
@@ -717,6 +776,8 @@
   JS_ARGUMENTS_TYPE,
   JS_CONTEXT_EXTENSION_OBJECT_TYPE,
   JS_GENERATOR_OBJECT_TYPE,
+  JS_MODULE_NAMESPACE_TYPE,
+  JS_FIXED_ARRAY_ITERATOR_TYPE,
   JS_ARRAY_TYPE,
   JS_ARRAY_BUFFER_TYPE,
   JS_TYPED_ARRAY_TYPE,
@@ -731,6 +792,47 @@
   JS_REGEXP_TYPE,
   JS_ERROR_TYPE,
   JS_STRING_ITERATOR_TYPE,
+
+  JS_TYPED_ARRAY_KEY_ITERATOR_TYPE,
+  JS_FAST_ARRAY_KEY_ITERATOR_TYPE,
+  JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE,
+
+  JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+  JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+  JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+  JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+  JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+  JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+  JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+  JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+  JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+
+  JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+  JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+  JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+  JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+  JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+  JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+  JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+
+  JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE,
+  JS_INT8_ARRAY_VALUE_ITERATOR_TYPE,
+  JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE,
+  JS_INT16_ARRAY_VALUE_ITERATOR_TYPE,
+  JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE,
+  JS_INT32_ARRAY_VALUE_ITERATOR_TYPE,
+  JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE,
+  JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE,
+  JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE,
+
+  JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE,
+  JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE,
+  JS_FAST_ARRAY_VALUE_ITERATOR_TYPE,
+  JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE,
+  JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE,
+  JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE,
+  JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE,
+
   JS_BOUND_FUNCTION_TYPE,
   JS_FUNCTION_TYPE,  // LAST_JS_OBJECT_TYPE, LAST_JS_RECEIVER_TYPE
 
@@ -767,6 +869,18 @@
   // an empty fixed array as elements backing store. This is true for string
   // wrappers.
   LAST_CUSTOM_ELEMENTS_RECEIVER = JS_VALUE_TYPE,
+
+  FIRST_ARRAY_KEY_ITERATOR_TYPE = JS_TYPED_ARRAY_KEY_ITERATOR_TYPE,
+  LAST_ARRAY_KEY_ITERATOR_TYPE = JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE,
+
+  FIRST_ARRAY_KEY_VALUE_ITERATOR_TYPE = JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+  LAST_ARRAY_KEY_VALUE_ITERATOR_TYPE = JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+
+  FIRST_ARRAY_VALUE_ITERATOR_TYPE = JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE,
+  LAST_ARRAY_VALUE_ITERATOR_TYPE = JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE,
+
+  FIRST_ARRAY_ITERATOR_TYPE = FIRST_ARRAY_KEY_ITERATOR_TYPE,
+  LAST_ARRAY_ITERATOR_TYPE = LAST_ARRAY_VALUE_ITERATOR_TYPE,
 };
 
 STATIC_ASSERT(JS_OBJECT_TYPE == Internals::kJSObjectType);
@@ -775,8 +889,8 @@
 STATIC_ASSERT(ODDBALL_TYPE == Internals::kOddballType);
 STATIC_ASSERT(FOREIGN_TYPE == Internals::kForeignType);
 
-
-std::ostream& operator<<(std::ostream& os, InstanceType instance_type);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+                                           InstanceType instance_type);
 
 #define FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(V)    \
   V(BYTECODE_ARRAY_CONSTANT_POOL_SUB_TYPE)       \
@@ -886,6 +1000,7 @@
 class LiteralsArray;
 class LookupIterator;
 class FieldType;
+class Module;
 class ModuleDescriptor;
 class ModuleInfoEntry;
 class ModuleInfo;
@@ -970,6 +1085,8 @@
   V(JSObject)                    \
   V(JSContextExtensionObject)    \
   V(JSGeneratorObject)           \
+  V(JSModuleNamespace)           \
+  V(JSFixedArrayIterator)        \
   V(Map)                         \
   V(DescriptorArray)             \
   V(FrameArray)                  \
@@ -985,11 +1102,11 @@
   V(FixedDoubleArray)            \
   V(WeakFixedArray)              \
   V(ArrayList)                   \
+  V(RegExpMatchInfo)             \
   V(Context)                     \
   V(ScriptContextTable)          \
   V(NativeContext)               \
   V(ScopeInfo)                   \
-  V(ModuleInfoEntry)             \
   V(ModuleInfo)                  \
   V(JSBoundFunction)             \
   V(JSFunction)                  \
@@ -1008,6 +1125,7 @@
   V(JSArrayBufferView)           \
   V(JSCollection)                \
   V(JSTypedArray)                \
+  V(JSArrayIterator)             \
   V(JSDataView)                  \
   V(JSProxy)                     \
   V(JSError)                     \
@@ -1383,7 +1501,7 @@
   // Checks whether this object has the same value as the given one.  This
   // function is implemented according to ES5, section 9.12 and can be used
   // to implement the Harmony "egal" function.
-  bool SameValue(Object* other);
+  V8_EXPORT_PRIVATE bool SameValue(Object* other);
 
   // Checks whether this object has the same value as the given one.
   // +0 and -0 are treated equal. Everything else is the same as SameValue.
@@ -1445,7 +1563,7 @@
   friend class StringStream;
 
   // Return the map of the root of object's prototype chain.
-  Map* GetRootMap(Isolate* isolate);
+  Map* GetPrototypeChainRootMap(Isolate* isolate);
 
   // Helper for SetProperty and SetSuperProperty.
   // Return value is only meaningful if [found] is set to true on return.
@@ -1470,9 +1588,7 @@
   const Object* value;
 };
 
-
-std::ostream& operator<<(std::ostream& os, const Brief& v);
-
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, const Brief& v);
 
 // Smi represents integer Numbers that can be stored in 31 bits.
 // Smis are immediate which means they are NOT allocated in the heap.
@@ -1507,9 +1623,10 @@
   DECLARE_CAST(Smi)
 
   // Dispatched behavior.
-  void SmiPrint(std::ostream& os) const;  // NOLINT
+  V8_EXPORT_PRIVATE void SmiPrint(std::ostream& os) const;  // NOLINT
   DECLARE_VERIFIER(Smi)
 
+  V8_EXPORT_PRIVATE static Smi* const kZero;
   static const int kMinValue =
       (static_cast<unsigned int>(-1)) << (kSmiValueSize - 1);
   static const int kMaxValue = -(kMinValue + 1);
@@ -1735,7 +1852,7 @@
   // Dispatched behavior.
   bool HeapNumberBooleanValue();
 
-  void HeapNumberPrint(std::ostream& os);  // NOLINT
+  V8_EXPORT_PRIVATE void HeapNumberPrint(std::ostream& os);  // NOLINT
   DECLARE_VERIFIER(HeapNumber)
 
   inline int get_exponent();
@@ -2236,6 +2353,11 @@
   static bool UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate);
   static void InvalidatePrototypeChains(Map* map);
 
+  // Updates prototype chain tracking information when an object changes its
+  // map from |old_map| to |new_map|.
+  static void NotifyMapChange(Handle<Map> old_map, Handle<Map> new_map,
+                              Isolate* isolate);
+
   // Utility used by many Array builtins and runtime functions
   static inline bool PrototypeHasNoElements(Isolate* isolate, JSObject* object);
 
@@ -2724,7 +2846,7 @@
 
   // Setter that uses write barrier.
   inline void set(int index, Object* value);
-  inline bool is_the_hole(int index);
+  inline bool is_the_hole(Isolate* isolate, int index);
 
   // Setter that doesn't need write barrier.
   inline void set(int index, Smi* value);
@@ -2814,6 +2936,7 @@
   inline void set_the_hole(int index);
 
   // Checking for the hole.
+  inline bool is_the_hole(Isolate* isolate, int index);
   inline bool is_the_hole(int index);
 
   // Garbage collection support.
@@ -2872,7 +2995,7 @@
   inline int Length() const;
 
   inline bool IsEmptySlot(int index) const;
-  static Object* Empty() { return Smi::FromInt(0); }
+  static Object* Empty() { return Smi::kZero; }
 
   class Iterator {
    public:
@@ -2944,8 +3067,59 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(ArrayList);
 };
 
+// RegExpMatchInfo holds the matchIndices array of the last successful
+// regexp match (an array of start/end index pairs for the match and all
+// the captured substrings); the invariant is that there are at least two
+// capture indices.  The array also contains the subject string for the
+// last successful match.
+// After creation the result must be treated as a FixedArray in all regards.
+class V8_EXPORT_PRIVATE RegExpMatchInfo : NON_EXPORTED_BASE(public FixedArray) {
+ public:
+  // Returns the number of captures, which is defined as the length of the
+  // matchIndices object of the last match. matchIndices contains two indices
+  // for each capture (including the match itself), i.e. 2 * #captures + 2.
+  inline int NumberOfCaptureRegisters();
+  inline void SetNumberOfCaptureRegisters(int value);
+
+  // Returns the subject string of the last match.
+  inline String* LastSubject();
+  inline void SetLastSubject(String* value);
+
+  // Like LastSubject, but modifiable by the user.
+  inline Object* LastInput();
+  inline void SetLastInput(Object* value);
+
+  // Returns the i'th capture index, 0 <= i < NumberOfCaptureRegisters().
+  // Capture(0) and Capture(1) determine the start- and endpoint of the match
+  // itself.
+  inline int Capture(int i);
+  inline void SetCapture(int i, int value);
+
+  // Reserves space for captures.
+  static Handle<RegExpMatchInfo> ReserveCaptures(
+      Handle<RegExpMatchInfo> match_info, int capture_count);
+
+  DECLARE_CAST(RegExpMatchInfo)
+
+  static const int kNumberOfCapturesIndex = 0;
+  static const int kLastSubjectIndex = 1;
+  static const int kLastInputIndex = 2;
+  static const int kFirstCaptureIndex = 3;
+  static const int kLastMatchOverhead = kFirstCaptureIndex;
+
+  static const int kNumberOfCapturesOffset = FixedArray::kHeaderSize;
+  static const int kLastSubjectOffset = kNumberOfCapturesOffset + kPointerSize;
+  static const int kLastInputOffset = kLastSubjectOffset + kPointerSize;
+  static const int kFirstCaptureOffset = kLastInputOffset + kPointerSize;
+
+  // Every match info is guaranteed to have enough space to store two captures.
+  static const int kInitialCaptureIndices = 2;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpMatchInfo);
+};
+
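
Given the constants above, the slot arithmetic is simple: capture i occupies
FixedArray slot kFirstCaptureIndex + i, so a match with C capture registers
needs kLastMatchOverhead + C slots in total. A sketch, with the constant
values copied from the declarations above:

    // Illustrative only; mirrors the RegExpMatchInfo layout constants.
    int SlotForCapture(int i) { return 3 /* kFirstCaptureIndex */ + i; }
    int RequiredLength(int capture_registers) {
      return 3 /* kLastMatchOverhead */ + capture_registers;
    }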
 #define FRAME_ARRAY_FIELD_LIST(V) \
-  V(WasmObject, Object)           \
+  V(WasmInstance, Object)         \
   V(WasmFunctionIndex, Smi)       \
   V(Receiver, Object)             \
   V(Function, JSFunction)         \
@@ -2963,14 +3137,16 @@
 #undef DECLARE_FRAME_ARRAY_ACCESSORS
 
   inline bool IsWasmFrame(int frame_ix) const;
+  inline bool IsAsmJsWasmFrame(int frame_ix) const;
   inline int FrameCount() const;
 
   void ShrinkToFit();
 
   // Flags.
   static const int kIsWasmFrame = 1 << 0;
-  static const int kIsStrict = 1 << 1;
-  static const int kForceConstructor = 1 << 2;
+  static const int kIsAsmJsWasmFrame = 1 << 1;
+  static const int kIsStrict = 1 << 2;
+  static const int kForceConstructor = 1 << 3;
 
   static Handle<FrameArray> AppendJSFrame(Handle<FrameArray> in,
                                           Handle<Object> receiver,
@@ -2978,7 +3154,7 @@
                                           Handle<AbstractCode> code, int offset,
                                           int flags);
   static Handle<FrameArray> AppendWasmFrame(Handle<FrameArray> in,
-                                            Handle<Object> wasm_object,
+                                            Handle<Object> wasm_instance,
                                             int wasm_function_index,
                                             Handle<AbstractCode> code,
                                             int offset, int flags);
@@ -2993,7 +3169,7 @@
   //
   // with internal offsets as below:
 
-  static const int kWasmObjectOffset = 0;
+  static const int kWasmInstanceOffset = 0;
   static const int kWasmFunctionIndexOffset = 1;
 
   static const int kReceiverOffset = 0;
@@ -3323,6 +3499,9 @@
   // Constant used for denoting an absent entry.
   static const int kNotFound = -1;
 
+  // Minimum capacity for newly created hash tables.
+  static const int kMinCapacity = 4;
+
  protected:
   // Update the number of elements in the hash table.
   inline void SetNumberOfElements(int nof);
@@ -3400,8 +3579,11 @@
   static const int kEntryKeyIndex = 0;
   static const int kElementsStartOffset =
       kHeaderSize + kElementsStartIndex * kPointerSize;
-  static const int kCapacityOffset =
-      kHeaderSize + kCapacityIndex * kPointerSize;
+  // Maximal capacity of HashTable. Based on maximal length of underlying
+  // FixedArray. Staying below kMaxCapacity also ensures that EntryToIndex
+  // cannot overflow.
+  static const int kMaxCapacity =
+      (FixedArray::kMaxLength - kElementsStartIndex) / kEntrySize;
 
   // Returns the index for an entry (of the key)
   static inline int EntryToIndex(int entry) {
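
The relocation of kMaxCapacity above also corrects its divisor: the new
definition subtracts the index kElementsStartIndex rather than the byte
offset kElementsStartOffset. The overflow claim in the comment then follows
directly. EntryToIndex, whose definition begins just above, computes
kElementsStartIndex + entry * kEntrySize, so for any capacity
c <= kMaxCapacity = (FixedArray::kMaxLength - kElementsStartIndex) / kEntrySize:

    EntryToIndex(c - 1) = kElementsStartIndex + (c - 1) * kEntrySize
                        < kElementsStartIndex + c * kEntrySize
                        <= FixedArray::kMaxLength,

which keeps every computed index inside the valid FixedArray range, so the
int arithmetic cannot overflow.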
@@ -3438,12 +3620,6 @@
     set(kCapacityIndex, Smi::FromInt(capacity));
   }
 
-  // Maximal capacity of HashTable. Based on maximal length of underlying
-  // FixedArray. Staying below kMaxCapacity also ensures that EntryToIndex
-  // cannot overflow.
-  static const int kMaxCapacity =
-      (FixedArray::kMaxLength - kElementsStartOffset) / kEntrySize;
-
  private:
   // Returns _expected_ if one of entries given by the first _probe_ probes is
   // equal to  _expected_. Otherwise, returns the entry given by the probe
@@ -3672,23 +3848,22 @@
   static Handle<FixedArray> BuildIterationIndicesArray(
       Handle<Derived> dictionary);
 
+  static const int kMaxNumberKeyIndex = DerivedHashTable::kPrefixStartIndex;
+  static const int kNextEnumerationIndexIndex = kMaxNumberKeyIndex + 1;
+
  protected:
   // Generic at put operation.
   MUST_USE_RESULT static Handle<Derived> AtPut(
       Handle<Derived> dictionary,
       Key key,
       Handle<Object> value);
-
   // Add entry to dictionary. Returns entry value.
   static int AddEntry(Handle<Derived> dictionary, Key key, Handle<Object> value,
                       PropertyDetails details, uint32_t hash);
-
   // Generate new enumeration indices to avoid enumeration index overflow.
   // Returns iteration indices array for the |dictionary|.
   static Handle<FixedArray> GenerateNewEnumerationIndices(
       Handle<Derived> dictionary);
-  static const int kMaxNumberKeyIndex = DerivedHashTable::kPrefixStartIndex;
-  static const int kNextEnumerationIndexIndex = kMaxNumberKeyIndex + 1;
 };
 
 
@@ -3760,6 +3935,7 @@
 
   static const int kEntryValueIndex = 1;
   static const int kEntryDetailsIndex = 2;
+  static const int kInitialCapacity = 2;
 };
 
 
@@ -3950,6 +4126,9 @@
   Object* Lookup(Handle<Object> key, int32_t hash);
   Object* Lookup(Isolate* isolate, Handle<Object> key, int32_t hash);
 
+  // Returns the value at entry.
+  Object* ValueAt(int entry);
+
   // Adds (or overwrites) the value associated with the given key.
   static Handle<ObjectHashTable> Put(Handle<ObjectHashTable> table,
                                      Handle<Object> key,
@@ -4377,8 +4556,9 @@
                               VariableMode* mode, InitializationFlag* init_flag,
                               MaybeAssignedFlag* maybe_assigned_flag);
 
-  // Lookup metadata of a MODULE-allocated variable.  Return a negative value if
-  // there is no module variable with the given name.
+  // Lookup metadata of a MODULE-allocated variable.  Return 0 if there is no
+  // module variable with the given name (the index value of a MODULE variable
+  // is never 0).
   int ModuleIndex(Handle<String> name, VariableMode* mode,
                   InitializationFlag* init_flag,
                   MaybeAssignedFlag* maybe_assigned_flag);
@@ -4428,7 +4608,7 @@
   static Handle<ScopeInfo> CreateGlobalThisBinding(Isolate* isolate);
 
   // Serializes empty scope info.
-  static ScopeInfo* Empty(Isolate* isolate);
+  V8_EXPORT_PRIVATE static ScopeInfo* Empty(Isolate* isolate);
 
 #ifdef DEBUG
   void Print();
@@ -4517,6 +4697,14 @@
              VariableLocation* location, InitializationFlag* init_flag,
              MaybeAssignedFlag* maybe_assigned_flag);
 
+  // Get metadata of the i-th MODULE-allocated variable, where 0 <= i <
+  // ModuleVariableCount.  The metadata is returned via out-arguments, which
+  // may be nullptr if the corresponding information is not requested.
+  void ModuleVariable(int i, String** name, int* index,
+                      VariableMode* mode = nullptr,
+                      InitializationFlag* init_flag = nullptr,
+                      MaybeAssignedFlag* maybe_assigned_flag = nullptr);
+
   // Used for the function name variable for named function expressions, and for
   // the receiver.
   enum VariableAllocationInfo { NONE, STACK, CONTEXT, UNUSED };
@@ -4556,58 +4744,6 @@
   friend class ScopeIterator;
 };
 
-class ModuleInfoEntry : public FixedArray {
- public:
-  DECLARE_CAST(ModuleInfoEntry)
-  static Handle<ModuleInfoEntry> New(Isolate* isolate,
-                                     Handle<Object> export_name,
-                                     Handle<Object> local_name,
-                                     Handle<Object> import_name,
-                                     Handle<Object> module_request);
-  inline Object* export_name() const;
-  inline Object* local_name() const;
-  inline Object* import_name() const;
-  inline Object* module_request() const;
-
- private:
-  friend class Factory;
-  enum {
-    kExportNameIndex,
-    kLocalNameIndex,
-    kImportNameIndex,
-    kModuleRequestIndex,
-    kLength
-  };
-};
-
-// ModuleInfo is to ModuleDescriptor what ScopeInfo is to Scope.
-class ModuleInfo : public FixedArray {
- public:
-  DECLARE_CAST(ModuleInfo)
-  static Handle<ModuleInfo> New(Isolate* isolate, Zone* zone,
-                                ModuleDescriptor* descr);
-  inline FixedArray* module_requests() const;
-  inline FixedArray* special_exports() const;
-  inline FixedArray* regular_exports() const;
-  inline FixedArray* namespace_imports() const;
-  inline FixedArray* regular_imports() const;
-
-#ifdef DEBUG
-  inline bool Equals(ModuleInfo* other) const;
-#endif
-
- private:
-  friend class Factory;
-  enum {
-    kModuleRequestsIndex,
-    kSpecialExportsIndex,
-    kRegularExportsIndex,
-    kNamespaceImportsIndex,
-    kRegularImportsIndex,
-    kLength
-  };
-};
-
 // The cache for maps used by normalized (dictionary mode) objects.
 // Such maps do not have property descriptors, so a typical program
 // needs very limited number of distinct normalized maps.
@@ -4726,7 +4862,6 @@
   // Setter and getter.
   inline byte get(int index);
   inline void set(int index, byte value);
-  inline const byte* data() const;
 
   // Copy in / copy out whole byte slices.
   inline void copy_out(int index, byte* buffer, int length);
@@ -4774,6 +4909,32 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(ByteArray);
 };
 
+// Wrapper class for ByteArray which can store arbitrary C++ classes, as long
+// as they can be copied with memcpy.
+template <class T>
+class PodArray : public ByteArray {
+ public:
+  static Handle<PodArray<T>> New(Isolate* isolate, int length,
+                                 PretenureFlag pretenure = NOT_TENURED);
+  void copy_out(int index, T* result) {
+    ByteArray::copy_out(index * sizeof(T), reinterpret_cast<byte*>(result),
+                        sizeof(T));
+  }
+  T get(int index) {
+    T result;
+    copy_out(index, &result);
+    return result;
+  }
+  void set(int index, const T& value) {
+    copy_in(index * sizeof(T), reinterpret_cast<const byte*>(&value),
+            sizeof(T));
+  }
+  int length() { return ByteArray::length() / sizeof(T); }
+  DECLARE_CAST(PodArray<T>)
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(PodArray<T>);
+};
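
A hedged usage sketch for PodArray (assuming a live Isolate*; the client
added elsewhere in this change is PodArray<InliningPosition> on
DeoptimizationInputData):

    // Handle<PodArray<int>> a = PodArray<int>::New(isolate, 3);
    // a->set(0, 42);        // memcpy'd into the backing ByteArray
    // int x = a->get(0);    // copied back out; x == 42
    // int n = a->length();  // ByteArray::length() / sizeof(int) == 3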
 
 // BytecodeArray represents a sequence of interpreter bytecodes.
 class BytecodeArray : public FixedArrayBase {
@@ -5006,7 +5167,6 @@
 
 #undef FIXED_TYPED_ARRAY_TRAITS
 
-
 // DeoptimizationInputData is a fixed array used to hold the deoptimization
 // data for code generated by the Hydrogen/Lithium compiler.  It also
 // contains information about functions that were inlined.  If N different
@@ -5025,7 +5185,8 @@
   static const int kOptimizationIdIndex = 5;
   static const int kSharedFunctionInfoIndex = 6;
   static const int kWeakCellCacheIndex = 7;
-  static const int kFirstDeoptEntryIndex = 8;
+  static const int kInliningPositionsIndex = 8;
+  static const int kFirstDeoptEntryIndex = 9;
 
   // Offsets of deopt entry elements relative to the start of the entry.
   static const int kAstIdRawOffset = 0;
@@ -5047,6 +5208,7 @@
   DECLARE_ELEMENT_ACCESSORS(OptimizationId, Smi)
   DECLARE_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
   DECLARE_ELEMENT_ACCESSORS(WeakCellCache, Object)
+  DECLARE_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>)
 
 #undef DECLARE_ELEMENT_ACCESSORS
 
@@ -5068,6 +5230,12 @@
 
   inline int DeoptCount();
 
+  static const int kNotInlinedIndex = -1;
+
+  // Returns the inlined function at the given position in LiteralArray, or the
+  // outer function if index == kNotInlinedIndex.
+  class SharedFunctionInfo* GetInlinedFunction(int index);
+
   // Allocates a DeoptimizationInputData.
   static Handle<DeoptimizationInputData> New(Isolate* isolate,
                                              int deopt_entry_count,
@@ -5088,7 +5256,6 @@
   static int LengthFor(int entry_count) { return IndexForEntry(entry_count); }
 };
 
-
 // DeoptimizationOutputData is a fixed array used to hold the deoptimization
 // data for code generated by the full compiler.
 // The format of the these objects is
@@ -5128,7 +5295,7 @@
  public:
   static const int kVectorIndex = 0;
   static const int kFirstLiteralIndex = 1;
-  static const int kFeedbackVectorOffset;
+  V8_EXPORT_PRIVATE static const int kFeedbackVectorOffset;
   static const int kOffsetToFirstLiteral;
 
   static int OffsetOfLiteralAt(int index) {
@@ -5217,11 +5384,14 @@
 
   static const int kPrologueOffsetNotSet = -1;
 
-#ifdef ENABLE_DISASSEMBLER
+#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
   // Printing
   static const char* ICState2String(InlineCacheState state);
   static void PrintExtraICState(std::ostream& os,  // NOLINT
                                 Kind kind, ExtraICState extra);
+#endif  // defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
+
+#ifdef ENABLE_DISASSEMBLER
   void Disassemble(const char* name, std::ostream& os);  // NOLINT
 #endif  // ENABLE_DISASSEMBLER
 
@@ -5825,9 +5995,9 @@
     // Group of code that depends on global property values in property cells
     // not being changed.
     kPropertyCellChangedGroup,
-    // Group of code that omit run-time type checks for the field(s) introduced
-    // by this map.
-    kFieldTypeGroup,
+    // Group of code that omits run-time checks for field(s) introduced by
+    // this map, i.e. for the field type.
+    kFieldOwnerGroup,
     // Group of code that omits run-time type checks for initial maps of
     // constructors.
     kInitialMapChangedGroup,
@@ -6143,6 +6313,14 @@
   static const int kPrototypeChainValid = 0;
   static const int kPrototypeChainInvalid = 1;
 
+  // Return the map of the root of object's prototype chain.
+  Map* GetPrototypeChainRootMap(Isolate* isolate);
+
+  // Returns a WeakCell object containing given prototype. The cell is cached
+  // in PrototypeInfo which is created lazily.
+  static Handle<WeakCell> GetOrCreatePrototypeWeakCell(
+      Handle<JSObject> prototype, Isolate* isolate);
+
   Map* FindRootMap();
   Map* FindFieldOwner(int descriptor);
 
@@ -6665,33 +6843,58 @@
   DECLARE_CAST(Struct)
 };
 
-// A container struct to hold state required for
-// PromiseResolveThenableJob. {before, after}_debug_event could
-// potentially be undefined if the debugger is turned off.
-class PromiseContainer : public Struct {
+// A container struct to hold state required for PromiseResolveThenableJob.
+class PromiseResolveThenableJobInfo : public Struct {
  public:
   DECL_ACCESSORS(thenable, JSReceiver)
   DECL_ACCESSORS(then, JSReceiver)
   DECL_ACCESSORS(resolve, JSFunction)
   DECL_ACCESSORS(reject, JSFunction)
-  DECL_ACCESSORS(before_debug_event, Object)
-  DECL_ACCESSORS(after_debug_event, Object)
+  DECL_ACCESSORS(debug_id, Object)
+  DECL_ACCESSORS(debug_name, Object)
+  DECL_ACCESSORS(context, Context)
 
   static const int kThenableOffset = Struct::kHeaderSize;
   static const int kThenOffset = kThenableOffset + kPointerSize;
   static const int kResolveOffset = kThenOffset + kPointerSize;
   static const int kRejectOffset = kResolveOffset + kPointerSize;
-  static const int kBeforeDebugEventOffset = kRejectOffset + kPointerSize;
-  static const int kAfterDebugEventOffset =
-      kBeforeDebugEventOffset + kPointerSize;
-  static const int kSize = kAfterDebugEventOffset + kPointerSize;
+  static const int kDebugIdOffset = kRejectOffset + kPointerSize;
+  static const int kDebugNameOffset = kDebugIdOffset + kPointerSize;
+  static const int kContextOffset = kDebugNameOffset + kPointerSize;
+  static const int kSize = kContextOffset + kPointerSize;
 
-  DECLARE_CAST(PromiseContainer)
-  DECLARE_PRINTER(PromiseContainer)
-  DECLARE_VERIFIER(PromiseContainer)
+  DECLARE_CAST(PromiseResolveThenableJobInfo)
+  DECLARE_PRINTER(PromiseResolveThenableJobInfo)
+  DECLARE_VERIFIER(PromiseResolveThenableJobInfo)
 
  private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseContainer);
+  DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseResolveThenableJobInfo);
+};
+
+// Struct to hold state required for PromiseReactionJob.
+class PromiseReactionJobInfo : public Struct {
+ public:
+  DECL_ACCESSORS(value, Object)
+  DECL_ACCESSORS(tasks, Object)
+  DECL_ACCESSORS(deferred, Object)
+  DECL_ACCESSORS(debug_id, Object)
+  DECL_ACCESSORS(debug_name, Object)
+  DECL_ACCESSORS(context, Context)
+
+  static const int kValueOffset = Struct::kHeaderSize;
+  static const int kTasksOffset = kValueOffset + kPointerSize;
+  static const int kDeferredOffset = kTasksOffset + kPointerSize;
+  static const int kDebugIdOffset = kDeferredOffset + kPointerSize;
+  static const int kDebugNameOffset = kDebugIdOffset + kPointerSize;
+  static const int kContextOffset = kDebugNameOffset + kPointerSize;
+  static const int kSize = kContextOffset + kPointerSize;
+
+  DECLARE_CAST(PromiseReactionJobInfo)
+  DECLARE_PRINTER(PromiseReactionJobInfo)
+  DECLARE_VERIFIER(PromiseReactionJobInfo)
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseReactionJobInfo);
 };
 
 // A simple one-element struct, useful where smis need to be boxed.
@@ -6719,6 +6922,9 @@
  public:
   static const int UNREGISTERED = -1;
 
+  // [weak_cell]: A WeakCell containing this prototype. ICs cache the cell here.
+  DECL_ACCESSORS(weak_cell, Object)
+
   // [prototype_users]: WeakFixedArray containing maps using this prototype,
   // or Smi(0) if uninitialized.
   DECL_ACCESSORS(prototype_users, Object)
@@ -6752,7 +6958,8 @@
   DECLARE_PRINTER(PrototypeInfo)
   DECLARE_VERIFIER(PrototypeInfo)
 
-  static const int kPrototypeUsersOffset = HeapObject::kHeaderSize;
+  static const int kWeakCellOffset = HeapObject::kHeaderSize;
+  static const int kPrototypeUsersOffset = kWeakCellOffset + kPointerSize;
   static const int kRegistrySlotOffset = kPrototypeUsersOffset + kPointerSize;
   static const int kValidityCellOffset = kRegistrySlotOffset + kPointerSize;
   static const int kObjectCreateMap = kValidityCellOffset + kPointerSize;
@@ -6768,6 +6975,26 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(PrototypeInfo);
 };
 
+class Tuple3 : public Struct {
+ public:
+  DECL_ACCESSORS(value1, Object)
+  DECL_ACCESSORS(value2, Object)
+  DECL_ACCESSORS(value3, Object)
+
+  DECLARE_CAST(Tuple3)
+
+  // Dispatched behavior.
+  DECLARE_PRINTER(Tuple3)
+  DECLARE_VERIFIER(Tuple3)
+
+  static const int kValue1Offset = HeapObject::kHeaderSize;
+  static const int kValue2Offset = kValue1Offset + kPointerSize;
+  static const int kValue3Offset = kValue2Offset + kPointerSize;
+  static const int kSize = kValue3Offset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Tuple3);
+};
 
 // Pair used to store both a ScopeInfo and an extension object in the extension
 // slot of a block, catch, or with context. Needed in the rare case where a
@@ -6869,13 +7096,9 @@
   // [source_mapping_url]: sourceMappingURL magic comment
   DECL_ACCESSORS(source_mapping_url, Object)
 
-  // [wasm_object]: the wasm object this script belongs to.
+  // [wasm_compiled_module]: the compiled wasm module this script belongs to.
   // This must only be called if the type of this script is TYPE_WASM.
-  DECL_ACCESSORS(wasm_object, JSObject)
-
-  // [wasm_function_index]: the wasm function index this script belongs to.
-  // This must only be called if the type of this script is TYPE_WASM.
-  DECL_INT_ACCESSORS(wasm_function_index)
+  DECL_ACCESSORS(wasm_compiled_module, Object)
 
   // [compilation_type]: how the script was compiled. Encoded in the
   // 'flags' field.
@@ -6916,14 +7139,6 @@
   // Init line_ends array with source code positions of line ends.
   static void InitLineEnds(Handle<Script> script);
 
-  // Convert code offset into column number.
-  static int GetColumnNumber(Handle<Script> script, int code_offset);
-
-  // Convert code offset into (zero-based) line number.
-  // The non-handlified version does not allocate, but may be much slower.
-  static int GetLineNumber(Handle<Script> script, int code_offset);
-  int GetLineNumber(int code_pos);
-
   // Carries information about a source position.
   struct PositionInfo {
     PositionInfo() : line(-1), column(-1), line_start(-1), line_end(-1) {}
@@ -6931,7 +7146,7 @@
     int line;        // Zero-based line number.
     int column;      // Zero-based column number.
     int line_start;  // Position of first character in line.
-    int line_end;    // Position of last (non-linebreak) character in line.
+    int line_end;    // Position of final linebreak character in line.
   };
 
   // Specifies whether to add offsets to position infos.
@@ -6940,8 +7155,20 @@
   // Retrieves information about the given position, optionally with an offset.
   // Returns false on failure, and otherwise writes into the given info object
   // on success.
+  // The static method is preferable for handlified callsites because it
+  // initializes the line ends array, avoiding expensive recomputations.
+  // The non-static version does not allocate and is safe for unhandlified
+  // callsites.
+  static bool GetPositionInfo(Handle<Script> script, int position,
+                              PositionInfo* info, OffsetFlag offset_flag);
   bool GetPositionInfo(int position, PositionInfo* info,
-                       OffsetFlag offset_flag);
+                       OffsetFlag offset_flag) const;
+
+  // Wrappers for GetPositionInfo
+  static int GetColumnNumber(Handle<Script> script, int code_offset);
+  int GetColumnNumber(int code_pos) const;
+  static int GetLineNumber(Handle<Script> script, int code_offset);
+  int GetLineNumber(int code_pos) const;
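
A hedged sketch of the intended split (assuming the OffsetFlag enumerators
include WITH_OFFSET, which is not shown in this hunk):

    // Handlified caller: may allocate; initializes the line ends array once.
    //   Script::PositionInfo info;
    //   if (Script::GetPositionInfo(script, position, &info,
    //                               Script::WITH_OFFSET)) {
    //     // info.line and info.column are zero-based.
    //   }
    // Unhandlified caller (e.g. during a stack walk): use the const member
    // version, which performs no allocation.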
 
   // Get the JS object wrapping the given script; create it if none exists.
   static Handle<JSObject> GetWrapper(Handle<Script> script);
@@ -6985,8 +7212,6 @@
   static const int kSize = kSourceMappingUrlOffset + kPointerSize;
 
  private:
-  int GetLineNumberWithArray(int code_pos);
-
   // Bit positions in the flags field.
   static const int kCompilationTypeBit = 0;
   static const int kCompilationStateBit = 1;
@@ -7072,6 +7297,7 @@
   V(Number, isInteger, NumberIsInteger)                     \
   V(Number, isNaN, NumberIsNaN)                             \
   V(Number, isSafeInteger, NumberIsSafeInteger)             \
+  V(Number, parseFloat, NumberParseFloat)                   \
   V(Number, parseInt, NumberParseInt)                       \
   V(Number.prototype, toString, NumberToString)
 
@@ -7091,6 +7317,10 @@
   kMathPowHalf,
   // These are manually assigned to special getters during bootstrapping.
   kArrayBufferByteLength,
+  kArrayEntries,
+  kArrayKeys,
+  kArrayValues,
+  kArrayIteratorNext,
   kDataViewBuffer,
   kDataViewByteLength,
   kDataViewByteOffset,
@@ -7105,8 +7335,12 @@
   kGlobalIsNaN,
   kTypedArrayByteLength,
   kTypedArrayByteOffset,
+  kTypedArrayEntries,
+  kTypedArrayKeys,
   kTypedArrayLength,
+  kTypedArrayValues,
   kSharedArrayBufferByteLength,
+  kStringIterator,
   kStringIteratorNext,
 };
 
@@ -7133,6 +7367,14 @@
   // a Code object or a BytecodeArray.
   inline AbstractCode* abstract_code();
 
+  // Tells whether or not this shared function info is interpreted.
+  //
+  // Note: function->IsInterpreted() does not necessarily return the same value
+  // as function->shared()->IsInterpreted() because the shared function info
+  // could tier up to baseline via a different function closure. The interpreter
+  // entry stub will "self-heal" this divergence when the function is executed.
+  inline bool IsInterpreted() const;
+
   inline void ReplaceCode(Code* code);
   inline bool HasBaselineCode() const;
 
@@ -7169,10 +7411,6 @@
   static Handle<LiteralsArray> FindOrCreateLiterals(
       Handle<SharedFunctionInfo> shared, Handle<Context> native_context);
 
-  // Add or update entry in the optimized code map for context-independent code.
-  static void AddSharedCodeToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
-                                              Handle<Code> code);
-
   // Add or update entry in the optimized code map for context-dependent code.
   // If {code} is not given, then an existing entry's code won't be overwritten.
   static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
@@ -7183,12 +7421,11 @@
 
   // Set up the link between shared function info and the script. The shared
   // function info is added to the list on the script.
-  static void SetScript(Handle<SharedFunctionInfo> shared,
-                        Handle<Object> script_object);
+  V8_EXPORT_PRIVATE static void SetScript(Handle<SharedFunctionInfo> shared,
+                                          Handle<Object> script_object);
 
   // Layout description of the optimized code map.
-  static const int kSharedCodeIndex = 0;
-  static const int kEntriesStart = 1;
+  static const int kEntriesStart = 0;
   static const int kContextOffset = 0;
   static const int kCachedCodeOffset = 1;
   static const int kLiteralsOffset = 2;
@@ -7298,7 +7535,7 @@
   inline String* inferred_name();
   inline void set_inferred_name(String* inferred_name);
 
-  // [script info]: Script from which the function originates.
+  // [script]: Script from which the function originates.
   DECL_ACCESSORS(script, Object)
 
   // [num_literals]: Number of literals used by this function.
@@ -7368,12 +7605,6 @@
   // when doing GC if we expect that the function will no longer be used.
   DECL_BOOLEAN_ACCESSORS(allows_lazy_compilation)
 
-  // Indicates if this function can be lazy compiled without a context.
-  // This is used to determine if we can force compilation without reaching
-  // the function through program execution but through other means (e.g. heap
-  // iteration by the debugger).
-  DECL_BOOLEAN_ACCESSORS(allows_lazy_compilation_without_context)
-
   // Indicates whether optimizations have been disabled for this
   // shared function info. If a function is repeatedly optimized or if
   // we cannot optimize the function we disable optimization to avoid
@@ -7442,6 +7673,9 @@
   // Whether this function was created from a FunctionDeclaration.
   DECL_BOOLEAN_ACCESSORS(is_declaration)
 
+  // Whether this function was marked to be tiered up.
+  DECL_BOOLEAN_ACCESSORS(marked_for_tier_up)
+
   // Indicates that asm->wasm conversion failed and should not be re-attempted.
   DECL_BOOLEAN_ACCESSORS(is_asm_wasm_broken)
 
@@ -7691,7 +7925,7 @@
   enum CompilerHints {
     // byte 0
     kAllowLazyCompilation,
-    kAllowLazyCompilationWithoutContext,
+    kMarkedForTierUp,
     kOptimizationDisabled,
     kNeverCompiled,
     kNative,
@@ -7741,7 +7975,8 @@
   static const int kCompilerHintsSize = kIntSize;
 #endif
 
-  STATIC_ASSERT(SharedFunctionInfo::kCompilerHintsCount <=
+  STATIC_ASSERT(SharedFunctionInfo::kCompilerHintsCount +
+                    SharedFunctionInfo::kCompilerHintsSmiTagSize <=
                 SharedFunctionInfo::kCompilerHintsSize * kBitsPerByte);
 
  public:
@@ -7758,6 +7993,9 @@
   static const int kAllFunctionKindBitsMask = FunctionKindBits::kMask
                                               << kCompilerHintsSmiTagSize;
 
+  static const int kMarkedForTierUpBit =
+      kMarkedForTierUp + kCompilerHintsSmiTagSize;
+
   // Constants for optimizing codegen for strict mode function and
   // native tests.
   // Allows to use byte-width instructions.
@@ -7770,6 +8008,9 @@
       FunctionKind::kClassConstructor << kCompilerHintsSmiTagSize;
   STATIC_ASSERT(kClassConstructorBitsWithinByte < (1 << kBitsPerByte));
 
+  static const int kMarkedForTierUpBitWithinByte =
+      kMarkedForTierUpBit % kBitsPerByte;
+
 #if defined(V8_TARGET_LITTLE_ENDIAN)
 #define BYTE_OFFSET(compiler_hint) \
   kCompilerHintsOffset +           \
@@ -7786,12 +8027,13 @@
   static const int kFunctionKindByteOffset = BYTE_OFFSET(kFunctionKind);
   static const int kHasDuplicateParametersByteOffset =
       BYTE_OFFSET(kHasDuplicateParameters);
+  static const int kMarkedForTierUpByteOffset = BYTE_OFFSET(kMarkedForTierUp);
 #undef BYTE_OFFSET
 
  private:
   // Returns entry from optimized code map for specified context and OSR entry.
-  // The result is either kNotFound, kSharedCodeIndex for context-independent
-  // entry or a start index of the context-dependent entry.
+  // The result is either kNotFound, or a start index of the context-dependent
+  // entry.
   int SearchOptimizedCodeMapEntry(Context* native_context,
                                   BailoutId osr_ast_id);
 
@@ -7875,6 +8117,110 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSGeneratorObject);
 };
 
+class ModuleInfoEntry : public Struct {
+ public:
+  DECLARE_CAST(ModuleInfoEntry)
+  DECLARE_PRINTER(ModuleInfoEntry)
+  DECLARE_VERIFIER(ModuleInfoEntry)
+
+  DECL_ACCESSORS(export_name, Object)
+  DECL_ACCESSORS(local_name, Object)
+  DECL_ACCESSORS(import_name, Object)
+  DECL_INT_ACCESSORS(module_request)
+  DECL_INT_ACCESSORS(cell_index)
+  DECL_INT_ACCESSORS(beg_pos)
+  DECL_INT_ACCESSORS(end_pos)
+
+  static Handle<ModuleInfoEntry> New(Isolate* isolate,
+                                     Handle<Object> export_name,
+                                     Handle<Object> local_name,
+                                     Handle<Object> import_name,
+                                     int module_request, int cell_index,
+                                     int beg_pos, int end_pos);
+
+  static const int kExportNameOffset = HeapObject::kHeaderSize;
+  static const int kLocalNameOffset = kExportNameOffset + kPointerSize;
+  static const int kImportNameOffset = kLocalNameOffset + kPointerSize;
+  static const int kModuleRequestOffset = kImportNameOffset + kPointerSize;
+  static const int kCellIndexOffset = kModuleRequestOffset + kPointerSize;
+  static const int kBegPosOffset = kCellIndexOffset + kPointerSize;
+  static const int kEndPosOffset = kBegPosOffset + kPointerSize;
+  static const int kSize = kEndPosOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ModuleInfoEntry);
+};
+
+// ModuleInfo is to ModuleDescriptor what ScopeInfo is to Scope.
+class ModuleInfo : public FixedArray {
+ public:
+  DECLARE_CAST(ModuleInfo)
+
+  static Handle<ModuleInfo> New(Isolate* isolate, Zone* zone,
+                                ModuleDescriptor* descr);
+
+  inline FixedArray* module_requests() const;
+  inline FixedArray* special_exports() const;
+  inline FixedArray* regular_exports() const;
+  inline FixedArray* namespace_imports() const;
+  inline FixedArray* regular_imports() const;
+
+  // Accessors for [regular_exports].
+  int RegularExportCount() const;
+  String* RegularExportLocalName(int i) const;
+  int RegularExportCellIndex(int i) const;
+  FixedArray* RegularExportExportNames(int i) const;
+
+  static Handle<ModuleInfoEntry> LookupRegularImport(Handle<ModuleInfo> info,
+                                                     Handle<String> local_name);
+
+#ifdef DEBUG
+  inline bool Equals(ModuleInfo* other) const;
+#endif
+
+ private:
+  friend class Factory;
+  friend class ModuleDescriptor;
+  enum {
+    kModuleRequestsIndex,
+    kSpecialExportsIndex,
+    kRegularExportsIndex,
+    kNamespaceImportsIndex,
+    kRegularImportsIndex,
+    kLength
+  };
+  enum {
+    kRegularExportLocalNameOffset,
+    kRegularExportCellIndexOffset,
+    kRegularExportExportNamesOffset,
+    kRegularExportLength
+  };
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ModuleInfo);
+};
+
+// When importing a module namespace (import * as foo from "bar"), a
+// JSModuleNamespace object (representing module "bar") is created and bound to
+// the declared variable (foo).  A module can have at most one namespace object.
+class JSModuleNamespace : public JSObject {
+ public:
+  DECLARE_CAST(JSModuleNamespace)
+  DECLARE_PRINTER(JSModuleNamespace)
+  DECLARE_VERIFIER(JSModuleNamespace)
+
+  // The actual module whose namespace is being represented.
+  DECL_ACCESSORS(module, Module)
+
+  // Retrieve the value exported by [module] under the given [name]. If there is
+  // no such export, return Just(undefined). If the export is uninitialized,
+  // schedule an exception and return Nothing.
+  MUST_USE_RESULT MaybeHandle<Object> GetExport(Handle<String> name);
+
+  static const int kModuleOffset = JSObject::kHeaderSize;
+  static const int kSize = kModuleOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSModuleNamespace);
+};
+
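A hedged caller-side sketch for GetExport: an empty result means an exception
has already been scheduled (the uninitialized-binding case), so callers check
the MaybeHandle rather than the value:

    // Handle<Object> value;
    // if (!ns->GetExport(name).ToHandle(&value)) {
    //   return MaybeHandle<Object>();  // propagate the scheduled exception
    // }
    // // Otherwise |value| may legitimately be undefined (missing export).
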
 // A Module object is a mapping from export names to cells.
 // This is still very much in flux.
 class Module : public Struct {
@@ -7883,69 +8229,82 @@
   DECLARE_VERIFIER(Module)
   DECLARE_PRINTER(Module)
 
-  // The code representing this Module, either a
-  // SharedFunctionInfo or a JSFunction depending
-  // on whether it's been instantiated.
+  // The code representing this Module, or an abstraction thereof.
+  // This is either a SharedFunctionInfo or a JSFunction or a ModuleInfo
+  // depending on whether the module has been instantiated and evaluated.  See
+  // Module::ModuleVerify() for the precise invariant.
   DECL_ACCESSORS(code, Object)
 
+  // Arrays of cells corresponding to regular exports and regular imports.
+  // A cell's position in the array is determined by the cell index of the
+  // associated module entry (which coincides with the variable index of the
+  // associated variable).
+  DECL_ACCESSORS(regular_exports, FixedArray)
+  DECL_ACCESSORS(regular_imports, FixedArray)
+
+  // The complete export table, mapping an export name to its cell.
+  // TODO(neis): We may want to remove the regular exports from the table.
   DECL_ACCESSORS(exports, ObjectHashTable)
 
-  // [[RequestedModules]]: Modules imported or re-exported by this module.
+  // Hash for this object (a random non-zero Smi).
+  DECL_INT_ACCESSORS(hash)
+
+  // The namespace object (or undefined).
+  DECL_ACCESSORS(module_namespace, HeapObject)
+
+  // Modules imported or re-exported by this module.
   // Corresponds 1-to-1 to the module specifier strings in
   // ModuleInfo::module_requests.
   DECL_ACCESSORS(requested_modules, FixedArray)
 
-  // [[Evaluated]]: Whether this module has been evaluated. Modules
-  // are only evaluated a single time.
-  DECL_BOOLEAN_ACCESSORS(evaluated)
-
-  // Storage for [[Evaluated]]
-  DECL_INT_ACCESSORS(flags)
-
-  // Embedder-specified data
-  DECL_ACCESSORS(embedder_data, Object)
-
-  // Get the SharedFunctionInfo associated with the code.
-  inline SharedFunctionInfo* shared() const;
-
   // Get the ModuleInfo associated with the code.
   inline ModuleInfo* info() const;
 
-  // Compute a hash for this object.
-  inline uint32_t Hash() const;
+  inline bool instantiated() const;
+  inline bool evaluated() const;
+  inline void set_evaluated();
 
   // Implementation of spec operation ModuleDeclarationInstantiation.
   // Returns false if an exception occurred during instantiation, true
   // otherwise.
   static MUST_USE_RESULT bool Instantiate(Handle<Module> module,
                                           v8::Local<v8::Context> context,
-                                          v8::Module::ResolveCallback callback,
-                                          v8::Local<v8::Value> callback_data);
+                                          v8::Module::ResolveCallback callback);
 
   // Implementation of spec operation ModuleEvaluation.
   static MUST_USE_RESULT MaybeHandle<Object> Evaluate(Handle<Module> module);
 
-  static Handle<Object> LoadExport(Handle<Module> module, Handle<String> name);
-  static void StoreExport(Handle<Module> module, Handle<String> name,
-                          Handle<Object> value);
+  static Handle<Object> LoadVariable(Handle<Module> module, int cell_index);
+  static void StoreVariable(Handle<Module> module, int cell_index,
+                            Handle<Object> value);
 
-  static Handle<Object> LoadImport(Handle<Module> module, Handle<String> name,
-                                   int module_request);
+  // Get the namespace object for [module_request] of [module].  If it doesn't
+  // exist yet, it is created.
+  static Handle<JSModuleNamespace> GetModuleNamespace(Handle<Module> module,
+                                                      int module_request);
 
   static const int kCodeOffset = HeapObject::kHeaderSize;
   static const int kExportsOffset = kCodeOffset + kPointerSize;
-  static const int kRequestedModulesOffset = kExportsOffset + kPointerSize;
-  static const int kFlagsOffset = kRequestedModulesOffset + kPointerSize;
-  static const int kEmbedderDataOffset = kFlagsOffset + kPointerSize;
-  static const int kSize = kEmbedderDataOffset + kPointerSize;
+  static const int kRegularExportsOffset = kExportsOffset + kPointerSize;
+  static const int kRegularImportsOffset = kRegularExportsOffset + kPointerSize;
+  static const int kHashOffset = kRegularImportsOffset + kPointerSize;
+  static const int kModuleNamespaceOffset = kHashOffset + kPointerSize;
+  static const int kRequestedModulesOffset =
+      kModuleNamespaceOffset + kPointerSize;
+  static const int kSize = kRequestedModulesOffset + kPointerSize;
 
  private:
   enum { kEvaluatedBit };
 
-  static void CreateExport(Handle<Module> module, Handle<FixedArray> names);
+  static void CreateExport(Handle<Module> module, int cell_index,
+                           Handle<FixedArray> names);
   static void CreateIndirectExport(Handle<Module> module, Handle<String> name,
                                    Handle<ModuleInfoEntry> entry);
 
+  // Get the namespace object for [module].  If it doesn't exist yet, it is
+  // created.
+  static Handle<JSModuleNamespace> GetModuleNamespace(Handle<Module> module);
+
   // The [must_resolve] argument indicates whether or not an exception should be
   // thrown in case the module does not provide an export named [name]
   // (including when a cycle is detected).  An exception is always thrown in the
@@ -7956,16 +8315,16 @@
   // exception (so check manually!).
   class ResolveSet;
   static MUST_USE_RESULT MaybeHandle<Cell> ResolveExport(
-      Handle<Module> module, Handle<String> name, bool must_resolve,
-      ResolveSet* resolve_set);
+      Handle<Module> module, Handle<String> name, MessageLocation loc,
+      bool must_resolve, ResolveSet* resolve_set);
   static MUST_USE_RESULT MaybeHandle<Cell> ResolveImport(
       Handle<Module> module, Handle<String> name, int module_request,
-      bool must_resolve, ResolveSet* resolve_set);
+      MessageLocation loc, bool must_resolve, ResolveSet* resolve_set);
 
   // Helper for ResolveExport.
   static MUST_USE_RESULT MaybeHandle<Cell> ResolveExportUsingStarExports(
-      Handle<Module> module, Handle<String> name, bool must_resolve,
-      ResolveSet* resolve_set);
+      Handle<Module> module, Handle<String> name, MessageLocation loc,
+      bool must_resolve, ResolveSet* resolve_set);
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(Module);
 };
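
The switch from name-keyed LoadExport/StoreExport to index-based LoadVariable/StoreVariable means module variables are now addressed by an integer cell index instead of a string lookup, and GetModuleNamespace creates the namespace object lazily. A minimal standalone sketch of how a signed cell index can dispatch between a module's own export cells and its import cells; the sign convention and the toy types are assumptions for illustration, not the patch's code:

    #include <cassert>
    #include <vector>

    // Toy stand-in for Module. Assumption for illustration only: positive
    // cell indices address this module's own (regular) export cells,
    // negative ones its regular import cells.
    struct ToyModule {
      std::vector<int> regular_exports;
      std::vector<int> regular_imports;

      int LoadVariable(int cell_index) const {
        if (cell_index > 0) return regular_exports[cell_index - 1];
        assert(cell_index < 0);
        return regular_imports[-cell_index - 1];
      }

      void StoreVariable(int cell_index, int value) {
        assert(cell_index > 0);  // only a module's own exports are written
        regular_exports[cell_index - 1] = value;
      }
    };

    int main() {
      ToyModule m{{0, 0}, {42}};
      m.StoreVariable(2, 7);             // second export cell
      assert(m.LoadVariable(2) == 7);
      assert(m.LoadVariable(-1) == 42);  // first import cell
    }
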
@@ -8050,6 +8409,14 @@
   // Tells whether this function inlines the given shared function info.
   bool Inlines(SharedFunctionInfo* candidate);
 
+  // Tells whether or not this function is interpreted.
+  //
+  // Note: function->IsInterpreted() does not necessarily return the same value
+  // as function->shared()->IsInterpreted() because the shared function info
+  // could tier up to baseline via a different function closure. The interpreter
+  // entry stub will "self-heal" this divergence when the function is executed.
+  inline bool IsInterpreted();
+
   // Tells whether or not this function has been optimized.
   inline bool IsOptimized();
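
The IsInterpreted() comment describes a transient divergence: a closure can still point at the interpreter entry while its SharedFunctionInfo has tiered up through another closure, and executing the closure repairs the mismatch. A toy model of that lifecycle (not V8 code):

    #include <cassert>

    enum class CodeKind { kInterpreterEntry, kBaseline };

    struct SharedInfo {  // shared across all closures of one function
      CodeKind code = CodeKind::kInterpreterEntry;
      bool IsInterpreted() const { return code == CodeKind::kInterpreterEntry; }
    };

    struct Closure {  // one JSFunction-like closure
      SharedInfo* shared;
      CodeKind code = CodeKind::kInterpreterEntry;
      bool IsInterpreted() const { return code == CodeKind::kInterpreterEntry; }
      void Execute() { code = shared->code; }  // entry stub "self-heals"
    };

    int main() {
      SharedInfo shared;
      Closure a{&shared}, b{&shared};
      shared.code = CodeKind::kBaseline;  // another closure tiered up
      b.code = CodeKind::kBaseline;
      assert(a.IsInterpreted() && !shared.IsInterpreted());  // divergence
      a.Execute();                                           // healed on call
      assert(!a.IsInterpreted());
    }
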
 
@@ -8230,6 +8597,8 @@
 
   inline bool IsDetachedFrom(JSGlobalObject* global) const;
 
+  static int SizeWithInternalFields(int internal_field_count);
+
   // Dispatched behavior.
   DECLARE_PRINTER(JSGlobalProxy)
   DECLARE_VERIFIER(JSGlobalProxy)
@@ -8519,6 +8888,9 @@
   // Set implementation data after the object has been prepared.
   inline void SetDataAt(int index, Object* value);
 
+  inline void SetLastIndex(int index);
+  inline Object* LastIndex();
+
   static int code_index(bool is_latin1) {
     if (is_latin1) {
       return kIrregexpLatin1CodeIndex;
@@ -8879,7 +9251,7 @@
   DECLARE_CAST(AllocationSite)
   static inline AllocationSiteMode GetMode(
       ElementsKind boilerplate_elements_kind);
-  static inline AllocationSiteMode GetMode(ElementsKind from, ElementsKind to);
+  static AllocationSiteMode GetMode(ElementsKind from, ElementsKind to);
   static inline bool CanTrack(InstanceType type);
 
   static const int kTransitionInfoOffset = HeapObject::kHeaderSize;
@@ -9406,14 +9778,37 @@
   MUST_USE_RESULT static ComparisonResult Compare(Handle<String> x,
                                                   Handle<String> y);
 
+  // Perform ES6 21.1.3.8, including checking arguments.
+  static Object* IndexOf(Isolate* isolate, Handle<Object> receiver,
+                         Handle<Object> search, Handle<Object> position);
   // Perform string match of pattern on subject, starting at start index.
-  // Caller must ensure that 0 <= start_index <= sub->length().
-  static int IndexOf(Isolate* isolate, Handle<String> sub, Handle<String> pat,
-                     int start_index);
+  // Caller must ensure that 0 <= start_index <= receiver->length(), as this
+  // does not check any arguments.
+  static int IndexOf(Isolate* isolate, Handle<String> receiver,
+                     Handle<String> search, int start_index);
 
   static Object* LastIndexOf(Isolate* isolate, Handle<Object> receiver,
                              Handle<Object> search, Handle<Object> position);
 
+  // Encapsulates logic related to a match and its capture groups as required
+  // by GetSubstitution.
+  class Match {
+   public:
+    virtual Handle<String> GetMatch() = 0;
+    virtual MaybeHandle<String> GetCapture(int i, bool* capture_exists) = 0;
+    virtual Handle<String> GetPrefix() = 0;
+    virtual Handle<String> GetSuffix() = 0;
+    virtual int CaptureCount() = 0;
+    virtual ~Match() {}
+  };
+
+  // ES#sec-getsubstitution
+  // GetSubstitution(matched, str, position, captures, replacement)
+  // Expand the $-expressions in the string and return a new string with
+  // the result.
+  MUST_USE_RESULT static MaybeHandle<String> GetSubstitution(
+      Isolate* isolate, Match* match, Handle<String> replacement);
+
   // String equality operations.
   inline bool Equals(String* other);
   inline static bool Equals(Handle<String> one, Handle<String> two);
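
GetSubstitution expands the $-placeholders of a replacement string ($$, $&, $`, $', $n) against a match. A simplified standalone version handling single-digit captures only (V8's implementation works through the Match interface above and also handles two-digit capture indices):

    #include <cassert>
    #include <string>
    #include <vector>

    std::string GetSubstitution(const std::string& matched,
                                const std::string& prefix,
                                const std::string& suffix,
                                const std::vector<std::string>& captures,
                                const std::string& replacement) {
      std::string out;
      for (size_t i = 0; i < replacement.size(); ++i) {
        if (replacement[i] != '$' || i + 1 == replacement.size()) {
          out += replacement[i];
          continue;
        }
        char next = replacement[++i];
        switch (next) {
          case '$': out += '$'; break;        // $$ -> literal $
          case '&': out += matched; break;    // whole match
          case '`': out += prefix; break;     // text before the match
          case '\'': out += suffix; break;    // text after the match
          default:
            if (next >= '1' && next <= '9' &&
                static_cast<size_t>(next - '1') < captures.size()) {
              out += captures[next - '1'];    // $n capture group
            } else {
              out += '$';  // no such capture: emit the text literally
              out += next;
            }
        }
      }
      return out;
    }

    int main() {
      // "abc".replace("b", "[$`|$&|$']") -> "a[a|b|c]c"
      assert(GetSubstitution("b", "a", "c", {}, "[$`|$&|$']") == "a[a|b|c]c");
    }
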
@@ -9716,7 +10111,7 @@
                          WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
 
   // Dispatched behavior.
-  uint16_t ConsStringGet(int index);
+  V8_EXPORT_PRIVATE uint16_t ConsStringGet(int index);
 
   DECLARE_CAST(ConsString)
 
@@ -9759,7 +10154,7 @@
   inline void set_offset(int offset);
 
   // Dispatched behavior.
-  uint16_t SlicedStringGet(int index);
+  V8_EXPORT_PRIVATE uint16_t SlicedStringGet(int index);
 
   DECLARE_CAST(SlicedString)
 
@@ -10343,6 +10738,36 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSMap);
 };
 
+class JSArrayIterator : public JSObject {
+ public:
+  DECLARE_PRINTER(JSArrayIterator)
+  DECLARE_VERIFIER(JSArrayIterator)
+
+  DECLARE_CAST(JSArrayIterator)
+
+  // [object]: the [[IteratedObject]] internal field.
+  DECL_ACCESSORS(object, Object)
+
+  // [index]: The [[ArrayIteratorNextIndex]] internal field.
+  DECL_ACCESSORS(index, Object)
+
+  // [object_map]: The Map of the [[IteratedObject]] field at the time the
+  // iterator is allocated.
+  DECL_ACCESSORS(object_map, Object)
+
+  // Return the ElementsKind that a JSArrayIterator's [[IteratedObject]] is
+  // expected to have, based on its instance type.
+  static ElementsKind ElementsKindForInstanceType(InstanceType instance_type);
+
+  static const int kIteratedObjectOffset = JSObject::kHeaderSize;
+  static const int kNextIndexOffset = kIteratedObjectOffset + kPointerSize;
+  static const int kIteratedObjectMapOffset = kNextIndexOffset + kPointerSize;
+  static const int kSize = kIteratedObjectMapOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayIterator);
+};
+
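
Recording the iterated object's Map at allocation time lets the iterator validate its fast path cheaply: if the array's hidden class has changed since, it falls back to the generic protocol. A toy illustration of that invalidation check (not V8 code):

    #include <cassert>

    struct Map {};  // stand-in for a hidden class

    struct ToyArray {
      const Map* map;
    };

    struct ToyArrayIterator {
      ToyArray* object;
      int next_index = 0;
      const Map* object_map;  // Map captured when the iterator was allocated

      explicit ToyArrayIterator(ToyArray* a) : object(a), object_map(a->map) {}

      bool CanUseFastPath() const { return object->map == object_map; }
    };

    int main() {
      Map fast, dictionary;
      ToyArray arr{&fast};
      ToyArrayIterator it(&arr);
      assert(it.CanUseFastPath());
      arr.map = &dictionary;         // e.g. an elements-kind transition
      assert(!it.CanUseFastPath());  // must take the generic path now
    }
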
 class JSStringIterator : public JSObject {
  public:
   // Dispatched behavior.
@@ -10366,6 +10791,37 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSStringIterator);
 };
 
+// A JS iterator over the elements of a FixedArray.
+// This corresponds to ListIterator in ecma262/#sec-createlistiterator.
+class JSFixedArrayIterator : public JSObject {
+ public:
+  DECLARE_CAST(JSFixedArrayIterator)
+  DECLARE_PRINTER(JSFixedArrayIterator)
+  DECLARE_VERIFIER(JSFixedArrayIterator)
+
+  // The array over which the iterator iterates.
+  DECL_ACCESSORS(array, FixedArray)
+
+  // The index of the array element that will be returned next.
+  DECL_INT_ACCESSORS(index)
+
+  // The initial value of the object's "next" property.
+  DECL_ACCESSORS(initial_next, JSFunction)
+
+  static const int kArrayOffset = JSObject::kHeaderSize;
+  static const int kIndexOffset = kArrayOffset + kPointerSize;
+  static const int kNextOffset = kIndexOffset + kPointerSize;
+  static const int kHeaderSize = kNextOffset + kPointerSize;
+
+  enum InObjectPropertyIndex {
+    kNextIndex,
+    kInObjectPropertyCount  // Dummy.
+  };
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSFixedArrayIterator);
+};
+
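
The ListIterator of ecma262/#sec-createlistiterator simply walks a fixed list and reports value/done pairs. A minimal standalone sketch of that protocol (toy types, not V8's):

    #include <cassert>
    #include <vector>

    struct IterResult {
      int value;
      bool done;
    };

    class ListIterator {
     public:
      explicit ListIterator(std::vector<int> list) : list_(std::move(list)) {}

      IterResult Next() {
        if (index_ >= list_.size()) return {0, true};
        return {list_[index_++], false};  // advance the "next" index slot
      }

     private:
      std::vector<int> list_;
      size_t index_ = 0;  // index of the element returned next
    };

    int main() {
      ListIterator it({1, 2});
      assert(it.Next().value == 1);
      assert(it.Next().value == 2);
      assert(it.Next().done);
    }
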
 // OrderedHashTableIterator is an iterator that iterates over the keys and
 // values of an OrderedHashTable.
 //
@@ -10571,9 +11027,10 @@
 
   void Neuter();
 
-  static void Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
-                    bool is_external, void* data, size_t allocated_length,
-                    SharedFlag shared = SharedFlag::kNotShared);
+  V8_EXPORT_PRIVATE static void Setup(
+      Handle<JSArrayBuffer> array_buffer, Isolate* isolate, bool is_external,
+      void* data, size_t allocated_length,
+      SharedFlag shared = SharedFlag::kNotShared);
 
   static bool SetupAllocatingData(Handle<JSArrayBuffer> array_buffer,
                                   Isolate* isolate, size_t allocated_length,
@@ -10652,7 +11109,7 @@
   DECLARE_CAST(JSTypedArray)
 
   ExternalArrayType type();
-  size_t element_size();
+  V8_EXPORT_PRIVATE size_t element_size();
 
   Handle<JSArrayBuffer> GetBuffer();
 
@@ -10862,6 +11319,9 @@
   inline bool is_special_data_property();
   inline void set_is_special_data_property(bool value);
 
+  inline bool replace_on_access();
+  inline void set_replace_on_access(bool value);
+
   inline bool is_sloppy();
   inline void set_is_sloppy(bool value);
 
@@ -10903,7 +11363,8 @@
   static const int kAllCanWriteBit = 1;
   static const int kSpecialDataProperty = 2;
   static const int kIsSloppy = 3;
-  class AttributesField : public BitField<PropertyAttributes, 4, 3> {};
+  static const int kReplaceOnAccess = 4;
+  class AttributesField : public BitField<PropertyAttributes, 5, 3> {};
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(AccessorInfo);
 };
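
Inserting kReplaceOnAccess at bit 4 is what pushes AttributesField from position 4 to position 5. A minimal sketch of the shift/mask BitField pattern this layout relies on (a simplified stand-in, not V8's template):

    #include <cassert>
    #include <cstdint>

    // Simplified BitField: a field of `size` bits starting at `shift`.
    template <typename T, int shift, int size>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return (static_cast<uint32_t>(value) << shift) & kMask;
      }
      static T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> shift);
      }
    };

    enum Attrs { kNone = 0, kReadOnly = 1, kDontEnum = 2 };

    int main() {
      // Bits 0-4 hold boolean flags (kReplaceOnAccess is bit 4), so the
      // 3-bit attributes field now starts at bit 5.
      using AttributesField = BitField<Attrs, 5, 3>;
      uint32_t word = (1u << 4) | AttributesField::encode(kDontEnum);
      assert(AttributesField::decode(word) == kDontEnum);
      assert(word & (1u << 4));  // replace_on_access flag is still set
    }
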
@@ -11069,9 +11530,7 @@
   static const int kPropertyListOffset = kNumberOfProperties + kPointerSize;
   static const int kPropertyAccessorsOffset =
       kPropertyListOffset + kPointerSize;
-  static const int kPropertyIntrinsicsOffset =
-      kPropertyAccessorsOffset + kPointerSize;
-  static const int kHeaderSize = kPropertyIntrinsicsOffset + kPointerSize;
+  static const int kHeaderSize = kPropertyAccessorsOffset + kPointerSize;
 
   static const int kFastTemplateInstantiationsCacheSize = 1 * KB;
 
@@ -11110,6 +11569,8 @@
   DECL_BOOLEAN_ACCESSORS(do_not_cache)
   DECL_BOOLEAN_ACCESSORS(accept_any_receiver)
 
+  DECL_ACCESSORS(cached_property_name, Object)
+
   DECLARE_CAST(FunctionTemplateInfo)
 
   // Dispatched behavior.
@@ -11136,7 +11597,8 @@
       kAccessCheckInfoOffset + kPointerSize;
   static const int kFlagOffset = kSharedFunctionInfoOffset + kPointerSize;
   static const int kLengthOffset = kFlagOffset + kPointerSize;
-  static const int kSize = kLengthOffset + kPointerSize;
+  static const int kCachedPropertyNameOffset = kLengthOffset + kPointerSize;
+  static const int kSize = kCachedPropertyNameOffset + kPointerSize;
 
   static Handle<SharedFunctionInfo> GetOrCreateSharedFunctionInfo(
       Isolate* isolate, Handle<FunctionTemplateInfo> info);
@@ -11147,6 +11609,10 @@
   bool IsTemplateFor(Map* map);
   inline bool instantiated();
 
+  // Helper function for cached accessors.
+  static MaybeHandle<Name> TryGetCachedPropertyName(Isolate* isolate,
+                                                    Handle<Object> getter);
+
  private:
   // Bit position in the flag, from least significant bit position.
   static const int kHiddenPrototypeBit   = 0;
@@ -11217,7 +11683,7 @@
   // Get the number of break points for this function.
   int GetBreakPointCount();
 
-  static Smi* uninitialized() { return Smi::FromInt(0); }
+  static Smi* uninitialized() { return Smi::kZero; }
 
   inline bool HasDebugBytecodeArray();
   inline bool HasDebugCode();
diff --git a/src/ostreams.h b/src/ostreams.h
index dea7514..e72c8ee 100644
--- a/src/ostreams.h
+++ b/src/ostreams.h
@@ -80,7 +80,8 @@
 std::ostream& operator<<(std::ostream& os, const AsReversiblyEscapedUC16& c);
 
 // Same as AsReversiblyEscapedUC16 with additional escaping of \n, \r, " and '.
-std::ostream& operator<<(std::ostream& os, const AsEscapedUC16ForJSON& c);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+                                           const AsEscapedUC16ForJSON& c);
 
 // Writes the given character to the output escaping everything outside
 // of printable ASCII range.
diff --git a/src/parsing/expression-classifier.h b/src/parsing/expression-classifier.h
index 6a1fbac..d2dc6fa 100644
--- a/src/parsing/expression-classifier.h
+++ b/src/parsing/expression-classifier.h
@@ -22,8 +22,7 @@
   T(StrictModeFormalParametersProduction, 5) \
   T(ArrowFormalParametersProduction, 6)      \
   T(LetPatternProduction, 7)                 \
-  T(TailCallExpressionProduction, 8)         \
-  T(AsyncArrowFormalParametersProduction, 9)
+  T(AsyncArrowFormalParametersProduction, 8)
 
 // Expression classifiers serve two purposes:
 //
@@ -191,13 +190,6 @@
     return reported_error(kLetPatternProduction);
   }
 
-  V8_INLINE bool has_tail_call_expression() const {
-    return !is_valid(TailCallExpressionProduction);
-  }
-  V8_INLINE const Error& tail_call_expression_error() const {
-    return reported_error(kTailCallExpressionProduction);
-  }
-
   V8_INLINE const Error& async_arrow_formal_parameters_error() const {
     return reported_error(kAsyncArrowFormalParametersProduction);
   }
@@ -299,14 +291,6 @@
     Add(Error(loc, message, kLetPatternProduction, arg));
   }
 
-  void RecordTailCallExpressionError(const Scanner::Location& loc,
-                                     MessageTemplate::Template message,
-                                     const char* arg = nullptr) {
-    if (has_tail_call_expression()) return;
-    invalid_productions_ |= TailCallExpressionProduction;
-    Add(Error(loc, message, kTailCallExpressionProduction, arg));
-  }
-
   void Accumulate(ExpressionClassifier* inner, unsigned productions,
                   bool merge_non_patterns = true) {
     DCHECK_EQ(inner->reported_errors_, reported_errors_);
diff --git a/src/parsing/parse-info.cc b/src/parsing/parse-info.cc
index 5b9b5e4..4fbfb19 100644
--- a/src/parsing/parse-info.cc
+++ b/src/parsing/parse-info.cc
@@ -31,18 +31,12 @@
       function_name_(nullptr),
       literal_(nullptr) {}
 
-ParseInfo::ParseInfo(Zone* zone, Handle<JSFunction> function)
-    : ParseInfo(zone, Handle<SharedFunctionInfo>(function->shared())) {
-  if (!function->context()->IsNativeContext()) {
-    set_outer_scope_info(handle(function->context()->scope_info()));
-  }
-}
-
 ParseInfo::ParseInfo(Zone* zone, Handle<SharedFunctionInfo> shared)
     : ParseInfo(zone) {
   isolate_ = shared->GetIsolate();
 
-  set_lazy();
+  set_toplevel(shared->is_toplevel());
+  set_allow_lazy_parsing(FLAG_lazy_inner_functions);
   set_hash_seed(isolate_->heap()->HashSeed());
   set_is_named_expression(shared->is_named_expression());
   set_calls_eval(shared->scope_info()->CallsEval());
@@ -56,22 +50,29 @@
 
   Handle<Script> script(Script::cast(shared->script()));
   set_script(script);
-  if (!script.is_null() && script->type() == Script::TYPE_NATIVE) {
-    set_native();
+  set_native(script->type() == Script::TYPE_NATIVE);
+  set_eval(script->compilation_type() == Script::COMPILATION_TYPE_EVAL);
+
+  Handle<HeapObject> scope_info(shared->outer_scope_info());
+  if (!scope_info->IsTheHole(isolate()) &&
+      Handle<ScopeInfo>::cast(scope_info)->length() > 0) {
+    set_outer_scope_info(Handle<ScopeInfo>::cast(scope_info));
   }
 }
 
 ParseInfo::ParseInfo(Zone* zone, Handle<Script> script) : ParseInfo(zone) {
   isolate_ = script->GetIsolate();
 
+  set_allow_lazy_parsing(String::cast(script->source())->length() >
+                         FLAG_min_preparse_length);
+  set_toplevel();
   set_hash_seed(isolate_->heap()->HashSeed());
   set_stack_limit(isolate_->stack_guard()->real_climit());
   set_unicode_cache(isolate_->unicode_cache());
   set_script(script);
 
-  if (script->type() == Script::TYPE_NATIVE) {
-    set_native();
-  }
+  set_native(script->type() == Script::TYPE_NATIVE);
+  set_eval(script->compilation_type() == Script::COMPILATION_TYPE_EVAL);
 }
 
 ParseInfo::~ParseInfo() {
diff --git a/src/parsing/parse-info.h b/src/parsing/parse-info.h
index 4aedae4..24188d9 100644
--- a/src/parsing/parse-info.h
+++ b/src/parsing/parse-info.h
@@ -26,12 +26,10 @@
 class Zone;
 
 // A container for the inputs, configuration options, and outputs of parsing.
-class ParseInfo {
+class V8_EXPORT_PRIVATE ParseInfo {
  public:
   explicit ParseInfo(Zone* zone);
-  ParseInfo(Zone* zone, Handle<JSFunction> function);
   ParseInfo(Zone* zone, Handle<Script> script);
-  // TODO(all) Only used via Debug::FindSharedFunctionInfoInScript, remove?
   ParseInfo(Zone* zone, Handle<SharedFunctionInfo> shared);
 
   ~ParseInfo();
@@ -45,9 +43,7 @@
   void setter(bool val) { SetFlag(flag, val); }
 
   FLAG_ACCESSOR(kToplevel, is_toplevel, set_toplevel)
-  FLAG_ACCESSOR(kLazy, is_lazy, set_lazy)
   FLAG_ACCESSOR(kEval, is_eval, set_eval)
-  FLAG_ACCESSOR(kGlobal, is_global, set_global)
   FLAG_ACCESSOR(kStrictMode, is_strict_mode, set_strict_mode)
   FLAG_ACCESSOR(kNative, is_native, set_native)
   FLAG_ACCESSOR(kModule, is_module, set_module)
@@ -57,6 +53,8 @@
   FLAG_ACCESSOR(kIsNamedExpression, is_named_expression,
                 set_is_named_expression)
   FLAG_ACCESSOR(kCallsEval, calls_eval, set_calls_eval)
+  FLAG_ACCESSOR(kDebug, is_debug, set_is_debug)
+  FLAG_ACCESSOR(kSerializing, will_serialize, set_will_serialize)
 
 #undef FLAG_ACCESSOR
 
@@ -99,6 +97,9 @@
     return compile_options_;
   }
   void set_compile_options(ScriptCompiler::CompileOptions compile_options) {
+    if (compile_options == ScriptCompiler::kConsumeParserCache) {
+      set_allow_lazy_parsing();
+    }
     compile_options_ = compile_options;
   }
 
@@ -198,16 +199,17 @@
     kToplevel = 1 << 0,
     kLazy = 1 << 1,
     kEval = 1 << 2,
-    kGlobal = 1 << 3,
-    kStrictMode = 1 << 4,
-    kNative = 1 << 5,
-    kParseRestriction = 1 << 6,
-    kModule = 1 << 7,
-    kAllowLazyParsing = 1 << 8,
-    kIsNamedExpression = 1 << 9,
-    kCallsEval = 1 << 10,
+    kStrictMode = 1 << 3,
+    kNative = 1 << 4,
+    kParseRestriction = 1 << 5,
+    kModule = 1 << 6,
+    kAllowLazyParsing = 1 << 7,
+    kIsNamedExpression = 1 << 8,
+    kCallsEval = 1 << 9,
+    kDebug = 1 << 10,
+    kSerializing = 1 << 11,
     // ---------- Output flags --------------------------
-    kAstValueFactoryOwned = 1 << 11
+    kAstValueFactoryOwned = 1 << 12
   };
 
   //------------- Inputs to parsing and scope analysis -----------------------
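
kDebug and kSerializing slot into the same packed-bits scheme as the existing flags. A minimal sketch of the GetFlag/SetFlag pattern that FLAG_ACCESSOR expands to (shape assumed from the macro above; a toy class, not the V8 one):

    #include <cassert>

    class ToyParseInfo {
     public:
      enum Flag : unsigned {
        kToplevel = 1 << 0,
        kEval = 1 << 2,
        kDebug = 1 << 10,
        kSerializing = 1 << 11,
      };

      bool is_debug() const { return GetFlag(kDebug); }
      void set_is_debug(bool v = true) { SetFlag(kDebug, v); }

     private:
      bool GetFlag(Flag f) const { return (flags_ & f) != 0; }
      void SetFlag(Flag f, bool v) { flags_ = v ? (flags_ | f) : (flags_ & ~f); }
      unsigned flags_ = 0;
    };

    int main() {
      ToyParseInfo info;
      info.set_is_debug(true);
      assert(info.is_debug());
      info.set_is_debug(false);
      assert(!info.is_debug());
    }
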
diff --git a/src/parsing/parser-base.h b/src/parsing/parser-base.h
index 1ebbee4..bb62f86 100644
--- a/src/parsing/parser-base.h
+++ b/src/parsing/parser-base.h
@@ -59,10 +59,27 @@
 
 struct FormalParametersBase {
   explicit FormalParametersBase(DeclarationScope* scope) : scope(scope) {}
+
+  int num_parameters() const {
+    // Don't include the rest parameter in the function's formal parameter
+    // count (in particular, SharedFunctionInfo::internal_formal_parameter_count,
+    // which determines whether an arguments adaptor frame is needed).
+    return arity - has_rest;
+  }
+
+  void UpdateArityAndFunctionLength(bool is_optional, bool is_rest) {
+    if (!is_optional && !is_rest && function_length == arity) {
+      ++function_length;
+    }
+    ++arity;
+  }
+
   DeclarationScope* scope;
   bool has_rest = false;
   bool is_simple = true;
   int materialized_literals_count = 0;
+  int function_length = 0;
+  int arity = 0;
 };
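
num_parameters() and UpdateArityAndFunctionLength() encode the rule that a function's .length counts only the leading required parameters, while arity counts every declared parameter and the rest parameter is excluded from the formal count. A standalone restatement with a worked case (has_rest is folded into the updater here purely for self-containment; the patch sets it separately):

    #include <cassert>

    struct FormalParams {
      bool has_rest = false;
      int function_length = 0;
      int arity = 0;

      int num_parameters() const { return arity - has_rest; }

      void UpdateArityAndFunctionLength(bool is_optional, bool is_rest) {
        // .length only grows while no optional/rest parameter has been seen.
        if (!is_optional && !is_rest && function_length == arity) {
          ++function_length;
        }
        ++arity;
        if (is_rest) has_rest = true;
      }
    };

    int main() {
      // function f(a, b = 1, ...rest) {}
      FormalParams p;
      p.UpdateArityAndFunctionLength(false, false);  // a
      p.UpdateArityAndFunctionLength(true, false);   // b = 1
      p.UpdateArityAndFunctionLength(false, true);   // ...rest
      assert(p.arity == 3);
      assert(p.function_length == 1);   // f.length === 1
      assert(p.num_parameters() == 2);  // rest excluded from formal count
    }
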
 
 
@@ -175,27 +192,25 @@
 
   ParserBase(Zone* zone, Scanner* scanner, uintptr_t stack_limit,
              v8::Extension* extension, AstValueFactory* ast_value_factory,
-             ParserRecorder* log)
+             RuntimeCallStats* runtime_call_stats)
       : scope_state_(nullptr),
         function_state_(nullptr),
         extension_(extension),
         fni_(nullptr),
         ast_value_factory_(ast_value_factory),
         ast_node_factory_(ast_value_factory),
-        log_(log),
-        mode_(PARSE_EAGERLY),  // Lazy mode must be set explicitly.
+        runtime_call_stats_(runtime_call_stats),
         parsing_module_(false),
         stack_limit_(stack_limit),
         zone_(zone),
         classifier_(nullptr),
         scanner_(scanner),
         stack_overflow_(false),
+        default_eager_compile_hint_(FunctionLiteral::kShouldLazyCompile),
         allow_lazy_(false),
         allow_natives_(false),
         allow_tailcalls_(false),
-        allow_harmony_restrictive_declarations_(false),
         allow_harmony_do_expressions_(false),
-        allow_harmony_for_in_(false),
         allow_harmony_function_sent_(false),
         allow_harmony_async_await_(false),
         allow_harmony_restrictive_generators_(false),
@@ -209,9 +224,7 @@
   ALLOW_ACCESSORS(lazy);
   ALLOW_ACCESSORS(natives);
   ALLOW_ACCESSORS(tailcalls);
-  ALLOW_ACCESSORS(harmony_restrictive_declarations);
   ALLOW_ACCESSORS(harmony_do_expressions);
-  ALLOW_ACCESSORS(harmony_for_in);
   ALLOW_ACCESSORS(harmony_function_sent);
   ALLOW_ACCESSORS(harmony_async_await);
   ALLOW_ACCESSORS(harmony_restrictive_generators);
@@ -224,26 +237,26 @@
 
   void set_stack_limit(uintptr_t stack_limit) { stack_limit_ = stack_limit; }
 
+  void set_default_eager_compile_hint(
+      FunctionLiteral::EagerCompileHint eager_compile_hint) {
+    default_eager_compile_hint_ = eager_compile_hint;
+  }
+
+  FunctionLiteral::EagerCompileHint default_eager_compile_hint() const {
+    return default_eager_compile_hint_;
+  }
+
   Zone* zone() const { return zone_; }
 
  protected:
   friend class v8::internal::ExpressionClassifier<ParserTypes<Impl>>;
 
-  // clang-format off
   enum AllowRestrictedIdentifiers {
     kAllowRestrictedIdentifiers,
     kDontAllowRestrictedIdentifiers
   };
 
-  enum Mode {
-    PARSE_LAZILY,
-    PARSE_EAGERLY
-  };
-
-  enum LazyParsingResult {
-    kLazyParsingComplete,
-    kLazyParsingAborted
-  };
+  enum LazyParsingResult { kLazyParsingComplete, kLazyParsingAborted };
 
   enum VariableDeclarationContext {
     kStatementListItem,
@@ -251,11 +264,7 @@
     kForStatement
   };
 
-  enum class FunctionBodyType {
-    kNormal,
-    kSingleExpression
-  };
-  // clang-format on
+  enum class FunctionBodyType { kNormal, kSingleExpression };
 
   class Checkpoint;
   class ClassLiteralChecker;
@@ -581,22 +590,6 @@
     int expected_property_count_;
   };
 
-  class ParsingModeScope BASE_EMBEDDED {
-   public:
-    ParsingModeScope(ParserBase* parser, Mode mode)
-        : parser_(parser),
-          old_mode_(parser->mode()) {
-      parser_->mode_ = mode;
-    }
-    ~ParsingModeScope() {
-      parser_->mode_ = old_mode_;
-    }
-
-   private:
-    ParserBase* parser_;
-    Mode old_mode_;
-  };
-
   struct DeclarationDescriptor {
     enum Kind { NORMAL, PARAMETER };
     Scope* scope;
@@ -659,11 +652,11 @@
     explicit ForInfo(ParserBase* parser)
         : bound_names(1, parser->zone()),
           mode(ForEachStatement::ENUMERATE),
-          each_loc(),
+          position(kNoSourcePosition),
           parsing_result() {}
     ZoneList<const AstRawString*> bound_names;
     ForEachStatement::VisitMode mode;
-    Scanner::Location each_loc;
+    int position;
     DeclarationParsingResult parsing_result;
   };
 
@@ -743,7 +736,6 @@
   int peek_position() const { return scanner_->peek_location().beg_pos; }
   bool stack_overflow() const { return stack_overflow_; }
   void set_stack_overflow() { stack_overflow_ = true; }
-  Mode mode() const { return mode_; }
 
   INLINE(Token::Value peek()) {
     if (stack_overflow_) return Token::ILLEGAL;
@@ -1430,8 +1422,7 @@
   FuncNameInferrer* fni_;
   AstValueFactory* ast_value_factory_;  // Not owned.
   typename Types::Factory ast_node_factory_;
-  ParserRecorder* log_;
-  Mode mode_;
+  RuntimeCallStats* runtime_call_stats_;
   bool parsing_module_;
   uintptr_t stack_limit_;
 
@@ -1444,12 +1435,12 @@
   Scanner* scanner_;
   bool stack_overflow_;
 
+  FunctionLiteral::EagerCompileHint default_eager_compile_hint_;
+
   bool allow_lazy_;
   bool allow_natives_;
   bool allow_tailcalls_;
-  bool allow_harmony_restrictive_declarations_;
   bool allow_harmony_do_expressions_;
-  bool allow_harmony_for_in_;
   bool allow_harmony_function_sent_;
   bool allow_harmony_async_await_;
   bool allow_harmony_restrictive_generators_;
@@ -1755,8 +1746,7 @@
     case Token::FUTURE_STRICT_RESERVED_WORD: {
       // Using eval or arguments in this context is OK even in strict mode.
       IdentifierT name = ParseAndClassifyIdentifier(CHECK_OK);
-      return impl()->ExpressionFromIdentifier(name, beg_pos,
-                                              scanner()->location().end_pos);
+      return impl()->ExpressionFromIdentifier(name, beg_pos);
     }
 
     case Token::STRING: {
@@ -2286,10 +2276,10 @@
   FunctionLiteralT function_literal = factory()->NewFunctionLiteral(
       impl()->EmptyIdentifierString(), initializer_scope, body,
       initializer_state.materialized_literal_count(),
-      initializer_state.expected_property_count(), 0,
+      initializer_state.expected_property_count(), 0, 0,
       FunctionLiteral::kNoDuplicateParameters,
-      FunctionLiteral::kAnonymousExpression,
-      FunctionLiteral::kShouldLazyCompile, initializer_scope->start_position());
+      FunctionLiteral::kAnonymousExpression, default_eager_compile_hint_,
+      initializer_scope->start_position(), true);
   function_literal->set_is_class_field_initializer(true);
   return function_literal;
 }
@@ -2377,8 +2367,7 @@
             Scanner::Location(next_beg_pos, next_end_pos),
             MessageTemplate::kAwaitBindingIdentifier);
       }
-      ExpressionT lhs =
-          impl()->ExpressionFromIdentifier(name, next_beg_pos, next_end_pos);
+      ExpressionT lhs = impl()->ExpressionFromIdentifier(name, next_beg_pos);
       CheckDestructuringElement(lhs, next_beg_pos, next_end_pos);
 
       ExpressionT value;
@@ -2645,8 +2634,8 @@
       PeekAhead() == Token::ARROW) {
     // async Identifier => AsyncConciseBody
     IdentifierT name = ParseAndClassifyIdentifier(CHECK_OK);
-    expression = impl()->ExpressionFromIdentifier(
-        name, position(), scanner()->location().end_pos, InferName::kNo);
+    expression =
+        impl()->ExpressionFromIdentifier(name, position(), InferName::kNo);
     if (fni_) {
       // Remove `async` keyword from inferred name stack.
       fni_->RemoveAsyncKeywordFromEnd();
@@ -2722,8 +2711,7 @@
   if (is_destructuring_assignment) {
     // This is definitely not an expression so don't accumulate
     // expression-related errors.
-    productions &= ~(ExpressionClassifier::ExpressionProduction |
-                     ExpressionClassifier::TailCallExpressionProduction);
+    productions &= ~ExpressionClassifier::ExpressionProduction;
   }
 
   if (!Token::IsAssignmentOp(peek())) {
@@ -3083,8 +3071,8 @@
           // Also the trailing parenthesis are a hint that the function will
           // be called immediately. If we happen to have parsed a preceding
           // function literal eagerly, we can also compile it eagerly.
-          if (result->IsFunctionLiteral() && mode() == PARSE_EAGERLY) {
-            result->AsFunctionLiteral()->set_should_eager_compile();
+          if (result->IsFunctionLiteral()) {
+            result->AsFunctionLiteral()->SetShouldEagerCompile();
           }
         }
         Scanner::Location spread_pos;
@@ -3413,10 +3401,10 @@
           pos = position();
         } else {
           pos = peek_position();
-          if (expression->IsFunctionLiteral() && mode() == PARSE_EAGERLY) {
+          if (expression->IsFunctionLiteral()) {
             // If the tag function looks like an IIFE, set_parenthesized() to
             // force eager compilation.
-            expression->AsFunctionLiteral()->set_should_eager_compile();
+            expression->AsFunctionLiteral()->SetShouldEagerCompile();
           }
         }
         expression = ParseTemplateLiteral(expression, pos, CHECK_OK);
@@ -3482,11 +3470,11 @@
   //   FormalParameter[?Yield]
   //   FormalParameterList[?Yield] , FormalParameter[?Yield]
 
-  DCHECK_EQ(0, parameters->Arity());
+  DCHECK_EQ(0, parameters->arity);
 
   if (peek() != Token::RPAREN) {
     while (true) {
-      if (parameters->Arity() > Code::kMaxArguments) {
+      if (parameters->arity > Code::kMaxArguments) {
         ReportMessage(MessageTemplate::kTooManyParameters);
         *ok = false;
         return;
@@ -3513,7 +3501,7 @@
     }
   }
 
-  for (int i = 0; i < parameters->Arity(); ++i) {
+  for (int i = 0; i < parameters->arity; ++i) {
     auto parameter = parameters->at(i);
     impl()->DeclareFormalParameter(parameters->scope, parameter);
   }
@@ -3671,13 +3659,10 @@
   int pos = position();
   ParseFunctionFlags flags = ParseFunctionFlags::kIsNormal;
   if (Check(Token::MUL)) {
-    flags |= ParseFunctionFlags::kIsGenerator;
-    if (allow_harmony_restrictive_declarations()) {
-      impl()->ReportMessageAt(scanner()->location(),
-                              MessageTemplate::kGeneratorInLegacyContext);
-      *ok = false;
-      return impl()->NullStatement();
-    }
+    impl()->ReportMessageAt(scanner()->location(),
+                            MessageTemplate::kGeneratorInLegacyContext);
+    *ok = false;
+    return impl()->NullStatement();
   }
   return ParseHoistableDeclaration(pos, flags, nullptr, false, ok);
 }
@@ -3905,6 +3890,11 @@
 typename ParserBase<Impl>::ExpressionT
 ParserBase<Impl>::ParseArrowFunctionLiteral(
     bool accept_IN, const FormalParametersT& formal_parameters, bool* ok) {
+  RuntimeCallTimerScope runtime_timer(
+      runtime_call_stats_,
+      Impl::IsPreParser() ? &RuntimeCallStats::PreParseArrowFunctionLiteral
+                          : &RuntimeCallStats::ParseArrowFunctionLiteral);
+
   if (peek() == Token::ARROW && scanner_->HasAnyLineTerminatorBeforeNext()) {
     // ASI inserts `;` after arrow parameters if a line terminator is found.
     // `=> ...` is never a valid expression, so report as syntax error.
@@ -3915,14 +3905,20 @@
   }
 
   StatementListT body = impl()->NullStatementList();
-  int num_parameters = formal_parameters.scope->num_parameters();
   int materialized_literal_count = -1;
   int expected_property_count = -1;
 
   FunctionKind kind = formal_parameters.scope->function_kind();
   FunctionLiteral::EagerCompileHint eager_compile_hint =
-      FunctionLiteral::kShouldLazyCompile;
+      default_eager_compile_hint_;
+  bool can_preparse = impl()->parse_lazily() &&
+                      eager_compile_hint == FunctionLiteral::kShouldLazyCompile;
+  // TODO(marja): consider lazy-parsing inner arrow functions too. is_this
+  // handling in Scope::ResolveVariable needs to change.
+  bool is_lazy_top_level_function =
+      can_preparse && impl()->AllowsLazyParsingWithoutUnresolvedVariables();
   bool should_be_used_once_hint = false;
+  bool has_braces = true;
   {
     FunctionState function_state(&function_state_, &scope_state_,
                                  formal_parameters.scope);
@@ -3936,18 +3932,22 @@
 
     if (peek() == Token::LBRACE) {
       // Multiple statement body
-      Consume(Token::LBRACE);
       DCHECK_EQ(scope(), formal_parameters.scope);
-      bool is_lazily_parsed =
-          (mode() == PARSE_LAZILY &&
-           formal_parameters.scope
-               ->AllowsLazyParsingWithoutUnresolvedVariables());
-      // TODO(marja): consider lazy-parsing inner arrow functions too. is_this
-      // handling in Scope::ResolveVariable needs to change.
-      if (is_lazily_parsed) {
+      if (is_lazy_top_level_function) {
+        // FIXME(marja): Arrow function parameters will be parsed even if the
+        // body is preparsed; move the relevant parts of parameter handling so
+        // that both paths treat parameters consistently.
         Scanner::BookmarkScope bookmark(scanner());
         bookmark.Set();
-        LazyParsingResult result = impl()->SkipLazyFunctionBody(
+        // For arrow functions, we don't need to retrieve data about function
+        // parameters.
+        int dummy_num_parameters = -1;
+        int dummy_function_length = -1;
+        bool dummy_has_duplicate_parameters = false;
+        DCHECK((kind & FunctionKind::kArrowFunction) != 0);
+        LazyParsingResult result = impl()->SkipFunction(
+            kind, formal_parameters.scope, &dummy_num_parameters,
+            &dummy_function_length, &dummy_has_duplicate_parameters,
             &materialized_literal_count, &expected_property_count, false, true,
             CHECK_OK);
         formal_parameters.scope->ResetAfterPreparsing(
@@ -3961,7 +3961,7 @@
         if (result == kLazyParsingAborted) {
           bookmark.Apply();
           // Trigger eager (re-)parsing, just below this block.
-          is_lazily_parsed = false;
+          is_lazy_top_level_function = false;
 
           // This is probably an initialization function. Inform the compiler it
           // should also eager-compile this function, and that we expect it to
@@ -3970,7 +3970,8 @@
           should_be_used_once_hint = true;
         }
       }
-      if (!is_lazily_parsed) {
+      if (!is_lazy_top_level_function) {
+        Consume(Token::LBRACE);
         body = impl()->ParseEagerFunctionBody(
             impl()->EmptyIdentifier(), kNoSourcePosition, formal_parameters,
             kind, FunctionLiteral::kAnonymousExpression, CHECK_OK);
@@ -3980,6 +3981,7 @@
       }
     } else {
       // Single-expression body
+      has_braces = false;
       int pos = position();
       DCHECK(ReturnExprContext::kInsideValidBlock ==
              function_state_->return_expr_context());
@@ -3997,7 +3999,9 @@
       } else {
         ExpressionT expression = ParseAssignmentExpression(accept_IN, CHECK_OK);
         impl()->RewriteNonPattern(CHECK_OK);
-        body->Add(factory()->NewReturnStatement(expression, pos), zone());
+        body->Add(
+            factory()->NewReturnStatement(expression, expression->position()),
+            zone());
         if (allow_tailcalls() && !is_sloppy(language_mode())) {
           // ES6 14.6.1 Static Semantics: IsInTailPosition
           impl()->MarkTailPosition(expression);
@@ -4028,12 +4032,19 @@
     impl()->RewriteDestructuringAssignments();
   }
 
+  if (FLAG_trace_preparse) {
+    Scope* scope = formal_parameters.scope;
+    PrintF("  [%s]: %i-%i (arrow function)\n",
+           is_lazy_top_level_function ? "Preparse no-resolution" : "Full parse",
+           scope->start_position(), scope->end_position());
+  }
   FunctionLiteralT function_literal = factory()->NewFunctionLiteral(
       impl()->EmptyIdentifierString(), formal_parameters.scope, body,
-      materialized_literal_count, expected_property_count, num_parameters,
+      materialized_literal_count, expected_property_count,
+      formal_parameters.num_parameters(), formal_parameters.function_length,
       FunctionLiteral::kNoDuplicateParameters,
       FunctionLiteral::kAnonymousExpression, eager_compile_hint,
-      formal_parameters.scope->start_position());
+      formal_parameters.scope->start_position(), has_braces);
 
   function_literal->set_function_token_position(
       formal_parameters.scope->start_position());
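
The preparse path above is a try-then-rewind protocol: set a scanner bookmark, attempt to skip the function body, and on kLazyParsingAborted rewind to the bookmark and reparse eagerly. A schematic standalone sketch of that control flow (all names here are placeholders, not V8 API):

    #include <cassert>
    #include <cstddef>

    enum class LazyParsingResult { kComplete, kAborted };

    struct Scanner {
      size_t pos = 0;
    };

    struct Bookmark {  // like Scanner::BookmarkScope
      Scanner* scanner;
      size_t saved = 0;
      void Set() { saved = scanner->pos; }
      void Apply() { scanner->pos = saved; }
    };

    LazyParsingResult TrySkipBody(Scanner* s, bool body_is_skippable) {
      s->pos += 100;  // pretend we scanned ahead
      return body_is_skippable ? LazyParsingResult::kComplete
                               : LazyParsingResult::kAborted;
    }

    int main() {
      Scanner scanner;
      Bookmark bookmark{&scanner};
      bookmark.Set();
      bool lazy = true;
      if (TrySkipBody(&scanner, /*body_is_skippable=*/false) ==
          LazyParsingResult::kAborted) {
        bookmark.Apply();  // rewind to the bookmark...
        lazy = false;      // ...and trigger eager (re-)parsing instead
      }
      assert(!lazy && scanner.pos == 0);
    }
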
@@ -4391,11 +4402,6 @@
           *ok = false;
           return kLazyParsingComplete;
         }
-        // Because declarations in strict eval code don't leak into the scope
-        // of the eval call, it is likely that functions declared in strict
-        // eval code will be used within the eval code, so lazy parsing is
-        // probably not a win.
-        if (scope()->is_eval_scope()) mode_ = PARSE_EAGERLY;
       } else if (impl()->IsUseAsmDirective(stat) &&
                  token_loc.end_pos - token_loc.beg_pos ==
                      sizeof("use asm") + 1) {
@@ -4622,8 +4628,7 @@
 template <typename Impl>
 typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseScopedStatement(
     ZoneList<const AstRawString*>* labels, bool legacy, bool* ok) {
-  if (is_strict(language_mode()) || peek() != Token::FUNCTION ||
-      (legacy && allow_harmony_restrictive_declarations())) {
+  if (is_strict(language_mode()) || peek() != Token::FUNCTION || legacy) {
     return ParseStatement(labels, kDisallowLabelledFunctionStatement, ok);
   } else {
     if (legacy) {
@@ -4693,7 +4698,7 @@
   //   Identifier ':' Statement
   //
   // ExpressionStatement[Yield] :
-  //   [lookahead ∉ {{, function, class, let [}] Expression[In, ?Yield] ;
+  //   [lookahead notin {{, function, class, let [}] Expression[In, ?Yield] ;
 
   int pos = peek_position();
 
@@ -5164,7 +5169,7 @@
                                 nullptr, CHECK_OK);
       bound_names_are_lexical =
           IsLexicalVariableMode(for_info.parsing_result.descriptor.mode);
-      for_info.each_loc = scanner()->location();
+      for_info.position = scanner()->location().beg_pos;
 
       if (CheckInOrOf(&for_info.mode)) {
         // Just one declaration followed by in/of.
@@ -5181,13 +5186,7 @@
              for_info.mode == ForEachStatement::ITERATE ||
              bound_names_are_lexical ||
              !impl()->IsIdentifier(
-                 for_info.parsing_result.declarations[0].pattern) ||
-             allow_harmony_for_in())) {
-          // Only increment the use count if we would have let this through
-          // without the flag.
-          if (allow_harmony_for_in()) {
-            impl()->CountUsage(v8::Isolate::kForInInitializer);
-          }
+                 for_info.parsing_result.declarations[0].pattern))) {
           impl()->ReportMessageAt(
               for_info.parsing_result.first_initializer_loc,
               MessageTemplate::kForInOfLoopInitializer,
diff --git a/src/parsing/parser.cc b/src/parsing/parser.cc
index 7b88695..8d88901 100644
--- a/src/parsing/parser.cc
+++ b/src/parsing/parser.cc
@@ -68,7 +68,6 @@
   if (data_length < PreparseDataConstants::kHeaderSize) return false;
   if (Magic() != PreparseDataConstants::kMagicNumber) return false;
   if (Version() != PreparseDataConstants::kCurrentVersion) return false;
-  if (HasError()) return false;
   // Check that the space allocated for function entries is sane.
   int functions_size = FunctionsSize();
   if (functions_size < 0) return false;
@@ -90,11 +89,6 @@
 }
 
 
-bool ParseData::HasError() {
-  return Data()[PreparseDataConstants::kHasErrorOffset];
-}
-
-
 unsigned ParseData::Magic() {
   return Data()[PreparseDataConstants::kMagicOffset];
 }
@@ -124,6 +118,7 @@
       parser_->zone_ = temp_zone;
       if (parser_->reusable_preparser_ != nullptr) {
         parser_->reusable_preparser_->zone_ = temp_zone;
+        parser_->reusable_preparser_->factory()->set_zone(temp_zone);
       }
     }
   }
@@ -132,6 +127,7 @@
     parser_->zone_ = prev_zone_;
     if (parser_->reusable_preparser_ != nullptr) {
       parser_->reusable_preparser_->zone_ = prev_zone_;
+      parser_->reusable_preparser_->factory()->set_zone(prev_zone_);
     }
     ast_node_factory_scope_.Reset();
   }
@@ -148,12 +144,11 @@
 };
 
 void Parser::SetCachedData(ParseInfo* info) {
-  if (compile_options_ == ScriptCompiler::kNoCompileOptions) {
-    cached_parse_data_ = NULL;
-  } else {
-    DCHECK(info->cached_data() != NULL);
-    if (compile_options_ == ScriptCompiler::kConsumeParserCache) {
-      cached_parse_data_ = ParseData::FromCachedData(*info->cached_data());
+  DCHECK_NULL(cached_parse_data_);
+  if (consume_cached_parse_data()) {
+    cached_parse_data_ = ParseData::FromCachedData(*info->cached_data());
+    if (cached_parse_data_ == nullptr) {
+      compile_options_ = ScriptCompiler::kNoCompileOptions;
     }
   }
 }
@@ -219,7 +214,7 @@
                                             LanguageMode language_mode) {
   int materialized_literal_count = -1;
   int expected_property_count = -1;
-  int parameter_count = 0;
+  const int parameter_count = 0;
   if (name == nullptr) name = ast_value_factory()->empty_string();
 
   FunctionKind kind = call_super ? FunctionKind::kDefaultSubclassConstructor
@@ -282,59 +277,16 @@
 
   FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
       name, function_scope, body, materialized_literal_count,
-      expected_property_count, parameter_count,
+      expected_property_count, parameter_count, parameter_count,
       FunctionLiteral::kNoDuplicateParameters,
-      FunctionLiteral::kAnonymousExpression,
-      FunctionLiteral::kShouldLazyCompile, pos);
+      FunctionLiteral::kAnonymousExpression, default_eager_compile_hint(), pos,
+      true);
 
   function_literal->set_requires_class_field_init(requires_class_field_init);
 
   return function_literal;
 }
 
-
-// ----------------------------------------------------------------------------
-// Target is a support class to facilitate manipulation of the
-// Parser's target_stack_ (the stack of potential 'break' and
-// 'continue' statement targets). Upon construction, a new target is
-// added; it is removed upon destruction.
-
-class ParserTarget BASE_EMBEDDED {
- public:
-  ParserTarget(ParserBase<Parser>* parser, BreakableStatement* statement)
-      : variable_(&parser->impl()->target_stack_),
-        statement_(statement),
-        previous_(parser->impl()->target_stack_) {
-    parser->impl()->target_stack_ = this;
-  }
-
-  ~ParserTarget() { *variable_ = previous_; }
-
-  ParserTarget* previous() { return previous_; }
-  BreakableStatement* statement() { return statement_; }
-
- private:
-  ParserTarget** variable_;
-  BreakableStatement* statement_;
-  ParserTarget* previous_;
-};
-
-class ParserTargetScope BASE_EMBEDDED {
- public:
-  explicit ParserTargetScope(ParserBase<Parser>* parser)
-      : variable_(&parser->impl()->target_stack_),
-        previous_(parser->impl()->target_stack_) {
-    parser->impl()->target_stack_ = nullptr;
-  }
-
-  ~ParserTargetScope() { *variable_ = previous_; }
-
- private:
-  ParserTarget** variable_;
-  ParserTarget* previous_;
-};
-
-
 // ----------------------------------------------------------------------------
 // The CHECK_OK macro is a convenient macro to enforce error
 // handling for functions that may fail (by returning !*ok).
@@ -521,9 +473,7 @@
 }
 
 Expression* Parser::NewTargetExpression(int pos) {
-  static const int kNewTargetStringLength = 10;
-  auto proxy = NewUnresolved(ast_value_factory()->new_target_string(), pos,
-                             pos + kNewTargetStringLength);
+  auto proxy = NewUnresolved(ast_value_factory()->new_target_string(), pos);
   proxy->set_is_new_target();
   return proxy;
 }
@@ -547,7 +497,7 @@
     case Token::FALSE_LITERAL:
       return factory()->NewBooleanLiteral(false, pos);
     case Token::SMI: {
-      int value = scanner()->smi_value();
+      uint32_t value = scanner()->smi_value();
       return factory()->NewSmiLiteral(value, pos);
     }
     case Token::NUMBER: {
@@ -631,31 +581,48 @@
 
 Parser::Parser(ParseInfo* info)
     : ParserBase<Parser>(info->zone(), &scanner_, info->stack_limit(),
-                         info->extension(), info->ast_value_factory(), NULL),
+                         info->extension(), info->ast_value_factory(),
+                         info->isolate()->counters()->runtime_call_stats()),
       scanner_(info->unicode_cache()),
-      reusable_preparser_(NULL),
-      original_scope_(NULL),
-      target_stack_(NULL),
+      reusable_preparser_(nullptr),
+      original_scope_(nullptr),
+      mode_(PARSE_EAGERLY),  // Lazy mode must be set explicitly.
+      target_stack_(nullptr),
       compile_options_(info->compile_options()),
-      cached_parse_data_(NULL),
+      cached_parse_data_(nullptr),
       total_preparse_skipped_(0),
-      pre_parse_timer_(NULL),
-      parsing_on_main_thread_(true) {
+      parsing_on_main_thread_(true),
+      log_(nullptr) {
   // Even though we were passed ParseInfo, we should not store it in
   // Parser - this makes sure that Isolate is not accidentally accessed via
   // ParseInfo during background parsing.
   DCHECK(!info->script().is_null() || info->source_stream() != nullptr ||
          info->character_stream() != nullptr);
+  // Determine if functions can be lazily compiled. This is necessary to
+  // allow some of our builtin JS files to be lazily compiled. These
+  // builtins cannot be handled lazily by the parser, since we have to know
+  // if a function uses the special natives syntax, which is something the
+  // parser records.
+  // If the debugger requests compilation for break points, we cannot be
+  // aggressive about lazy compilation, because it might trigger compilation
+  // of functions without an outer context when setting a breakpoint through
+  // Debug::FindSharedFunctionInfoInScript
+  bool can_compile_lazily = FLAG_lazy && !info->is_debug();
+
+  // Consider compiling eagerly when targeting the code cache.
+  can_compile_lazily &= !(FLAG_serialize_eager && info->will_serialize());
+
+  set_default_eager_compile_hint(can_compile_lazily
+                                     ? FunctionLiteral::kShouldLazyCompile
+                                     : FunctionLiteral::kShouldEagerCompile);
   set_allow_lazy(FLAG_lazy && info->allow_lazy_parsing() &&
-                 !info->is_native() && info->extension() == nullptr);
+                 !info->is_native() && info->extension() == nullptr &&
+                 can_compile_lazily);
   set_allow_natives(FLAG_allow_natives_syntax || info->is_native());
   set_allow_tailcalls(FLAG_harmony_tailcalls && !info->is_native() &&
                       info->isolate()->is_tail_call_elimination_enabled());
   set_allow_harmony_do_expressions(FLAG_harmony_do_expressions);
-  set_allow_harmony_for_in(FLAG_harmony_for_in);
   set_allow_harmony_function_sent(FLAG_harmony_function_sent);
-  set_allow_harmony_restrictive_declarations(
-      FLAG_harmony_restrictive_declarations);
   set_allow_harmony_async_await(FLAG_harmony_async_await);
   set_allow_harmony_restrictive_generators(FLAG_harmony_restrictive_generators);
   set_allow_harmony_trailing_commas(FLAG_harmony_trailing_commas);
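
The constructor's lazy-compilation policy reduces to a single predicate: no lazy compilation while the debugger needs breakpoints, and prefer eager compilation when targeting the code cache. Restated standalone with the same conditions as the code above (a sketch, not the V8 implementation):

    #include <cassert>

    struct Flags {
      bool lazy = true;
      bool serialize_eager = false;
    };

    // Mirrors the decision in Parser::Parser.
    bool CanCompileLazily(const Flags& f, bool is_debug, bool will_serialize) {
      bool lazily = f.lazy && !is_debug;
      lazily &= !(f.serialize_eager && will_serialize);
      return lazily;
    }

    int main() {
      Flags f;
      assert(CanCompileLazily(f, /*is_debug=*/false, /*will_serialize=*/true));
      f.serialize_eager = true;
      assert(!CanCompileLazily(f, false, /*will_serialize=*/true));
      assert(!CanCompileLazily(f, /*is_debug=*/true, false));
    }
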
@@ -699,9 +666,10 @@
   // called in the main thread.
   DCHECK(parsing_on_main_thread_);
 
-  HistogramTimerScope timer_scope(isolate->counters()->parse(), true);
-  RuntimeCallTimerScope runtime_timer(isolate, &RuntimeCallStats::Parse);
-  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.Parse");
+  RuntimeCallTimerScope runtime_timer(
+      runtime_call_stats_, info->is_eval() ? &RuntimeCallStats::ParseEval
+                                           : &RuntimeCallStats::ParseProgram);
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.ParseProgram");
   Handle<String> source(String::cast(info->script()->source()));
   isolate->counters()->total_parse_size()->Increment(source->length());
   base::ElapsedTimer timer;
@@ -711,10 +679,10 @@
   fni_ = new (zone()) FuncNameInferrer(ast_value_factory(), zone());
 
   // Initialize parser state.
-  CompleteParserRecorder recorder;
+  ParserLogger logger;
 
   if (produce_cached_parse_data()) {
-    log_ = &recorder;
+    log_ = &logger;
   } else if (consume_cached_parse_data()) {
     cached_parse_data_->Initialize();
   }
@@ -734,7 +702,7 @@
   }
   HandleSourceURLComments(isolate, info->script());
 
-  if (FLAG_trace_parse && result != NULL) {
+  if (FLAG_trace_parse && result != nullptr) {
     double ms = timer.Elapsed().InMillisecondsF();
     if (info->is_eval()) {
       PrintF("[parsing eval");
@@ -747,10 +715,10 @@
     }
     PrintF(" - took %0.3f ms]\n", ms);
   }
-  if (produce_cached_parse_data()) {
-    if (result != NULL) *info->cached_data() = recorder.GetScriptData();
-    log_ = NULL;
+  if (produce_cached_parse_data() && result != nullptr) {
+    *info->cached_data() = logger.GetScriptData();
   }
+  log_ = nullptr;
   return result;
 }
 
@@ -762,7 +730,7 @@
   DCHECK_NULL(scope_state_);
   DCHECK_NULL(target_stack_);
 
-  Mode parsing_mode = allow_lazy() ? PARSE_LAZILY : PARSE_EAGERLY;
+  ParsingModeScope mode(this, allow_lazy() ? PARSE_LAZILY : PARSE_EAGERLY);
 
   FunctionLiteral* result = NULL;
   {
@@ -770,25 +738,16 @@
     DCHECK_NOT_NULL(outer);
     parsing_module_ = info->is_module();
     if (info->is_eval()) {
-      if (!outer->is_script_scope() || is_strict(info->language_mode())) {
-        parsing_mode = PARSE_EAGERLY;
-      }
       outer = NewEvalScope(outer);
     } else if (parsing_module_) {
       DCHECK_EQ(outer, info->script_scope());
       outer = NewModuleScope(info->script_scope());
-      // Never do lazy parsing in modules.  If we want to support this in the
-      // future, we must force context-allocation for all variables that are
-      // declared at the module level but not MODULE-allocated.
-      parsing_mode = PARSE_EAGERLY;
     }
 
     DeclarationScope* scope = outer->AsDeclarationScope();
 
     scope->set_start_position(0);
 
-    // Enter 'scope' with the given parsing mode.
-    ParsingModeScope parsing_mode_scope(this, parsing_mode);
     FunctionState function_state(&function_state_, &scope_state_, scope);
 
     ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
@@ -868,14 +827,13 @@
   return result;
 }
 
-
-FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info) {
+FunctionLiteral* Parser::ParseFunction(Isolate* isolate, ParseInfo* info) {
   // It's OK to use the Isolate & counters here, since this function is only
   // called in the main thread.
   DCHECK(parsing_on_main_thread_);
-  RuntimeCallTimerScope runtime_timer(isolate, &RuntimeCallStats::ParseLazy);
-  HistogramTimerScope timer_scope(isolate->counters()->parse_lazy());
-  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.ParseLazy");
+  RuntimeCallTimerScope runtime_timer(runtime_call_stats_,
+                                      &RuntimeCallStats::ParseFunction);
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.ParseFunction");
   Handle<String> source(String::cast(info->script()->source()));
   isolate->counters()->total_parse_size()->Increment(source->length());
   base::ElapsedTimer timer;
@@ -892,8 +850,8 @@
     std::unique_ptr<Utf16CharacterStream> stream(ScannerStream::For(
         source, shared_info->start_position(), shared_info->end_position()));
     Handle<String> name(String::cast(shared_info->name()));
-    result =
-        DoParseLazy(info, ast_value_factory()->GetString(name), stream.get());
+    result = DoParseFunction(info, ast_value_factory()->GetString(name),
+                             stream.get());
     if (result != nullptr) {
       Handle<String> inferred_name(shared_info->inferred_name());
       result->set_inferred_name(inferred_name);
@@ -922,9 +880,9 @@
   return FunctionLiteral::kAnonymousExpression;
 }
 
-FunctionLiteral* Parser::DoParseLazy(ParseInfo* info,
-                                     const AstRawString* raw_name,
-                                     Utf16CharacterStream* source) {
+FunctionLiteral* Parser::DoParseFunction(ParseInfo* info,
+                                         const AstRawString* raw_name,
+                                         Utf16CharacterStream* source) {
   scanner_.Initialize(source);
   DCHECK_NULL(scope_state_);
   DCHECK_NULL(target_stack_);
@@ -1130,15 +1088,19 @@
     }
     const AstRawString* local_name = ParseIdentifierName(CHECK_OK_VOID);
     const AstRawString* export_name = NULL;
+    Scanner::Location location = scanner()->location();
     if (CheckContextualKeyword(CStrVector("as"))) {
       export_name = ParseIdentifierName(CHECK_OK_VOID);
+      // Set the location to the whole "a as b" string, so that it makes sense
+      // both for errors due to "a" and for errors due to "b".
+      location.end_pos = scanner()->location().end_pos;
     }
     if (export_name == NULL) {
       export_name = local_name;
     }
     export_names->Add(export_name, zone());
     local_names->Add(local_name, zone());
-    export_locations->Add(scanner()->location(), zone());
+    export_locations->Add(location, zone());
     if (peek() == Token::RBRACE) break;
     Expect(Token::COMMA, CHECK_OK_VOID);
   }
@@ -1168,6 +1130,7 @@
   while (peek() != Token::RBRACE) {
     const AstRawString* import_name = ParseIdentifierName(CHECK_OK);
     const AstRawString* local_name = import_name;
+    Scanner::Location location = scanner()->location();
     // In the presence of 'as', the left-side of the 'as' can
     // be any IdentifierName. But without 'as', it must be a valid
     // BindingIdentifier.
@@ -1188,8 +1151,8 @@
     DeclareVariable(local_name, CONST, kNeedsInitialization, position(),
                     CHECK_OK);
 
-    NamedImport* import = new (zone()) NamedImport(
-        import_name, local_name, scanner()->location());
+    NamedImport* import =
+        new (zone()) NamedImport(import_name, local_name, location);
     result->Add(import, zone());
 
     if (peek() == Token::RBRACE) break;
@@ -1377,21 +1340,23 @@
   //    'export' Declaration
   //    'export' 'default' ... (handled in ParseExportDefault)
 
-  int pos = peek_position();
   Expect(Token::EXPORT, CHECK_OK);
+  int pos = position();
 
   Statement* result = nullptr;
   ZoneList<const AstRawString*> names(1, zone());
+  Scanner::Location loc = scanner()->peek_location();
   switch (peek()) {
     case Token::DEFAULT:
       return ParseExportDefault(ok);
 
     case Token::MUL: {
       Consume(Token::MUL);
+      loc = scanner()->location();
       ExpectContextualKeyword(CStrVector("from"), CHECK_OK);
       const AstRawString* module_specifier = ParseModuleSpecifier(CHECK_OK);
       ExpectSemicolon(CHECK_OK);
-      module()->AddStarExport(module_specifier, scanner()->location(), zone());
+      module()->AddStarExport(module_specifier, loc, zone());
       return factory()->NewEmptyStatement(pos);
     }
 
@@ -1472,11 +1437,11 @@
       ReportUnexpectedToken(scanner()->current_token());
       return nullptr;
   }
+  loc.end_pos = scanner()->location().end_pos;
 
   ModuleDescriptor* descriptor = module();
   for (int i = 0; i < names.length(); ++i) {
-    // TODO(neis): Provide better location.
-    descriptor->AddExport(names[i], names[i], scanner()->location(), zone());
+    descriptor->AddExport(names[i], names[i], loc, zone());
   }
 
   DCHECK_NOT_NULL(result);
@@ -1484,13 +1449,12 @@
 }
 
 VariableProxy* Parser::NewUnresolved(const AstRawString* name, int begin_pos,
-                                     int end_pos, VariableKind kind) {
-  return scope()->NewUnresolved(factory(), name, begin_pos, end_pos, kind);
+                                     VariableKind kind) {
+  return scope()->NewUnresolved(factory(), name, begin_pos, kind);
 }
 
 VariableProxy* Parser::NewUnresolved(const AstRawString* name) {
-  return scope()->NewUnresolved(factory(), name, scanner()->location().beg_pos,
-                                scanner()->location().end_pos);
+  return scope()->NewUnresolved(factory(), name, scanner()->location().beg_pos);
 }
 
 Declaration* Parser::DeclareVariable(const AstRawString* name,
@@ -1504,18 +1468,19 @@
                                      int pos, bool* ok) {
   DCHECK_NOT_NULL(name);
   VariableProxy* proxy = factory()->NewVariableProxy(
-      name, NORMAL_VARIABLE, scanner()->location().beg_pos,
-      scanner()->location().end_pos);
+      name, NORMAL_VARIABLE, scanner()->location().beg_pos);
   Declaration* declaration =
       factory()->NewVariableDeclaration(proxy, this->scope(), pos);
-  Declare(declaration, DeclarationDescriptor::NORMAL, mode, init, CHECK_OK);
+  Declare(declaration, DeclarationDescriptor::NORMAL, mode, init, ok, nullptr,
+          scanner()->location().end_pos);
+  if (!*ok) return nullptr;
   return declaration;
 }
 
 Variable* Parser::Declare(Declaration* declaration,
                           DeclarationDescriptor::Kind declaration_kind,
                           VariableMode mode, InitializationFlag init, bool* ok,
-                          Scope* scope) {
+                          Scope* scope, int var_end_pos) {
   if (scope == nullptr) {
     scope = this->scope();
   }
@@ -1524,11 +1489,18 @@
       declaration, mode, init, allow_harmony_restrictive_generators(),
       &sloppy_mode_block_scope_function_redefinition, ok);
   if (!*ok) {
+    // If we only have the start position of a proxy, we can't highlight the
+    // whole variable name.  Pretend its length is 1 so that we highlight at
+    // least the first character.
+    Scanner::Location loc(declaration->proxy()->position(),
+                          var_end_pos != kNoSourcePosition
+                              ? var_end_pos
+                              : declaration->proxy()->position() + 1);
     if (declaration_kind == DeclarationDescriptor::NORMAL) {
-      ReportMessage(MessageTemplate::kVarRedeclaration,
-                    declaration->proxy()->raw_name());
+      ReportMessageAt(loc, MessageTemplate::kVarRedeclaration,
+                      declaration->proxy()->raw_name());
     } else {
-      ReportMessage(MessageTemplate::kParamDupe);
+      ReportMessageAt(loc, MessageTemplate::kParamDupe);
     }
     return nullptr;
   }
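
The redeclaration-error location logic above pads the span to one character when only the proxy's start position is known, so at least the first character of the name is highlighted. The same computation as a standalone helper (kNoSourcePosition assumed to be -1, as elsewhere in V8):

    #include <cassert>

    constexpr int kNoSourcePosition = -1;

    struct Location {
      int beg_pos;
      int end_pos;
    };

    Location RedeclarationLocation(int proxy_pos, int var_end_pos) {
      // Fall back to a 1-character span when the real end is unknown.
      return {proxy_pos,
              var_end_pos != kNoSourcePosition ? var_end_pos : proxy_pos + 1};
    }

    int main() {
      assert(RedeclarationLocation(10, 15).end_pos == 15);
      assert(RedeclarationLocation(10, kNoSourcePosition).end_pos == 11);
    }
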
@@ -1947,7 +1919,6 @@
       for_info.parsing_result.declarations[0];
   if (!IsLexicalVariableMode(for_info.parsing_result.descriptor.mode) &&
       decl.pattern->IsVariableProxy() && decl.initializer != nullptr) {
-    DCHECK(!allow_harmony_for_in());
     ++use_counts_[v8::Isolate::kForInInitializer];
     const AstRawString* name = decl.pattern->AsVariableProxy()->raw_name();
     VariableProxy* single_var = NewUnresolved(name);
@@ -2034,8 +2005,7 @@
 
   *body_block = factory()->NewBlock(nullptr, 3, false, kNoSourcePosition);
   (*body_block)->statements()->Add(each_initialization_block, zone());
-  *each_variable = factory()->NewVariableProxy(temp, for_info->each_loc.beg_pos,
-                                               for_info->each_loc.end_pos);
+  *each_variable = factory()->NewVariableProxy(temp, for_info->position);
 }
 
 // Create a TDZ for any lexically-bound names in for in/of statements.
@@ -2483,7 +2453,7 @@
   AddArrowFunctionFormalParameters(parameters, expr, params_loc.end_pos,
                                    CHECK_OK_VOID);
 
-  if (parameters->Arity() > Code::kMaxArguments) {
+  if (parameters->arity > Code::kMaxArguments) {
     ReportMessageAt(params_loc, MessageTemplate::kMalformedArrowFunParamList);
     *ok = false;
     return;
@@ -2493,7 +2463,7 @@
   if (!parameters->is_simple) {
     this->classifier()->RecordNonSimpleParameter();
   }
-  for (int i = 0; i < parameters->Arity(); ++i) {
+  for (int i = 0; i < parameters->arity; ++i) {
     auto parameter = parameters->at(i);
     DeclareFormalParameter(parameters->scope, parameter);
     if (!this->classifier()
@@ -2551,8 +2521,6 @@
   int pos = function_token_pos == kNoSourcePosition ? peek_position()
                                                     : function_token_pos;
 
-  bool is_generator = IsGeneratorFunction(kind);
-
   // Anonymous functions were passed either the empty symbol or a null
   // handle as the function name.  Remember if we were passed a non-empty
   // handle to decide whether to invoke function name inference.
@@ -2566,7 +2534,7 @@
   FunctionLiteral::EagerCompileHint eager_compile_hint =
       function_state_->next_function_is_parenthesized()
           ? FunctionLiteral::kShouldEagerCompile
-          : FunctionLiteral::kShouldLazyCompile;
+          : default_eager_compile_hint();
 
   // Determine if the function can be parsed lazily. Lazy parsing is
   // different from lazy compilation; we need to parse more eagerly than we
@@ -2600,14 +2568,18 @@
   // parenthesis before the function means that it will be called
   // immediately). bar can be parsed lazily, but we need to parse it in a mode
   // that tracks unresolved variables.
-  DCHECK_IMPLIES(mode() == PARSE_LAZILY, FLAG_lazy);
-  DCHECK_IMPLIES(mode() == PARSE_LAZILY, allow_lazy());
-  DCHECK_IMPLIES(mode() == PARSE_LAZILY, extension_ == nullptr);
+  DCHECK_IMPLIES(parse_lazily(), FLAG_lazy);
+  DCHECK_IMPLIES(parse_lazily(), allow_lazy());
+  DCHECK_IMPLIES(parse_lazily(), extension_ == nullptr);
+
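+  // Preparsing is an option only when we parse lazily and nothing has
+  // requested eager compilation of this literal.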
+  bool can_preparse = parse_lazily() &&
+                      eager_compile_hint == FunctionLiteral::kShouldLazyCompile;
 
   bool is_lazy_top_level_function =
-      mode() == PARSE_LAZILY &&
-      eager_compile_hint == FunctionLiteral::kShouldLazyCompile &&
-      scope()->AllowsLazyParsingWithoutUnresolvedVariables();
+      can_preparse && impl()->AllowsLazyParsingWithoutUnresolvedVariables();
+
+  RuntimeCallTimerScope runtime_timer(runtime_call_stats_,
+                                      &RuntimeCallStats::ParseFunctionLiteral);
 
   // Determine whether we can still lazy parse the inner function.
   // The preconditions are:
@@ -2629,8 +2601,11 @@
   // will migrate unresolved variable into a Scope in the main Zone.
   // TODO(marja): Refactor parsing modes: simplify this.
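+  // With FLAG_lazy_inner_functions, any function we could preparse gets a
+  // temporary zone; otherwise only lazy top-level functions and lazily
+  // compiled function declarations do. Validated asm.js modules always use
+  // the main zone.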
   bool use_temp_zone =
-      allow_lazy() && function_type == FunctionLiteral::kDeclaration &&
-      eager_compile_hint != FunctionLiteral::kShouldEagerCompile &&
+      (FLAG_lazy_inner_functions
+           ? can_preparse
+           : (is_lazy_top_level_function ||
+              (allow_lazy() && function_type == FunctionLiteral::kDeclaration &&
+               eager_compile_hint == FunctionLiteral::kShouldLazyCompile))) &&
       !(FLAG_validate_asm && scope()->IsAsmModule());
   bool is_lazy_inner_function =
       use_temp_zone && FLAG_lazy_inner_functions && !is_lazy_top_level_function;
@@ -2638,40 +2613,20 @@
   // This Scope lives in the main zone. We'll migrate data into that zone later.
   DeclarationScope* scope = NewFunctionScope(kind);
   SetLanguageMode(scope, language_mode);
-
-  ZoneList<Statement*>* body = nullptr;
-  int arity = -1;
-  int materialized_literal_count = -1;
-  int expected_property_count = -1;
-  DuplicateFinder duplicate_finder(scanner()->unicode_cache());
-  bool should_be_used_once_hint = false;
-  bool has_duplicate_parameters;
-
-  FunctionState function_state(&function_state_, &scope_state_, scope);
 #ifdef DEBUG
   scope->SetScopeName(function_name);
 #endif
 
-  ExpressionClassifier formals_classifier(this, &duplicate_finder);
-
-  if (is_generator) PrepareGeneratorVariables(&function_state);
+  ZoneList<Statement*>* body = nullptr;
+  int materialized_literal_count = -1;
+  int expected_property_count = -1;
+  bool should_be_used_once_hint = false;
+  int num_parameters = -1;
+  int function_length = -1;
+  bool has_duplicate_parameters = false;
 
   Expect(Token::LPAREN, CHECK_OK);
-  int start_position = scanner()->location().beg_pos;
-  this->scope()->set_start_position(start_position);
-  ParserFormalParameters formals(scope);
-  ParseFormalParameterList(&formals, CHECK_OK);
-  arity = formals.Arity();
-  Expect(Token::RPAREN, CHECK_OK);
-  int formals_end_position = scanner()->location().end_pos;
-
-  CheckArityRestrictions(arity, kind, formals.has_rest, start_position,
-                         formals_end_position, CHECK_OK);
-  Expect(Token::LBRACE, CHECK_OK);
-  // Don't include the rest parameter into the function's formal parameter
-  // count (esp. the SharedFunctionInfo::internal_formal_parameter_count,
-  // which says whether we need to create an arguments adaptor frame).
-  if (formals.has_rest) arity--;
+  scope->set_start_position(scanner()->location().beg_pos);
 
   {
     // Temporary zones can nest. When we migrate free variables (see below), we
@@ -2684,26 +2639,25 @@
     // the previous zone is always restored after parsing the body. To be able
     // to do scope analysis correctly after full parsing, we migrate needed
     // information when the function is parsed.
-    Zone temp_zone(zone()->allocator());
+    Zone temp_zone(zone()->allocator(), ZONE_NAME);
     DiscardableZoneScope zone_scope(this, &temp_zone, use_temp_zone);
 #ifdef DEBUG
     if (use_temp_zone) scope->set_needs_migration();
 #endif
 
     // Eager or lazy parse? If is_lazy_top_level_function, we'll parse
-    // lazily. We'll call SkipLazyFunctionBody, which may decide to abort lazy
-    // parsing if it suspects that wasn't a good idea. If so (in which case the
-    // parser is expected to have backtracked), or if we didn't try to lazy
-    // parse in the first place, we'll have to parse eagerly.
+    // lazily. We'll call SkipFunction, which may decide to abort lazy
+    // parsing if it suspects that wasn't a good idea. If so (in which case
+    // the parser is expected to have backtracked), or if we didn't try to
+    // lazy parse in the first place, we'll have to parse eagerly.
     if (is_lazy_top_level_function || is_lazy_inner_function) {
       Scanner::BookmarkScope bookmark(scanner());
       bookmark.Set();
-      LazyParsingResult result = SkipLazyFunctionBody(
-          &materialized_literal_count, &expected_property_count,
-          is_lazy_inner_function, is_lazy_top_level_function, CHECK_OK);
-
-      materialized_literal_count += formals.materialized_literals_count +
-                                    function_state.materialized_literal_count();
+      LazyParsingResult result =
+          SkipFunction(kind, scope, &num_parameters, &function_length,
+                       &has_duplicate_parameters, &materialized_literal_count,
+                       &expected_property_count, is_lazy_inner_function,
+                       is_lazy_top_level_function, CHECK_OK);
 
       if (result == kLazyParsingAborted) {
         DCHECK(is_lazy_top_level_function);
@@ -2723,31 +2677,41 @@
     }
 
     if (!is_lazy_top_level_function && !is_lazy_inner_function) {
-      body = ParseEagerFunctionBody(function_name, pos, formals, kind,
-                                    function_type, CHECK_OK);
-
-      materialized_literal_count = function_state.materialized_literal_count();
-      expected_property_count = function_state.expected_property_count();
+      body = ParseFunction(
+          function_name, pos, kind, function_type, scope, &num_parameters,
+          &function_length, &has_duplicate_parameters,
+          &materialized_literal_count, &expected_property_count, CHECK_OK);
     }
 
-    if (use_temp_zone || is_lazy_top_level_function) {
+    DCHECK(use_temp_zone || !is_lazy_top_level_function);
+    if (use_temp_zone) {
       // If the preconditions are correct the function body should never be
       // accessed, but do this anyway for better behaviour if they're wrong.
       body = nullptr;
       scope->AnalyzePartially(&previous_zone_ast_node_factory);
     }
 
-    // Parsing the body may change the language mode in our scope.
-    language_mode = scope->language_mode();
+    if (FLAG_trace_preparse) {
+      PrintF("  [%s]: %i-%i %.*s\n",
+             is_lazy_top_level_function
+                 ? "Preparse no-resolution"
+                 : (use_temp_zone ? "Preparse resolution" : "Full parse"),
+             scope->start_position(), scope->end_position(),
+             function_name->byte_length(), function_name->raw_data());
+      if (is_lazy_top_level_function) {
+        CHANGE_CURRENT_RUNTIME_COUNTER(runtime_call_stats_,
+                                       PreParseNoVariableResolution);
+      } else if (use_temp_zone) {
+        CHANGE_CURRENT_RUNTIME_COUNTER(runtime_call_stats_,
+                                       PreParseWithVariableResolution);
+      }
+    }
 
-    // Validate name and parameter names. We can do this only after parsing the
-    // function, since the function can declare itself strict.
+    // Validate function name. We can do this only after parsing the function,
+    // since the function can declare itself strict.
+    language_mode = scope->language_mode();
     CheckFunctionName(language_mode, function_name, function_name_validity,
                       function_name_location, CHECK_OK);
-    const bool allow_duplicate_parameters =
-        is_sloppy(language_mode) && formals.is_simple && !IsConciseMethod(kind);
-    ValidateFormalParameters(language_mode, allow_duplicate_parameters,
-                             CHECK_OK);
 
     if (is_strict(language_mode)) {
       CheckStrictOctalLiteral(scope->start_position(), scope->end_position(),
@@ -2756,13 +2720,6 @@
                                          scope->end_position());
     }
     CheckConflictingVarDeclarations(scope, CHECK_OK);
-
-    if (body) {
-      // If body can be inspected, rewrite queued destructuring assignments
-      RewriteDestructuringAssignments();
-    }
-    has_duplicate_parameters =
-        !classifier()->is_valid_formal_parameter_list_without_duplicates();
   }  // DiscardableZoneScope goes out of scope.
 
   FunctionLiteral::ParameterFlag duplicate_parameters =
@@ -2772,8 +2729,8 @@
   // Note that the FunctionLiteral needs to be created in the main Zone again.
   FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
       function_name, scope, body, materialized_literal_count,
-      expected_property_count, arity, duplicate_parameters, function_type,
-      eager_compile_hint, pos);
+      expected_property_count, num_parameters, function_length,
+      duplicate_parameters, function_type, eager_compile_hint, pos, true);
   function_literal->set_function_token_position(function_token_pos);
   if (should_be_used_once_hint)
     function_literal->set_should_be_used_once_hint();
@@ -2785,44 +2742,72 @@
   return function_literal;
 }
 
-Parser::LazyParsingResult Parser::SkipLazyFunctionBody(
+Parser::LazyParsingResult Parser::SkipFunction(
+    FunctionKind kind, DeclarationScope* function_scope, int* num_parameters,
+    int* function_length, bool* has_duplicate_parameters,
     int* materialized_literal_count, int* expected_property_count,
     bool is_inner_function, bool may_abort, bool* ok) {
+  DCHECK_NE(kNoSourcePosition, function_scope->start_position());
   if (produce_cached_parse_data()) CHECK(log_);
 
-  int function_block_pos = position();
-  DeclarationScope* scope = function_state_->scope();
-  DCHECK(scope->is_function_scope());
+  DCHECK_IMPLIES(IsArrowFunction(kind),
+                 scanner()->current_token() == Token::ARROW);
+
   // Inner functions are not part of the cached data.
   if (!is_inner_function && consume_cached_parse_data() &&
       !cached_parse_data_->rejected()) {
-    // If we have cached data, we use it to skip parsing the function body. The
-    // data contains the information we need to construct the lazy function.
+    // If we have cached data, we use it to skip parsing the function. The data
+    // contains the information we need to construct the lazy function.
     FunctionEntry entry =
-        cached_parse_data_->GetFunctionEntry(function_block_pos);
+        cached_parse_data_->GetFunctionEntry(function_scope->start_position());
     // Check that cached data is valid. If not, mark it as invalid (the embedder
     // handles it). Note that end position greater than end of stream is safe,
     // and hard to check.
-    if (entry.is_valid() && entry.end_pos() > function_block_pos) {
+    if (entry.is_valid() &&
+        entry.end_pos() > function_scope->start_position()) {
+      total_preparse_skipped_ += entry.end_pos() - position();
+      function_scope->set_end_position(entry.end_pos());
       scanner()->SeekForward(entry.end_pos() - 1);
-
-      scope->set_end_position(entry.end_pos());
       Expect(Token::RBRACE, CHECK_OK_VALUE(kLazyParsingComplete));
-      total_preparse_skipped_ += scope->end_position() - function_block_pos;
+      *num_parameters = entry.num_parameters();
+      *function_length = entry.function_length();
+      *has_duplicate_parameters = entry.has_duplicate_parameters();
       *materialized_literal_count = entry.literal_count();
       *expected_property_count = entry.property_count();
-      SetLanguageMode(scope, entry.language_mode());
-      if (entry.uses_super_property()) scope->RecordSuperPropertyUsage();
-      if (entry.calls_eval()) scope->RecordEvalCall();
+      SetLanguageMode(function_scope, entry.language_mode());
+      if (entry.uses_super_property())
+        function_scope->RecordSuperPropertyUsage();
+      if (entry.calls_eval()) function_scope->RecordEvalCall();
       return kLazyParsingComplete;
     }
     cached_parse_data_->Reject();
   }
+
   // With no cached data, we partially parse the function, without building an
   // AST. This gathers the data needed to build a lazy function.
-  SingletonLogger logger;
-  PreParser::PreParseResult result =
-      ParseLazyFunctionBodyWithPreParser(&logger, is_inner_function, may_abort);
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.PreParse");
+
+  if (reusable_preparser_ == NULL) {
+    reusable_preparser_ = new PreParser(zone(), &scanner_, ast_value_factory(),
+                                        &pending_error_handler_,
+                                        runtime_call_stats_, stack_limit_);
+    reusable_preparser_->set_allow_lazy(true);
+#define SET_ALLOW(name) reusable_preparser_->set_allow_##name(allow_##name());
+    SET_ALLOW(natives);
+    SET_ALLOW(harmony_do_expressions);
+    SET_ALLOW(harmony_function_sent);
+    SET_ALLOW(harmony_async_await);
+    SET_ALLOW(harmony_trailing_commas);
+    SET_ALLOW(harmony_class_fields);
+#undef SET_ALLOW
+  }
+  // Aborting inner function preparsing would leave scopes in an inconsistent
+  // state; we don't parse inner functions in the abortable mode anyway.
+  DCHECK(!is_inner_function || !may_abort);
+
+  PreParser::PreParseResult result = reusable_preparser_->PreParseFunction(
+      kind, function_scope, parsing_module_, is_inner_function, may_abort,
+      use_counts_);
 
   // Return immediately if pre-parser decided to abort parsing.
   if (result == PreParser::kPreParseAbort) return kLazyParsingAborted;
@@ -2832,28 +2817,27 @@
     *ok = false;
     return kLazyParsingComplete;
   }
-  if (logger.has_error()) {
-    ReportMessageAt(Scanner::Location(logger.start(), logger.end()),
-                    logger.message(), logger.argument_opt(),
-                    logger.error_type());
+  if (pending_error_handler_.has_pending_error()) {
     *ok = false;
     return kLazyParsingComplete;
   }
-  scope->set_end_position(logger.end());
+  PreParserLogger* logger = reusable_preparser_->logger();
+  function_scope->set_end_position(logger->end());
   Expect(Token::RBRACE, CHECK_OK_VALUE(kLazyParsingComplete));
-  total_preparse_skipped_ += scope->end_position() - function_block_pos;
-  *materialized_literal_count = logger.literals();
-  *expected_property_count = logger.properties();
-  SetLanguageMode(scope, logger.language_mode());
-  if (logger.uses_super_property()) scope->RecordSuperPropertyUsage();
-  if (logger.calls_eval()) scope->RecordEvalCall();
+  total_preparse_skipped_ +=
+      function_scope->end_position() - function_scope->start_position();
+  *num_parameters = logger->num_parameters();
+  *function_length = logger->function_length();
+  *has_duplicate_parameters = logger->has_duplicate_parameters();
+  *materialized_literal_count = logger->literals();
+  *expected_property_count = logger->properties();
   if (!is_inner_function && produce_cached_parse_data()) {
     DCHECK(log_);
-    // Position right after terminal '}'.
-    int body_end = scanner()->location().end_pos;
-    log_->LogFunction(function_block_pos, body_end, *materialized_literal_count,
-                      *expected_property_count, language_mode(),
-                      scope->uses_super_property(), scope->calls_eval());
+    log_->LogFunction(
+        function_scope->start_position(), function_scope->end_position(),
+        *num_parameters, *function_length, *has_duplicate_parameters,
+        *materialized_literal_count, *expected_property_count, language_mode(),
+        function_scope->uses_super_property(), function_scope->calls_eval());
   }
   return kLazyParsingComplete;
 }
@@ -3126,15 +3110,57 @@
                              Yield::kOnExceptionThrow);
 }
 
+ZoneList<Statement*>* Parser::ParseFunction(
+    const AstRawString* function_name, int pos, FunctionKind kind,
+    FunctionLiteral::FunctionType function_type,
+    DeclarationScope* function_scope, int* num_parameters, int* function_length,
+    bool* has_duplicate_parameters, int* materialized_literal_count,
+    int* expected_property_count, bool* ok) {
+  FunctionState function_state(&function_state_, &scope_state_, function_scope);
+
+  DuplicateFinder duplicate_finder(scanner()->unicode_cache());
+  ExpressionClassifier formals_classifier(this, &duplicate_finder);
+
+  if (IsGeneratorFunction(kind)) PrepareGeneratorVariables(&function_state);
+
+  ParserFormalParameters formals(function_scope);
+  ParseFormalParameterList(&formals, CHECK_OK);
+  Expect(Token::RPAREN, CHECK_OK);
+  int formals_end_position = scanner()->location().end_pos;
+  *num_parameters = formals.num_parameters();
+  *function_length = formals.function_length;
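+  // Note: formals.num_parameters() excludes a rest parameter, keeping it in
+  // sync with SharedFunctionInfo::internal_formal_parameter_count.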
+
+  CheckArityRestrictions(formals.arity, kind, formals.has_rest,
+                         function_scope->start_position(), formals_end_position,
+                         CHECK_OK);
+  Expect(Token::LBRACE, CHECK_OK);
+
+  ZoneList<Statement*>* body = ParseEagerFunctionBody(
+      function_name, pos, formals, kind, function_type, ok);
+
+  // Validate parameter names. We can do this only after parsing the function,
+  // since the function can declare itself strict.
+  const bool allow_duplicate_parameters =
+      is_sloppy(function_scope->language_mode()) && formals.is_simple &&
+      !IsConciseMethod(kind);
+  ValidateFormalParameters(function_scope->language_mode(),
+                           allow_duplicate_parameters, CHECK_OK);
+
+  RewriteDestructuringAssignments();
+
+  *has_duplicate_parameters =
+      !classifier()->is_valid_formal_parameter_list_without_duplicates();
+
+  *materialized_literal_count = function_state.materialized_literal_count();
+  *expected_property_count = function_state.expected_property_count();
+  return body;
+}
+
 ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
     const AstRawString* function_name, int pos,
     const ParserFormalParameters& parameters, FunctionKind kind,
     FunctionLiteral::FunctionType function_type, bool* ok) {
-  // Everything inside an eagerly parsed function will be parsed eagerly (see
-  // comment above). Lazy inner functions are handled separately and they won't
-  // require the mode to be PARSE_LAZILY (see ParseFunctionLiteral).
-  // TODO(marja): Refactor parsing modes: remove this.
-  ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
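+  // Inner functions of an eagerly parsed body may still be skipped, so stay
+  // in (or re-enter) PARSE_LAZILY whenever lazy parsing is allowed at all.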
+  ParsingModeScope mode(this, allow_lazy() ? PARSE_LAZILY : PARSE_EAGERLY);
   ZoneList<Statement*>* result = new(zone()) ZoneList<Statement*>(8, zone());
 
   static const int kFunctionNameAssignmentIndex = 0;
@@ -3286,46 +3312,6 @@
   return result;
 }
 
-PreParser::PreParseResult Parser::ParseLazyFunctionBodyWithPreParser(
-    SingletonLogger* logger, bool is_inner_function, bool may_abort) {
-  // This function may be called on a background thread too; record only the
-  // main thread preparse times.
-  if (pre_parse_timer_ != NULL) {
-    pre_parse_timer_->Start();
-  }
-  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.PreParse");
-
-  DCHECK_EQ(Token::LBRACE, scanner()->current_token());
-
-  if (reusable_preparser_ == NULL) {
-    reusable_preparser_ = new PreParser(zone(), &scanner_, ast_value_factory(),
-                                        NULL, stack_limit_);
-    reusable_preparser_->set_allow_lazy(true);
-#define SET_ALLOW(name) reusable_preparser_->set_allow_##name(allow_##name());
-    SET_ALLOW(natives);
-    SET_ALLOW(harmony_do_expressions);
-    SET_ALLOW(harmony_for_in);
-    SET_ALLOW(harmony_function_sent);
-    SET_ALLOW(harmony_restrictive_declarations);
-    SET_ALLOW(harmony_async_await);
-    SET_ALLOW(harmony_trailing_commas);
-    SET_ALLOW(harmony_class_fields);
-#undef SET_ALLOW
-  }
-  // Aborting inner function preparsing would leave scopes in an inconsistent
-  // state; we don't parse inner functions in the abortable mode anyway.
-  DCHECK(!is_inner_function || !may_abort);
-
-  DeclarationScope* function_scope = function_state_->scope();
-  PreParser::PreParseResult result = reusable_preparser_->PreParseLazyFunction(
-      function_scope, parsing_module_, logger, is_inner_function, may_abort,
-      use_counts_);
-  if (pre_parse_timer_ != NULL) {
-    pre_parse_timer_->Stop();
-  }
-  return result;
-}
-
 Expression* Parser::InstallHomeObject(Expression* function_literal,
                                       Expression* home_object) {
   Block* do_block = factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
@@ -3426,12 +3412,12 @@
   FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
       ast_value_factory()->empty_string(), initializer_scope, body,
       initializer_state.materialized_literal_count(),
-      initializer_state.expected_property_count(), 0,
+      initializer_state.expected_property_count(), 0, count,
       FunctionLiteral::kNoDuplicateParameters,
       FunctionLiteral::kAnonymousExpression,
-      FunctionLiteral::kShouldLazyCompile, initializer_scope->start_position());
+      FunctionLiteral::kShouldLazyCompile, initializer_scope->start_position(),
+      true);
   function_literal->set_is_class_field_initializer(true);
-  function_literal->scope()->set_arity(count);
   return function_literal;
 }
 
@@ -3442,7 +3428,7 @@
           constructor->scope(),
           constructor->scope()->NewUnresolved(
               factory(), ast_value_factory()->this_string(), kNoSourcePosition,
-              kNoSourcePosition + 4, THIS_VARIABLE)),
+              THIS_VARIABLE)),
       kNoSourcePosition);
   constructor->body()->InsertAt(0, call_initializer, zone());
   return constructor;
@@ -3636,6 +3622,7 @@
   }
   do_block->set_scope(scope()->FinalizeBlockScope());
   do_expr->set_represented_function(class_info->constructor);
+  AddFunctionForNameInference(class_info->constructor);
 
   return do_expr;
 }
@@ -3668,10 +3655,8 @@
   DCHECK(inner_scope->is_declaration_scope());
   Scope* function_scope = inner_scope->outer_scope();
   DCHECK(function_scope->is_function_scope());
-  ZoneList<Declaration*>* decls = inner_scope->declarations();
   BlockState block_state(&scope_state_, inner_scope);
-  for (int i = 0; i < decls->length(); ++i) {
-    Declaration* decl = decls->at(i);
+  for (Declaration* decl : *inner_scope->declarations()) {
     if (decl->proxy()->var()->mode() != VAR || !decl->IsVariableDeclaration()) {
       continue;
     }
@@ -3781,6 +3766,15 @@
   }
   isolate->counters()->total_preparse_skipped()->Increment(
       total_preparse_skipped_);
+  if (!parsing_on_main_thread_ &&
+      FLAG_runtime_stats ==
+          v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE) {
+    // Copy over the counters from the background thread to the main counters
+    // on the isolate.
+    // TODO(cbruni,lpy): properly attach the runtime stats to the trace for
+    // background parsing.
+    isolate->counters()->runtime_call_stats()->Add(runtime_call_stats_);
+  }
 }
 
 
@@ -3804,18 +3798,12 @@
   // Ok to use Isolate here; this function is only called in the main thread.
   DCHECK(parsing_on_main_thread_);
   Isolate* isolate = info->isolate();
-  pre_parse_timer_ = isolate->counters()->pre_parse();
 
-  if (info->is_lazy()) {
-    DCHECK(!info->is_eval());
-    if (info->shared_info()->is_function()) {
-      result = ParseLazy(isolate, info);
-    } else {
-      result = ParseProgram(isolate, info);
-    }
-  } else {
+  if (info->is_toplevel()) {
     SetCachedData(info);
     result = ParseProgram(isolate, info);
+  } else {
+    result = ParseFunction(isolate, info);
   }
   info->set_literal(result);
 
@@ -3830,8 +3818,12 @@
   DCHECK(info->literal() == NULL);
   FunctionLiteral* result = NULL;
 
-  CompleteParserRecorder recorder;
-  if (produce_cached_parse_data()) log_ = &recorder;
+  ParserLogger logger;
+  if (produce_cached_parse_data()) log_ = &logger;
+  if (FLAG_runtime_stats) {
+    // Create separate runtime stats for background parsing.
+    runtime_call_stats_ = new (zone()) RuntimeCallStats();
+  }
 
   std::unique_ptr<Utf16CharacterStream> stream;
   Utf16CharacterStream* stream_ptr;
@@ -3854,12 +3846,12 @@
   // don't). We work around this by storing all the scopes which need their end
   // position set at the end of the script (the top scope and possible eval
   // scopes) and set their end position after we know the script length.
-  if (info->is_lazy()) {
-    result = DoParseLazy(info, info->function_name(), stream_ptr);
-  } else {
+  if (info->is_toplevel()) {
     fni_ = new (zone()) FuncNameInferrer(ast_value_factory(), zone());
     scanner_.Initialize(stream_ptr);
     result = DoParseProgram(info);
+  } else {
+    result = DoParseFunction(info, info->function_name(), stream_ptr);
   }
 
   info->set_literal(result);
@@ -3868,9 +3860,13 @@
   // care of calling Parser::Internalize just before compilation.
 
   if (produce_cached_parse_data()) {
-    if (result != NULL) *info->cached_data() = recorder.GetScriptData();
+    if (result != NULL) *info->cached_data() = logger.GetScriptData();
     log_ = NULL;
   }
+  if (FLAG_runtime_stats) {
+    // TODO(cbruni,lpy): properly attach the runtime stats to the trace for
+    // background parsing.
+  }
 }
 
 Parser::TemplateLiteralState Parser::OpenTemplateLiteral(int pos) {
@@ -3943,9 +3939,9 @@
             const_cast<ZoneList<Expression*>*>(raw_strings), raw_idx, pos),
         zone());
 
-    // Ensure hash is suitable as a Smi value
+    // Truncate the hash to Smi range.
     Smi* hash_obj = Smi::cast(Internals::IntToSmi(static_cast<int>(hash)));
-    args->Add(factory()->NewSmiLiteral(hash_obj->value(), pos), zone());
+    args->Add(factory()->NewNumberLiteral(hash_obj->value(), pos), zone());
 
     Expression* call_site = factory()->NewCallRuntime(
         Context::GET_TEMPLATE_CALL_SITE_INDEX, args, start);
@@ -4002,7 +3998,7 @@
     ZoneList<Expression*>* spread_list =
         new (zone()) ZoneList<Expression*>(0, zone());
     spread_list->Add(list->at(0)->AsSpread()->expression(), zone());
-    args->Add(factory()->NewCallRuntime(Context::SPREAD_ITERABLE_INDEX,
+    args->Add(factory()->NewCallRuntime(Runtime::kSpreadIterablePrepare,
                                         spread_list, kNoSourcePosition),
               zone());
     return args;
@@ -4338,8 +4334,7 @@
 
     Expression* result;
     DCHECK_NOT_NULL(lhs->raw_name());
-    result = ExpressionFromIdentifier(lhs->raw_name(), lhs->position(),
-                                      lhs->end_position());
+    result = ExpressionFromIdentifier(lhs->raw_name(), lhs->position());
     args->Add(left, zone());
     args->Add(right, zone());
     Expression* call =
diff --git a/src/parsing/parser.h b/src/parsing/parser.h
index 418bedf..736419d 100644
--- a/src/parsing/parser.h
+++ b/src/parsing/parser.h
@@ -7,9 +7,11 @@
 
 #include "src/ast/ast.h"
 #include "src/ast/scopes.h"
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
 #include "src/parsing/parser-base.h"
-#include "src/parsing/preparse-data.h"
 #include "src/parsing/preparse-data-format.h"
+#include "src/parsing/preparse-data.h"
 #include "src/parsing/preparser.h"
 #include "src/pending-compilation-error-handler.h"
 
@@ -29,11 +31,11 @@
   enum {
     kStartPositionIndex,
     kEndPositionIndex,
+    kNumParametersIndex,
+    kFunctionLengthIndex,
     kLiteralCountIndex,
     kPropertyCountIndex,
-    kLanguageModeIndex,
-    kUsesSuperPropertyIndex,
-    kCallsEvalIndex,
+    kFlagsIndex,
     kSize
   };
 
@@ -42,18 +44,43 @@
 
   FunctionEntry() : backing_() { }
 
-  int start_pos() { return backing_[kStartPositionIndex]; }
-  int end_pos() { return backing_[kEndPositionIndex]; }
-  int literal_count() { return backing_[kLiteralCountIndex]; }
-  int property_count() { return backing_[kPropertyCountIndex]; }
-  LanguageMode language_mode() {
-    DCHECK(is_valid_language_mode(backing_[kLanguageModeIndex]));
-    return static_cast<LanguageMode>(backing_[kLanguageModeIndex]);
-  }
-  bool uses_super_property() { return backing_[kUsesSuperPropertyIndex]; }
-  bool calls_eval() { return backing_[kCallsEvalIndex]; }
+  class LanguageModeField : public BitField<LanguageMode, 0, 1> {};
+  class UsesSuperPropertyField
+      : public BitField<bool, LanguageModeField::kNext, 1> {};
+  class CallsEvalField
+      : public BitField<bool, UsesSuperPropertyField::kNext, 1> {};
+  class HasDuplicateParametersField
+      : public BitField<bool, CallsEvalField::kNext, 1> {};
 
-  bool is_valid() { return !backing_.is_empty(); }
+  static uint32_t EncodeFlags(LanguageMode language_mode,
+                              bool uses_super_property, bool calls_eval,
+                              bool has_duplicate_parameters) {
+    return LanguageModeField::encode(language_mode) |
+           UsesSuperPropertyField::encode(uses_super_property) |
+           CallsEvalField::encode(calls_eval) |
+           HasDuplicateParametersField::encode(has_duplicate_parameters);
+  }
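+  // For example, EncodeFlags(STRICT, true, false, false) packs to 0b0011:
+  // bit 0 holds the language mode, bit 1 super-property use, bit 2 eval
+  // calls, and bit 3 duplicate parameters.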
+
+  int start_pos() const { return backing_[kStartPositionIndex]; }
+  int end_pos() const { return backing_[kEndPositionIndex]; }
+  int num_parameters() const { return backing_[kNumParametersIndex]; }
+  int function_length() const { return backing_[kFunctionLengthIndex]; }
+  int literal_count() const { return backing_[kLiteralCountIndex]; }
+  int property_count() const { return backing_[kPropertyCountIndex]; }
+  LanguageMode language_mode() const {
+    return LanguageModeField::decode(backing_[kFlagsIndex]);
+  }
+  bool uses_super_property() const {
+    return UsesSuperPropertyField::decode(backing_[kFlagsIndex]);
+  }
+  bool calls_eval() const {
+    return CallsEvalField::decode(backing_[kFlagsIndex]);
+  }
+  bool has_duplicate_parameters() const {
+    return HasDuplicateParametersField::decode(backing_[kFlagsIndex]);
+  }
+
+  bool is_valid() const { return !backing_.is_empty(); }
 
  private:
   Vector<unsigned> backing_;
@@ -75,8 +102,6 @@
   FunctionEntry GetFunctionEntry(int start);
   int FunctionCount();
 
-  bool HasError();
-
   unsigned* Data() {  // Writable data as unsigned int array.
     return reinterpret_cast<unsigned*>(const_cast<byte*>(script_data_->data()));
   }
@@ -107,7 +132,6 @@
 // JAVASCRIPT PARSING
 
 class Parser;
-class SingletonLogger;
 
 
 struct ParserFormalParameters : FormalParametersBase {
@@ -134,7 +158,6 @@
       : FormalParametersBase(scope), params(4, scope->zone()) {}
   ZoneList<Parameter> params;
 
-  int Arity() const { return params.length(); }
   const Parameter& at(int i) const { return params[i]; }
 };
 
@@ -168,7 +191,7 @@
   typedef ParserTargetScope TargetScope;
 };
 
-class Parser : public ParserBase<Parser> {
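+// V8_EXPORT_PRIVATE makes Parser usable outside the component (e.g. in
+// tests); NON_EXPORTED_BASE keeps the ParserBase<Parser> instantiation
+// itself unexported.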
+class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
  public:
   explicit Parser(ParseInfo* info);
   ~Parser() {
@@ -178,6 +201,8 @@
     cached_parse_data_ = NULL;
   }
 
+  static bool IsPreParser() { return false; }
+
   // Parses the source code represented by the compilation info and sets its
   // function literal.  Returns false (and deallocates any allocated AST
   // nodes) if parsing failed.
@@ -205,6 +230,27 @@
   friend class ParserBase<Parser>;
   friend class v8::internal::ExpressionClassifier<ParserTypes<Parser>>;
 
+  bool AllowsLazyParsingWithoutUnresolvedVariables() const {
+    return scope()->AllowsLazyParsingWithoutUnresolvedVariables(
+        original_scope_);
+  }
+
+  bool parse_lazily() const { return mode_ == PARSE_LAZILY; }
+  enum Mode { PARSE_LAZILY, PARSE_EAGERLY };
+
+  class ParsingModeScope BASE_EMBEDDED {
+   public:
+    ParsingModeScope(Parser* parser, Mode mode)
+        : parser_(parser), old_mode_(parser->mode_) {
+      parser_->mode_ = mode;
+    }
+    ~ParsingModeScope() { parser_->mode_ = old_mode_; }
+
+   private:
+    Parser* parser_;
+    Mode old_mode_;
+  };
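+  // Usage: ParsingModeScope mode(this, PARSE_EAGERLY); the previous mode is
+  // restored automatically when the scope object is destroyed.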
+
   // Runtime encoding of different completion modes.
   enum CompletionKind {
     kNormalCompletion,
@@ -230,9 +276,10 @@
   // Returns NULL if parsing failed.
   FunctionLiteral* ParseProgram(Isolate* isolate, ParseInfo* info);
 
-  FunctionLiteral* ParseLazy(Isolate* isolate, ParseInfo* info);
-  FunctionLiteral* DoParseLazy(ParseInfo* info, const AstRawString* raw_name,
-                               Utf16CharacterStream* source);
+  FunctionLiteral* ParseFunction(Isolate* isolate, ParseInfo* info);
+  FunctionLiteral* DoParseFunction(ParseInfo* info,
+                                   const AstRawString* raw_name,
+                                   Utf16CharacterStream* source);
 
   // Called by ParseProgram after setting up the scanner.
   FunctionLiteral* DoParseProgram(ParseInfo* info);
@@ -243,11 +290,12 @@
     return compile_options_;
   }
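+  // Both cache accessors are gated on allow_lazy(): parser-cache data is
+  // neither produced nor consumed when lazy parsing is disabled.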
   bool consume_cached_parse_data() const {
-    return compile_options_ == ScriptCompiler::kConsumeParserCache &&
-           cached_parse_data_ != NULL;
+    return allow_lazy() &&
+           compile_options_ == ScriptCompiler::kConsumeParserCache;
   }
   bool produce_cached_parse_data() const {
-    return compile_options_ == ScriptCompiler::kProduceParserCache;
+    return allow_lazy() &&
+           compile_options_ == ScriptCompiler::kProduceParserCache;
   }
 
   void ParseModuleItemList(ZoneList<Statement*>* body, bool* ok);
@@ -358,11 +406,13 @@
     void VisitObjectLiteral(ObjectLiteral* node, Variable** temp_var);
     void VisitArrayLiteral(ArrayLiteral* node, Variable** temp_var);
 
-    bool IsBindingContext() const { return IsBindingContext(context_); }
+    bool IsBindingContext() const {
+      return context_ == BINDING || context_ == INITIALIZER;
+    }
     bool IsInitializerContext() const { return context_ != ASSIGNMENT; }
-    bool IsAssignmentContext() const { return IsAssignmentContext(context_); }
-    bool IsAssignmentContext(PatternContext c) const;
-    bool IsBindingContext(PatternContext c) const;
+    bool IsAssignmentContext() const {
+      return context_ == ASSIGNMENT || context_ == ASSIGNMENT_INITIALIZER;
+    }
     bool IsSubPattern() const { return recursion_level_ > 1; }
     PatternContext SetAssignmentContextIfNeeded(Expression* node);
     PatternContext SetInitializerContextIfNeeded(Expression* node);
@@ -453,13 +503,13 @@
   void InsertSloppyBlockFunctionVarBindings(DeclarationScope* scope);
 
   VariableProxy* NewUnresolved(const AstRawString* name, int begin_pos,
-                               int end_pos = kNoSourcePosition,
                                VariableKind kind = NORMAL_VARIABLE);
   VariableProxy* NewUnresolved(const AstRawString* name);
   Variable* Declare(Declaration* declaration,
                     DeclarationDescriptor::Kind declaration_kind,
                     VariableMode mode, InitializationFlag init, bool* ok,
-                    Scope* declaration_scope = nullptr);
+                    Scope* declaration_scope = nullptr,
+                    int var_end_pos = kNoSourcePosition);
   Declaration* DeclareVariable(const AstRawString* name, VariableMode mode,
                                int pos, bool* ok);
   Declaration* DeclareVariable(const AstRawString* name, VariableMode mode,
@@ -480,13 +530,11 @@
   // by parsing the function with PreParser. Consumes the ending }.
   // If may_abort == true, the (pre-)parser may decide to abort skipping
   // in order to force the function to be eagerly parsed, after all.
-  LazyParsingResult SkipLazyFunctionBody(int* materialized_literal_count,
-                                         int* expected_property_count,
-                                         bool is_inner_function, bool may_abort,
-                                         bool* ok);
-
-  PreParser::PreParseResult ParseLazyFunctionBodyWithPreParser(
-      SingletonLogger* logger, bool is_inner_function, bool may_abort);
+  LazyParsingResult SkipFunction(
+      FunctionKind kind, DeclarationScope* function_scope, int* num_parameters,
+      int* function_length, bool* has_duplicate_parameters,
+      int* materialized_literal_count, int* expected_property_count,
+      bool is_inner_function, bool may_abort, bool* ok);
 
   Block* BuildParameterInitializationBlock(
       const ParserFormalParameters& parameters, bool* ok);
@@ -498,6 +546,13 @@
       const ParserFormalParameters& parameters, FunctionKind kind,
       FunctionLiteral::FunctionType function_type, bool* ok);
 
+  ZoneList<Statement*>* ParseFunction(
+      const AstRawString* function_name, int pos, FunctionKind kind,
+      FunctionLiteral::FunctionType function_type,
+      DeclarationScope* function_scope, int* num_parameters,
+      int* function_length, bool* has_duplicate_parameters,
+      int* materialized_literal_count, int* expected_property_count, bool* ok);
+
   void ThrowPendingError(Isolate* isolate, Handle<Script> script);
 
   class TemplateLiteral : public ZoneObject {
@@ -923,7 +978,7 @@
   }
 
   V8_INLINE Expression* ThisExpression(int pos = kNoSourcePosition) {
-    return NewUnresolved(ast_value_factory()->this_string(), pos, pos + 4,
+    return NewUnresolved(ast_value_factory()->this_string(), pos,
                          THIS_VARIABLE);
   }
 
@@ -935,12 +990,12 @@
   Literal* ExpressionFromLiteral(Token::Value token, int pos);
 
   V8_INLINE Expression* ExpressionFromIdentifier(
-      const AstRawString* name, int start_position, int end_position,
+      const AstRawString* name, int start_position,
       InferName infer = InferName::kYes) {
     if (infer == InferName::kYes) {
       fni_->PushVariableName(name);
     }
-    return NewUnresolved(name, start_position, end_position);
+    return NewUnresolved(name, start_position);
   }
 
   V8_INLINE Expression* ExpressionFromString(int pos) {
@@ -994,6 +1049,7 @@
                                     Expression* initializer,
                                     int initializer_end_position,
                                     bool is_rest) {
+    parameters->UpdateArityAndFunctionLength(initializer != nullptr, is_rest);
     bool is_simple = pattern->IsVariableProxy() && initializer == nullptr;
     const AstRawString* name = is_simple
                                    ? pattern->AsVariableProxy()->raw_name()
@@ -1076,6 +1132,7 @@
   Scanner scanner_;
   PreParser* reusable_preparser_;
   Scope* original_scope_;  // for ES5 function declarations in sloppy eval
+  Mode mode_;
 
   friend class ParserTarget;
   friend class ParserTargetScope;
@@ -1090,9 +1147,49 @@
   // parsing.
   int use_counts_[v8::Isolate::kUseCounterFeatureCount];
   int total_preparse_skipped_;
-  HistogramTimer* pre_parse_timer_;
-
   bool parsing_on_main_thread_;
+  ParserLogger* log_;
+};
+
+// ----------------------------------------------------------------------------
+// Target is a support class to facilitate manipulation of the
+// Parser's target_stack_ (the stack of potential 'break' and
+// 'continue' statement targets). Upon construction, a new target is
+// added; it is removed upon destruction.
+
+class ParserTarget BASE_EMBEDDED {
+ public:
+  ParserTarget(ParserBase<Parser>* parser, BreakableStatement* statement)
+      : variable_(&parser->impl()->target_stack_),
+        statement_(statement),
+        previous_(parser->impl()->target_stack_) {
+    parser->impl()->target_stack_ = this;
+  }
+
+  ~ParserTarget() { *variable_ = previous_; }
+
+  ParserTarget* previous() { return previous_; }
+  BreakableStatement* statement() { return statement_; }
+
+ private:
+  ParserTarget** variable_;
+  BreakableStatement* statement_;
+  ParserTarget* previous_;
+};
+
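+// A ParserTargetScope shields the statements in its dynamic extent from the
+// surrounding 'break' and 'continue' targets by clearing the target stack;
+// the stack is restored when the scope is destroyed.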
+class ParserTargetScope BASE_EMBEDDED {
+ public:
+  explicit ParserTargetScope(ParserBase<Parser>* parser)
+      : variable_(&parser->impl()->target_stack_),
+        previous_(parser->impl()->target_stack_) {
+    parser->impl()->target_stack_ = nullptr;
+  }
+
+  ~ParserTargetScope() { *variable_ = previous_; }
+
+ private:
+  ParserTarget** variable_;
+  ParserTarget* previous_;
 };
 
 }  // namespace internal
diff --git a/src/parsing/pattern-rewriter.cc b/src/parsing/pattern-rewriter.cc
index 7898f87..f3d9bb0 100644
--- a/src/parsing/pattern-rewriter.cc
+++ b/src/parsing/pattern-rewriter.cc
@@ -68,16 +68,6 @@
 }
 
 
-bool Parser::PatternRewriter::IsAssignmentContext(PatternContext c) const {
-  return c == ASSIGNMENT || c == ASSIGNMENT_INITIALIZER;
-}
-
-
-bool Parser::PatternRewriter::IsBindingContext(PatternContext c) const {
-  return c == BINDING || c == INITIALIZER;
-}
-
-
 Parser::PatternRewriter::PatternContext
 Parser::PatternRewriter::SetAssignmentContextIfNeeded(Expression* node) {
   PatternContext old_context = context();
@@ -142,9 +132,8 @@
   // an initial value in the declaration (because they are initialized upon
   // entering the function).
   const AstRawString* name = pattern->raw_name();
-  VariableProxy* proxy = factory()->NewVariableProxy(
-      name, NORMAL_VARIABLE, parser_->scanner()->location().beg_pos,
-      parser_->scanner()->location().end_pos);
+  VariableProxy* proxy =
+      factory()->NewVariableProxy(name, NORMAL_VARIABLE, pattern->position());
   Declaration* declaration = factory()->NewVariableDeclaration(
       proxy, descriptor_->scope, descriptor_->declaration_pos);
   Variable* var = parser_->Declare(
diff --git a/src/parsing/preparse-data-format.h b/src/parsing/preparse-data-format.h
index f7d9f68..30d1d75 100644
--- a/src/parsing/preparse-data-format.h
+++ b/src/parsing/preparse-data-format.h
@@ -14,22 +14,13 @@
  public:
   // Layout and constants of the preparse data exchange format.
   static const unsigned kMagicNumber = 0xBadDead;
-  static const unsigned kCurrentVersion = 11;
+  static const unsigned kCurrentVersion = 13;
 
   static const int kMagicOffset = 0;
   static const int kVersionOffset = 1;
-  static const int kHasErrorOffset = 2;
-  static const int kFunctionsSizeOffset = 3;
-  static const int kSizeOffset = 4;
-  static const int kHeaderSize = 5;
-
-  // If encoding a message, the following positions are fixed.
-  static const int kMessageStartPos = 0;
-  static const int kMessageEndPos = 1;
-  static const int kMessageArgCountPos = 2;
-  static const int kParseErrorTypePos = 3;
-  static const int kMessageTemplatePos = 4;
-  static const int kMessageArgPos = 5;
+  static const int kFunctionsSizeOffset = 2;
+  static const int kSizeOffset = 3;
+  static const int kHeaderSize = 4;
 
   static const unsigned char kNumberTerminator = 0x80u;
 };
diff --git a/src/parsing/preparse-data.cc b/src/parsing/preparse-data.cc
index e1ef74c..e9a4e8f 100644
--- a/src/parsing/preparse-data.cc
+++ b/src/parsing/preparse-data.cc
@@ -12,53 +12,36 @@
 namespace v8 {
 namespace internal {
 
+void ParserLogger::LogFunction(int start, int end, int num_parameters,
+                               int function_length,
+                               bool has_duplicate_parameters, int literals,
+                               int properties, LanguageMode language_mode,
+                               bool uses_super_property, bool calls_eval) {
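+  // The write order must match FunctionEntry's k*Index layout, which reads
+  // these values back by position.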
+  function_store_.Add(start);
+  function_store_.Add(end);
+  function_store_.Add(num_parameters);
+  function_store_.Add(function_length);
+  function_store_.Add(literals);
+  function_store_.Add(properties);
+  function_store_.Add(
+      FunctionEntry::EncodeFlags(language_mode, uses_super_property, calls_eval,
+                                 has_duplicate_parameters));
+}
 
-CompleteParserRecorder::CompleteParserRecorder() {
+ParserLogger::ParserLogger() {
   preamble_[PreparseDataConstants::kMagicOffset] =
       PreparseDataConstants::kMagicNumber;
   preamble_[PreparseDataConstants::kVersionOffset] =
       PreparseDataConstants::kCurrentVersion;
-  preamble_[PreparseDataConstants::kHasErrorOffset] = false;
   preamble_[PreparseDataConstants::kFunctionsSizeOffset] = 0;
   preamble_[PreparseDataConstants::kSizeOffset] = 0;
-  DCHECK_EQ(5, PreparseDataConstants::kHeaderSize);
+  DCHECK_EQ(4, PreparseDataConstants::kHeaderSize);
 #ifdef DEBUG
   prev_start_ = -1;
 #endif
 }
 
-
-void CompleteParserRecorder::LogMessage(int start_pos, int end_pos,
-                                        MessageTemplate::Template message,
-                                        const char* arg_opt,
-                                        ParseErrorType error_type) {
-  if (HasError()) return;
-  preamble_[PreparseDataConstants::kHasErrorOffset] = true;
-  function_store_.Reset();
-  STATIC_ASSERT(PreparseDataConstants::kMessageStartPos == 0);
-  function_store_.Add(start_pos);
-  STATIC_ASSERT(PreparseDataConstants::kMessageEndPos == 1);
-  function_store_.Add(end_pos);
-  STATIC_ASSERT(PreparseDataConstants::kMessageArgCountPos == 2);
-  function_store_.Add((arg_opt == NULL) ? 0 : 1);
-  STATIC_ASSERT(PreparseDataConstants::kParseErrorTypePos == 3);
-  function_store_.Add(error_type);
-  STATIC_ASSERT(PreparseDataConstants::kMessageTemplatePos == 4);
-  function_store_.Add(static_cast<unsigned>(message));
-  STATIC_ASSERT(PreparseDataConstants::kMessageArgPos == 5);
-  if (arg_opt != NULL) WriteString(CStrVector(arg_opt));
-}
-
-
-void CompleteParserRecorder::WriteString(Vector<const char> str) {
-  function_store_.Add(str.length());
-  for (int i = 0; i < str.length(); i++) {
-    function_store_.Add(str[i]);
-  }
-}
-
-
-ScriptData* CompleteParserRecorder::GetScriptData() {
+ScriptData* ParserLogger::GetScriptData() {
   int function_size = function_store_.size();
   int total_size = PreparseDataConstants::kHeaderSize + function_size;
   unsigned* data = NewArray<unsigned>(total_size);
diff --git a/src/parsing/preparse-data.h b/src/parsing/preparse-data.h
index ddc4d03..767484a 100644
--- a/src/parsing/preparse-data.h
+++ b/src/parsing/preparse-data.h
@@ -46,158 +46,64 @@
   DISALLOW_COPY_AND_ASSIGN(ScriptData);
 };
 
-// Abstract interface for preparse data recorder.
-class ParserRecorder {
+class PreParserLogger final {
  public:
-  ParserRecorder() { }
-  virtual ~ParserRecorder() { }
+  PreParserLogger()
+      : end_(-1),
+        num_parameters_(-1),
+        function_length_(-1),
+        has_duplicate_parameters_(false) {}
 
-  // Logs the scope and some details of a function literal in the source.
-  virtual void LogFunction(int start, int end, int literals, int properties,
-                           LanguageMode language_mode, bool uses_super_property,
-                           bool calls_eval) = 0;
-
-  // Logs an error message and marks the log as containing an error.
-  // Further logging will be ignored, and ExtractData will return a vector
-  // representing the error only.
-  virtual void LogMessage(int start, int end, MessageTemplate::Template message,
-                          const char* argument_opt,
-                          ParseErrorType error_type) = 0;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(ParserRecorder);
-};
-
-
-class SingletonLogger : public ParserRecorder {
- public:
-  SingletonLogger()
-      : has_error_(false), start_(-1), end_(-1), error_type_(kSyntaxError) {}
-  virtual ~SingletonLogger() {}
-
-  void Reset() { has_error_ = false; }
-
-  virtual void LogFunction(int start, int end, int literals, int properties,
-                           LanguageMode language_mode, bool uses_super_property,
-                           bool calls_eval) {
-    DCHECK(!has_error_);
-    start_ = start;
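+  // Each call overwrites the previous entry; the logger only ever holds the
+  // most recently preparsed function.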
+  void LogFunction(int end, int num_parameters, int function_length,
+                   bool has_duplicate_parameters, int literals,
+                   int properties) {
     end_ = end;
+    num_parameters_ = num_parameters;
+    function_length_ = function_length;
+    has_duplicate_parameters_ = has_duplicate_parameters;
     literals_ = literals;
     properties_ = properties;
-    language_mode_ = language_mode;
-    uses_super_property_ = uses_super_property;
-    calls_eval_ = calls_eval;
   }
 
-  // Logs an error message and marks the log as containing an error.
-  // Further logging will be ignored, and ExtractData will return a vector
-  // representing the error only.
-  virtual void LogMessage(int start, int end, MessageTemplate::Template message,
-                          const char* argument_opt, ParseErrorType error_type) {
-    if (has_error_) return;
-    has_error_ = true;
-    start_ = start;
-    end_ = end;
-    message_ = message;
-    argument_opt_ = argument_opt;
-    error_type_ = error_type;
-  }
-
-  bool has_error() const { return has_error_; }
-
-  int start() const { return start_; }
   int end() const { return end_; }
+  int num_parameters() const { return num_parameters_; }
+  int function_length() const { return function_length_; }
+  bool has_duplicate_parameters() const { return has_duplicate_parameters_; }
   int literals() const {
-    DCHECK(!has_error_);
     return literals_;
   }
   int properties() const {
-    DCHECK(!has_error_);
     return properties_;
   }
-  LanguageMode language_mode() const {
-    DCHECK(!has_error_);
-    return language_mode_;
-  }
-  bool uses_super_property() const {
-    DCHECK(!has_error_);
-    return uses_super_property_;
-  }
-  bool calls_eval() const {
-    DCHECK(!has_error_);
-    return calls_eval_;
-  }
-  ParseErrorType error_type() const {
-    DCHECK(has_error_);
-    return error_type_;
-  }
-  MessageTemplate::Template message() {
-    DCHECK(has_error_);
-    return message_;
-  }
-  const char* argument_opt() const {
-    DCHECK(has_error_);
-    return argument_opt_;
-  }
 
  private:
-  bool has_error_;
-  int start_;
   int end_;
   // For function entries.
+  int num_parameters_;
+  int function_length_;
+  bool has_duplicate_parameters_;
   int literals_;
   int properties_;
-  LanguageMode language_mode_;
-  bool uses_super_property_;
-  bool calls_eval_;
-  // For error messages.
-  MessageTemplate::Template message_;
-  const char* argument_opt_;
-  ParseErrorType error_type_;
 };
 
-
-class CompleteParserRecorder : public ParserRecorder {
+class ParserLogger final {
  public:
-  struct Key {
-    bool is_one_byte;
-    Vector<const byte> literal_bytes;
-  };
+  ParserLogger();
 
-  CompleteParserRecorder();
-  virtual ~CompleteParserRecorder() {}
+  void LogFunction(int start, int end, int num_parameters, int function_length,
+                   bool has_duplicate_parameters, int literals, int properties,
+                   LanguageMode language_mode, bool uses_super_property,
+                   bool calls_eval);
 
-  virtual void LogFunction(int start, int end, int literals, int properties,
-                           LanguageMode language_mode, bool uses_super_property,
-                           bool calls_eval) {
-    function_store_.Add(start);
-    function_store_.Add(end);
-    function_store_.Add(literals);
-    function_store_.Add(properties);
-    function_store_.Add(language_mode);
-    function_store_.Add(uses_super_property);
-    function_store_.Add(calls_eval);
-  }
-
-  // Logs an error message and marks the log as containing an error.
-  // Further logging will be ignored, and ExtractData will return a vector
-  // representing the error only.
-  virtual void LogMessage(int start, int end, MessageTemplate::Template message,
-                          const char* argument_opt, ParseErrorType error_type);
   ScriptData* GetScriptData();
 
-  bool HasError() {
-    return static_cast<bool>(preamble_[PreparseDataConstants::kHasErrorOffset]);
-  }
-  Vector<unsigned> ErrorMessageData() {
-    DCHECK(HasError());
-    return function_store_.ToVector();
-  }
-
  private:
-  void WriteString(Vector<const char> str);
-
   Collector<unsigned> function_store_;
   unsigned preamble_[PreparseDataConstants::kHeaderSize];
 
diff --git a/src/parsing/preparser.cc b/src/parsing/preparser.cc
index 88470f7..1b21c3d 100644
--- a/src/parsing/preparser.cc
+++ b/src/parsing/preparser.cc
@@ -83,12 +83,16 @@
   return symbol;
 }
 
-PreParser::PreParseResult PreParser::PreParseLazyFunction(
-    DeclarationScope* function_scope, bool parsing_module, ParserRecorder* log,
+PreParser::PreParseResult PreParser::PreParseFunction(
+    FunctionKind kind, DeclarationScope* function_scope, bool parsing_module,
     bool is_inner_function, bool may_abort, int* use_counts) {
+  RuntimeCallTimerScope runtime_timer(
+      runtime_call_stats_,
+      track_unresolved_variables_
+          ? &RuntimeCallStats::PreParseWithVariableResolution
+          : &RuntimeCallStats::PreParseNoVariableResolution);
   DCHECK_EQ(FUNCTION_SCOPE, function_scope->scope_type());
   parsing_module_ = parsing_module;
-  log_ = log;
   use_counts_ = use_counts;
   DCHECK(!track_unresolved_variables_);
   track_unresolved_variables_ = is_inner_function;
@@ -98,24 +102,62 @@
   // PreParser.
   DCHECK_NULL(scope_state_);
   FunctionState function_state(&function_state_, &scope_state_, function_scope);
-  DCHECK_EQ(Token::LBRACE, scanner()->current_token());
-  bool ok = true;
-  int start_position = peek_position();
-  LazyParsingResult result = ParseLazyFunctionLiteralBody(may_abort, &ok);
+  // This indirection is needed so that we can use the CHECK_OK macros.
+  bool ok_holder = true;
+  bool* ok = &ok_holder;
+
+  PreParserFormalParameters formals(function_scope);
+  bool has_duplicate_parameters = false;
+  DuplicateFinder duplicate_finder(scanner()->unicode_cache());
+  std::unique_ptr<ExpressionClassifier> formals_classifier;
+
+  // Parse non-arrow function parameters. For arrow functions, the parameters
+  // have already been parsed.
+  if (!IsArrowFunction(kind)) {
+    formals_classifier.reset(new ExpressionClassifier(this, &duplicate_finder));
+    // We return kPreParseSuccess in failure cases too; the errors are
+    // retrieved separately by Parser::SkipFunction.
+    ParseFormalParameterList(&formals, CHECK_OK_VALUE(kPreParseSuccess));
+    Expect(Token::RPAREN, CHECK_OK_VALUE(kPreParseSuccess));
+    int formals_end_position = scanner()->location().end_pos;
+
+    CheckArityRestrictions(
+        formals.arity, kind, formals.has_rest, function_scope->start_position(),
+        formals_end_position, CHECK_OK_VALUE(kPreParseSuccess));
+    has_duplicate_parameters =
+        !classifier()->is_valid_formal_parameter_list_without_duplicates();
+  }
+
+  Expect(Token::LBRACE, CHECK_OK_VALUE(kPreParseSuccess));
+  LazyParsingResult result = ParseStatementListAndLogFunction(
+      &formals, has_duplicate_parameters, may_abort, ok);
   use_counts_ = nullptr;
   track_unresolved_variables_ = false;
   if (result == kLazyParsingAborted) {
     return kPreParseAbort;
   } else if (stack_overflow()) {
     return kPreParseStackOverflow;
-  } else if (!ok) {
-    ReportUnexpectedToken(scanner()->current_token());
+  } else if (!*ok) {
+    DCHECK(pending_error_handler_->has_pending_error());
   } else {
     DCHECK_EQ(Token::RBRACE, scanner()->peek());
+
+    if (!IsArrowFunction(kind)) {
+      // Validate parameter names. We can do this only after parsing the
+      // function, since the function can declare itself strict.
+      const bool allow_duplicate_parameters =
+          is_sloppy(function_scope->language_mode()) && formals.is_simple &&
+          !IsConciseMethod(kind);
+      ValidateFormalParameters(function_scope->language_mode(),
+                               allow_duplicate_parameters,
+                               CHECK_OK_VALUE(kPreParseSuccess));
+    }
+
     if (is_strict(function_scope->language_mode())) {
       int end_pos = scanner()->location().end_pos;
-      CheckStrictOctalLiteral(start_position, end_pos, &ok);
-      CheckDecimalLiteralWithLeadingZero(start_position, end_pos);
+      CheckStrictOctalLiteral(function_scope->start_position(), end_pos, ok);
+      CheckDecimalLiteralWithLeadingZero(function_scope->start_position(),
+                                         end_pos);
     }
   }
   return kPreParseSuccess;
@@ -142,10 +184,14 @@
     LanguageMode language_mode, bool* ok) {
   // Function ::
   //   '(' FormalParameterList? ')' '{' FunctionBody '}'
+  RuntimeCallTimerScope runtime_timer(
+      runtime_call_stats_,
+      track_unresolved_variables_
+          ? &RuntimeCallStats::PreParseWithVariableResolution
+          : &RuntimeCallStats::PreParseNoVariableResolution);
 
   // Parse function body.
   PreParserStatementList body;
-  bool outer_is_script_scope = scope()->is_script_scope();
   DeclarationScope* function_scope = NewFunctionScope(kind);
   function_scope->SetLanguageMode(language_mode);
   FunctionState function_state(&function_state_, &scope_state_, function_scope);
@@ -163,17 +209,8 @@
   CheckArityRestrictions(formals.arity, kind, formals.has_rest, start_position,
                          formals_end_position, CHECK_OK);
 
-  // See Parser::ParseFunctionLiteral for more information about lazy parsing
-  // and lazy compilation.
-  bool is_lazily_parsed = (outer_is_script_scope && allow_lazy() &&
-                           !function_state_->this_function_is_parenthesized());
-
   Expect(Token::LBRACE, CHECK_OK);
-  if (is_lazily_parsed) {
-    ParseLazyFunctionLiteralBody(false, CHECK_OK);
-  } else {
-    ParseStatementList(body, Token::RBRACE, CHECK_OK);
-  }
+  ParseStatementList(body, Token::RBRACE, CHECK_OK);
   Expect(Token::RBRACE, CHECK_OK);
 
   // Parsing the body may change the language mode in our scope.
@@ -187,18 +224,26 @@
       is_sloppy(language_mode) && formals.is_simple && !IsConciseMethod(kind);
   ValidateFormalParameters(language_mode, allow_duplicate_parameters, CHECK_OK);
 
+  int end_position = scanner()->location().end_pos;
   if (is_strict(language_mode)) {
-    int end_position = scanner()->location().end_pos;
     CheckStrictOctalLiteral(start_position, end_position, CHECK_OK);
     CheckDecimalLiteralWithLeadingZero(start_position, end_position);
   }
+  function_scope->set_end_position(end_position);
+
+  if (FLAG_trace_preparse) {
+    PrintF("  [%s]: %i-%i\n",
+           track_unresolved_variables_ ? "Preparse resolution"
+                                       : "Preparse no-resolution",
+           function_scope->start_position(), function_scope->end_position());
+  }
 
   return Expression::Default();
 }
 
-PreParser::LazyParsingResult PreParser::ParseLazyFunctionLiteralBody(
+PreParser::LazyParsingResult PreParser::ParseStatementListAndLogFunction(
+    PreParserFormalParameters* formals, bool has_duplicate_parameters,
     bool may_abort, bool* ok) {
-  int body_start = position();
   PreParserStatementList body;
   LazyParsingResult result = ParseStatementList(
       body, Token::RBRACE, may_abort, CHECK_OK_VALUE(kLazyParsingComplete));
@@ -207,28 +252,26 @@
   // Position right after terminal '}'.
   DCHECK_EQ(Token::RBRACE, scanner()->peek());
   int body_end = scanner()->peek_location().end_pos;
-  DeclarationScope* scope = this->scope()->AsDeclarationScope();
-  DCHECK(scope->is_function_scope());
-  log_->LogFunction(body_start, body_end,
-                    function_state_->materialized_literal_count(),
-                    function_state_->expected_property_count(), language_mode(),
-                    scope->uses_super_property(), scope->calls_eval());
+  DCHECK(this->scope()->is_function_scope());
+  log_.LogFunction(body_end, formals->num_parameters(),
+                   formals->function_length, has_duplicate_parameters,
+                   function_state_->materialized_literal_count(),
+                   function_state_->expected_property_count());
   return kLazyParsingComplete;
 }
 
 PreParserExpression PreParser::ExpressionFromIdentifier(
-    PreParserIdentifier name, int start_position, int end_position,
-    InferName infer) {
+    PreParserIdentifier name, int start_position, InferName infer) {
   if (track_unresolved_variables_) {
     AstNodeFactory factory(ast_value_factory());
     // Setting the Zone is necessary because zone_ might be the temp Zone, and
     // AstValueFactory doesn't know about it.
     factory.set_zone(zone());
     DCHECK_NOT_NULL(name.string_);
-    scope()->NewUnresolved(&factory, name.string_, start_position, end_position,
+    scope()->NewUnresolved(&factory, name.string_, start_position,
                            NORMAL_VARIABLE);
   }
-  return PreParserExpression::FromIdentifier(name);
+  return PreParserExpression::FromIdentifier(name, zone());
 }
 
 void PreParser::DeclareAndInitializeVariables(
@@ -236,20 +279,23 @@
     const DeclarationDescriptor* declaration_descriptor,
     const DeclarationParsingResult::Declaration* declaration,
     ZoneList<const AstRawString*>* names, bool* ok) {
-  if (declaration->pattern.string_) {
+  if (declaration->pattern.identifiers_ != nullptr) {
+    DCHECK(FLAG_lazy_inner_functions);
     /* Mimic what Parser does when declaring variables (see
        Parser::PatternRewriter::VisitVariableProxy).
 
        var + no initializer -> RemoveUnresolved
-       let + no initializer -> RemoveUnresolved
+       let / const + no initializer -> RemoveUnresolved
        var + initializer -> RemoveUnresolved followed by NewUnresolved
-       let + initializer -> RemoveUnresolved
+       let / const + initializer -> RemoveUnresolved
     */
 
     if (declaration->initializer.IsEmpty() ||
-        declaration_descriptor->mode == VariableMode::LET) {
-      declaration_descriptor->scope->RemoveUnresolved(
-          declaration->pattern.string_);
+        (declaration_descriptor->mode == VariableMode::LET ||
+         declaration_descriptor->mode == VariableMode::CONST)) {
+      for (auto identifier : *(declaration->pattern.identifiers_)) {
+        declaration_descriptor->scope->RemoveUnresolved(identifier);
+      }
     }
   }
 }
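The non-arrow branch above defers `ValidateFormalParameters` until after the body has been parsed, because — as the comment in the hunk notes — the function can declare itself strict, and a `"use strict"` directive in the body retroactively changes whether duplicate parameters are legal. A minimal standalone sketch of that two-phase pattern (toy names and types for illustration, not V8's real classes):

```cpp
#include <cassert>
#include <string>
#include <unordered_set>
#include <vector>

enum class LanguageMode { kSloppy, kStrict };

struct Formals {
  std::vector<std::string> names;
  bool has_duplicates = false;
};

// Phase 1: while scanning the parameter list, only *record* duplicates;
// whether they are an error is not yet known.
void ParseFormals(const std::vector<std::string>& params, Formals* formals) {
  std::unordered_set<std::string> seen;
  for (const std::string& name : params) {
    if (!seen.insert(name).second) formals->has_duplicates = true;
    formals->names.push_back(name);
  }
}

// Phase 2: after the body has been parsed (and may have switched the scope
// to strict mode via a "use strict" directive), decide whether recorded
// duplicates are actually an error.
bool ValidateFormals(const Formals& formals, LanguageMode mode,
                     bool is_simple_parameter_list) {
  bool allow_duplicates =
      mode == LanguageMode::kSloppy && is_simple_parameter_list;
  return !formals.has_duplicates || allow_duplicates;
}

int main() {
  Formals f;
  ParseFormals({"a", "a"}, &f);
  // function (a, a) {}                -> legal in sloppy mode
  assert(ValidateFormals(f, LanguageMode::kSloppy, true));
  // function (a, a) { "use strict"; } -> SyntaxError
  assert(!ValidateFormals(f, LanguageMode::kStrict, true));
  return 0;
}
```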
diff --git a/src/parsing/preparser.h b/src/parsing/preparser.h
index 4b54748..f4687eb 100644
--- a/src/parsing/preparser.h
+++ b/src/parsing/preparser.h
@@ -118,27 +118,33 @@
   const AstRawString* string_;
   friend class PreParserExpression;
   friend class PreParser;
+  friend class PreParserFactory;
 };
 
 
 class PreParserExpression {
  public:
-  PreParserExpression() : code_(TypeField::encode(kEmpty)) {}
+  PreParserExpression()
+      : code_(TypeField::encode(kEmpty)), identifiers_(nullptr) {}
 
   static PreParserExpression Empty() { return PreParserExpression(); }
 
-  static PreParserExpression Default() {
-    return PreParserExpression(TypeField::encode(kExpression));
+  static PreParserExpression Default(
+      ZoneList<const AstRawString*>* identifiers = nullptr) {
+    return PreParserExpression(TypeField::encode(kExpression), identifiers);
   }
 
   static PreParserExpression Spread(PreParserExpression expression) {
-    return PreParserExpression(TypeField::encode(kSpreadExpression));
+    return PreParserExpression(TypeField::encode(kSpreadExpression),
+                               expression.identifiers_);
   }
 
-  static PreParserExpression FromIdentifier(PreParserIdentifier id) {
-    return PreParserExpression(TypeField::encode(kIdentifierExpression) |
-                                   IdentifierTypeField::encode(id.type_),
-                               id.string_);
+  static PreParserExpression FromIdentifier(PreParserIdentifier id,
+                                            Zone* zone) {
+    PreParserExpression expression(TypeField::encode(kIdentifierExpression) |
+                                   IdentifierTypeField::encode(id.type_));
+    expression.AddIdentifier(id.string_, zone);
+    return expression;
   }
 
   static PreParserExpression BinaryOperation(PreParserExpression left,
@@ -152,12 +158,16 @@
                                ExpressionTypeField::encode(kAssignment));
   }
 
-  static PreParserExpression ObjectLiteral() {
-    return PreParserExpression(TypeField::encode(kObjectLiteralExpression));
+  static PreParserExpression ObjectLiteral(
+      ZoneList<const AstRawString*>* identifiers = nullptr) {
+    return PreParserExpression(TypeField::encode(kObjectLiteralExpression),
+                               identifiers);
   }
 
-  static PreParserExpression ArrayLiteral() {
-    return PreParserExpression(TypeField::encode(kArrayLiteralExpression));
+  static PreParserExpression ArrayLiteral(
+      ZoneList<const AstRawString*>* identifiers = nullptr) {
+    return PreParserExpression(TypeField::encode(kArrayLiteralExpression),
+                               identifiers);
   }
 
   static PreParserExpression StringLiteral() {
@@ -313,7 +323,7 @@
 
   // More dummy implementations of things PreParser doesn't need to track:
   void set_index(int index) {}  // For YieldExpressions
-  void set_should_eager_compile() {}
+  void SetShouldEagerCompile() {}
   void set_should_be_used_once_hint() {}
 
   int position() const { return kNoSourcePosition; }
@@ -344,9 +354,20 @@
     kAssignment
   };
 
-  explicit PreParserExpression(uint32_t expression_code,
-                               const AstRawString* string = nullptr)
-      : code_(expression_code), string_(string) {}
+  explicit PreParserExpression(
+      uint32_t expression_code,
+      ZoneList<const AstRawString*>* identifiers = nullptr)
+      : code_(expression_code), identifiers_(identifiers) {}
+
+  void AddIdentifier(const AstRawString* identifier, Zone* zone) {
+    if (identifier == nullptr) {
+      return;
+    }
+    if (identifiers_ == nullptr) {
+      identifiers_ = new (zone) ZoneList<const AstRawString*>(1, zone);
+    }
+    identifiers_->Add(identifier, zone);
+  }
 
   // The first three bits are for the Type.
   typedef BitField<Type, 0, 3> TypeField;
@@ -368,31 +389,61 @@
   typedef BitField<bool, TypeField::kNext, 1> HasCoverInitializedNameField;
 
   uint32_t code_;
-  // Non-nullptr if the expression is one identifier.
-  const AstRawString* string_;
+  // If the PreParser is used in identifier tracking mode,
+  // PreParserExpression accumulates identifiers in that expression.
+  ZoneList<const AstRawString*>* identifiers_;
 
   friend class PreParser;
+  friend class PreParserFactory;
+  template <typename T>
+  friend class PreParserList;
 };
 
 
 // The pre-parser doesn't need to build lists of expressions, identifiers, or
-// the like.
+// the like. If the PreParser is used in identifier tracking mode, however, it
+// does need to build lists of identifiers.
 template <typename T>
 class PreParserList {
  public:
   // These functions make list->Add(some_expression) work (and do nothing).
-  PreParserList() : length_(0) {}
+  PreParserList() : length_(0), identifiers_(nullptr) {}
   PreParserList* operator->() { return this; }
-  void Add(T, void*) { ++length_; }
+  void Add(T, Zone* zone);
   int length() const { return length_; }
   static PreParserList Null() { return PreParserList(-1); }
   bool IsNull() const { return length_ == -1; }
 
  private:
-  explicit PreParserList(int n) : length_(n) {}
+  explicit PreParserList(int n) : length_(n), identifiers_(nullptr) {}
   int length_;
+  ZoneList<const AstRawString*>* identifiers_;
+
+  friend class PreParser;
+  friend class PreParserFactory;
 };
 
+template <>
+inline void PreParserList<PreParserExpression>::Add(
+    PreParserExpression expression, Zone* zone) {
+  if (expression.identifiers_ != nullptr) {
+    DCHECK(FLAG_lazy_inner_functions);
+    DCHECK(zone != nullptr);
+    if (identifiers_ == nullptr) {
+      identifiers_ = new (zone) ZoneList<const AstRawString*>(1, zone);
+    }
+    for (auto identifier : (*expression.identifiers_)) {
+      identifiers_->Add(identifier, zone);
+    }
+  }
+  ++length_;
+}
+
+template <typename T>
+void PreParserList<T>::Add(T, Zone* zone) {
+  ++length_;
+}
+
 typedef PreParserList<PreParserExpression> PreParserExpressionList;
 
 class PreParserStatement;
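The `Add` overloads above use the standard C++ idiom of fully specializing a single member function of a class template, so only the `PreParserList<PreParserExpression>` instantiation pays for identifier merging while every other list keeps the cheap count-only behavior. A minimal sketch of the idiom (`CountingList` and `Expr` are illustrative names, not V8's):

```cpp
#include <cassert>
#include <string>
#include <vector>

struct Expr {
  std::vector<std::string>* identifiers = nullptr;  // may be null
};

template <typename T>
class CountingList {
 public:
  void Add(T value, std::vector<std::string>* sink);
  int length() const { return length_; }

 private:
  int length_ = 0;
};

// Full specialization for one instantiation: expressions also contribute
// their accumulated identifiers. It must be defined before first use.
template <>
inline void CountingList<Expr>::Add(Expr e, std::vector<std::string>* sink) {
  if (e.identifiers != nullptr && sink != nullptr) {
    sink->insert(sink->end(), e.identifiers->begin(), e.identifiers->end());
  }
  ++length_;
}

// Generic case: every other T just bumps the length.
template <typename T>
void CountingList<T>::Add(T, std::vector<std::string>*) {
  ++length_;
}

int main() {
  std::vector<std::string> ids = {"x", "y"};
  std::vector<std::string> sink;
  CountingList<Expr> exprs;
  exprs.Add(Expr{&ids}, &sink);  // specialization: merges and counts
  CountingList<int> ints;
  ints.Add(42, nullptr);         // generic: only counts
  assert(exprs.length() == 1 && ints.length() == 1 && sink.size() == 2);
  return 0;
}
```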
@@ -480,10 +531,18 @@
 
 class PreParserFactory {
  public:
-  explicit PreParserFactory(void* unused_value_factory) {}
+  explicit PreParserFactory(AstValueFactory* ast_value_factory)
+      : zone_(ast_value_factory->zone()) {}
+
+  void set_zone(Zone* zone) { zone_ = zone; }
+
   PreParserExpression NewStringLiteral(PreParserIdentifier identifier,
                                        int pos) {
-    return PreParserExpression::Default();
+    // This is needed for object literal property names. Property names are
+    // normalized to string literals during object literal parsing.
+    PreParserExpression expression = PreParserExpression::Default();
+    expression.AddIdentifier(identifier.string_, zone_);
+    return expression;
   }
   PreParserExpression NewNumberLiteral(double number,
                                        int pos) {
@@ -500,7 +559,7 @@
   PreParserExpression NewArrayLiteral(PreParserExpressionList values,
                                       int first_spread_index, int literal_index,
                                       int pos) {
-    return PreParserExpression::ArrayLiteral();
+    return PreParserExpression::ArrayLiteral(values.identifiers_);
   }
   PreParserExpression NewClassLiteralProperty(PreParserExpression key,
                                               PreParserExpression value,
@@ -513,18 +572,18 @@
                                                PreParserExpression value,
                                                ObjectLiteralProperty::Kind kind,
                                                bool is_computed_name) {
-    return PreParserExpression::Default();
+    return PreParserExpression::Default(value.identifiers_);
   }
   PreParserExpression NewObjectLiteralProperty(PreParserExpression key,
                                                PreParserExpression value,
                                                bool is_computed_name) {
-    return PreParserExpression::Default();
+    return PreParserExpression::Default(value.identifiers_);
   }
   PreParserExpression NewObjectLiteral(PreParserExpressionList properties,
                                        int literal_index,
                                        int boilerplate_properties,
                                        int pos) {
-    return PreParserExpression::ObjectLiteral();
+    return PreParserExpression::ObjectLiteral(properties.identifiers_);
   }
   PreParserExpression NewVariableProxy(void* variable) {
     return PreParserExpression::Default();
@@ -599,10 +658,11 @@
   PreParserExpression NewFunctionLiteral(
       PreParserIdentifier name, Scope* scope, PreParserStatementList body,
       int materialized_literal_count, int expected_property_count,
-      int parameter_count,
+      int parameter_count, int function_length,
       FunctionLiteral::ParameterFlag has_duplicate_parameters,
       FunctionLiteral::FunctionType function_type,
-      FunctionLiteral::EagerCompileHint eager_compile_hint, int position) {
+      FunctionLiteral::EagerCompileHint eager_compile_hint, int position,
+      bool has_braces) {
     return PreParserExpression::Default();
   }
 
@@ -693,15 +753,15 @@
     static int dummy = 42;
     return &dummy;
   }
+
+ private:
+  Zone* zone_;
 };
 
 
 struct PreParserFormalParameters : FormalParametersBase {
   explicit PreParserFormalParameters(DeclarationScope* scope)
       : FormalParametersBase(scope) {}
-  int arity = 0;
-
-  int Arity() const { return arity; }
   PreParserIdentifier at(int i) { return PreParserIdentifier(); }  // Dummy
 };
 
@@ -779,11 +839,17 @@
   };
 
   PreParser(Zone* zone, Scanner* scanner, AstValueFactory* ast_value_factory,
-            ParserRecorder* log, uintptr_t stack_limit)
-      : ParserBase<PreParser>(zone, scanner, stack_limit, NULL,
-                              ast_value_factory, log),
+            PendingCompilationErrorHandler* pending_error_handler,
+            RuntimeCallStats* runtime_call_stats, uintptr_t stack_limit)
+      : ParserBase<PreParser>(zone, scanner, stack_limit, nullptr,
+                              ast_value_factory, runtime_call_stats),
         use_counts_(nullptr),
-        track_unresolved_variables_(false) {}
+        track_unresolved_variables_(false),
+        pending_error_handler_(pending_error_handler) {}
+
+  static bool const IsPreParser() { return true; }
+
+  PreParserLogger* logger() { return &log_; }
 
   // Pre-parse the program from the character stream; returns true on
   // success (even if parsing failed, the pre-parse data successfully
@@ -828,10 +894,11 @@
   // keyword and parameters, and have consumed the initial '{'.
   // At return, unless an error occurred, the scanner is positioned before the
   // the final '}'.
-  PreParseResult PreParseLazyFunction(DeclarationScope* function_scope,
-                                      bool parsing_module, ParserRecorder* log,
-                                      bool track_unresolved_variables,
-                                      bool may_abort, int* use_counts);
+  PreParseResult PreParseFunction(FunctionKind kind,
+                                  DeclarationScope* function_scope,
+                                  bool parsing_module,
+                                  bool track_unresolved_variables,
+                                  bool may_abort, int* use_counts);
 
  private:
   // These types form an algebra over syntactic categories that is just
@@ -849,9 +916,16 @@
       const PreParserFormalParameters& parameters, FunctionKind kind,
       FunctionLiteral::FunctionType function_type, bool* ok);
 
-  V8_INLINE LazyParsingResult SkipLazyFunctionBody(
+  // Indicates that the preparser never switches to another preparser for
+  // lazily parsed functions; it just stays where it is.
+  bool AllowsLazyParsingWithoutUnresolvedVariables() const { return false; }
+  bool parse_lazily() const { return false; }
+
+  V8_INLINE LazyParsingResult SkipFunction(
+      FunctionKind kind, DeclarationScope* function_scope, int* num_parameters,
+      int* function_length, bool* has_duplicate_parameters,
       int* materialized_literal_count, int* expected_property_count,
-      bool track_unresolved_variables, bool may_abort, bool* ok) {
+      bool is_inner_function, bool may_abort, bool* ok) {
     UNREACHABLE();
     return kLazyParsingComplete;
   }
@@ -860,7 +934,9 @@
       FunctionNameValidity function_name_validity, FunctionKind kind,
       int function_token_pos, FunctionLiteral::FunctionType function_type,
       LanguageMode language_mode, bool* ok);
-  LazyParsingResult ParseLazyFunctionLiteralBody(bool may_abort, bool* ok);
+  LazyParsingResult ParseStatementListAndLogFunction(
+      PreParserFormalParameters* formals, bool has_duplicate_parameters,
+      bool may_abort, bool* ok);
 
   struct TemplateLiteralState {};
 
@@ -1202,8 +1278,9 @@
                                  MessageTemplate::Template message,
                                  const char* arg = NULL,
                                  ParseErrorType error_type = kSyntaxError) {
-    log_->LogMessage(source_location.beg_pos, source_location.end_pos, message,
-                     arg, error_type);
+    pending_error_handler_->ReportMessageAt(source_location.beg_pos,
+                                            source_location.end_pos, message,
+                                            arg, error_type);
   }
 
   V8_INLINE void ReportMessageAt(Scanner::Location source_location,
@@ -1322,7 +1399,7 @@
   }
 
   PreParserExpression ExpressionFromIdentifier(
-      PreParserIdentifier name, int start_position, int end_position,
+      PreParserIdentifier name, int start_position,
       InferName infer = InferName::kYes);
 
   V8_INLINE PreParserExpression ExpressionFromString(int pos) {
@@ -1372,7 +1449,7 @@
                                     PreParserExpression initializer,
                                     int initializer_end_position,
                                     bool is_rest) {
-    ++parameters->arity;
+    parameters->UpdateArityAndFunctionLength(!initializer.IsEmpty(), is_rest);
   }
 
   V8_INLINE void DeclareFormalParameter(DeclarationScope* scope,
@@ -1408,7 +1485,7 @@
 
   V8_INLINE PreParserExpression
   ExpressionListToExpression(PreParserExpressionList args) {
-    return PreParserExpression::Default();
+    return PreParserExpression::Default(args.identifiers_);
   }
 
   V8_INLINE void AddAccessorPrefixToFunctionName(bool is_get,
@@ -1436,6 +1513,8 @@
 
   int* use_counts_;
   bool track_unresolved_variables_;
+  PreParserLogger log_;
+  PendingCompilationErrorHandler* pending_error_handler_;
 };
 
 PreParserExpression PreParser::SpreadCall(PreParserExpression function,
@@ -1454,7 +1533,6 @@
     PreParserIdentifier function_name, int pos,
     const PreParserFormalParameters& parameters, FunctionKind kind,
     FunctionLiteral::FunctionType function_type, bool* ok) {
-  ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
   PreParserStatementList result;
 
   Scope* inner_scope = scope();
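The error path above replaces `log_->LogMessage()` with `PendingCompilationErrorHandler::ReportMessageAt`, so a preparse error is recorded once and surfaced by the caller after preparsing finishes, instead of being written into the preparse data. A toy sketch of that record-now/report-later shape (simplified types and plain strings; the real handler stores message templates plus arguments, and ignores reports after the first):

```cpp
#include <cassert>
#include <string>

class PendingErrorHandler {
 public:
  // Record only the first error; later reports are dropped.
  void ReportMessageAt(int beg, int end, const std::string& message) {
    if (has_pending_error_) return;
    has_pending_error_ = true;
    beg_ = beg;
    end_ = end;
    message_ = message;
  }
  bool has_pending_error() const { return has_pending_error_; }
  std::string FormatMessage() const {
    return message_ + " at " + std::to_string(beg_) + "-" +
           std::to_string(end_);
  }

 private:
  bool has_pending_error_ = false;
  int beg_ = 0;
  int end_ = 0;
  std::string message_;
};

int main() {
  PendingErrorHandler handler;
  // During preparsing, ReportMessageAt() stands in for the old LogMessage():
  handler.ReportMessageAt(10, 12, "Unexpected token");
  handler.ReportMessageAt(20, 22, "never recorded");
  // Afterwards, the caller inspects the handler instead of the preparse log:
  assert(handler.has_pending_error());
  assert(handler.FormatMessage() == "Unexpected token at 10-12");
  return 0;
}
```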
diff --git a/src/parsing/rewriter.cc b/src/parsing/rewriter.cc
index 57009bd..69ac417 100644
--- a/src/parsing/rewriter.cc
+++ b/src/parsing/rewriter.cc
@@ -20,6 +20,7 @@
         result_assigned_(false),
         replacement_(nullptr),
         is_set_(false),
+        breakable_(false),
         zone_(ast_value_factory->zone()),
         closure_scope_(closure_scope),
         factory_(ast_value_factory) {
@@ -33,6 +34,7 @@
         result_assigned_(false),
         replacement_(nullptr),
         is_set_(false),
+        breakable_(false),
         zone_(ast_value_factory->zone()),
         closure_scope_(closure_scope),
         factory_(ast_value_factory) {
@@ -77,6 +79,22 @@
   // was hoping for.
   bool is_set_;
 
+  bool breakable_;
+
+  class BreakableScope final {
+   public:
+    explicit BreakableScope(Processor* processor, bool breakable = true)
+        : processor_(processor), previous_(processor->breakable_) {
+      processor->breakable_ = processor->breakable_ || breakable;
+    }
+
+    ~BreakableScope() { processor_->breakable_ = previous_; }
+
+   private:
+    Processor* processor_;
+    bool previous_;
+  };
+
   Zone* zone_;
   DeclarationScope* closure_scope_;
   AstNodeFactory factory_;
@@ -106,7 +124,13 @@
 
 
 void Processor::Process(ZoneList<Statement*>* statements) {
-  for (int i = statements->length() - 1; i >= 0; --i) {
+  // If we're in a breakable scope (named block, iteration, or switch), we walk
+  // all statements. The last value-producing statement before the break needs
+  // to assign to .result. If we're not in a breakable scope, only the last
+  // value-producing statement in the block assigns to .result, so we can stop
+  // early.
+  for (int i = statements->length() - 1; i >= 0 && (breakable_ || !is_set_);
+       --i) {
     Visit(statements->at(i));
     statements->Set(i, replacement_);
   }
@@ -122,7 +146,10 @@
   // with some JS VMs: For instance, using smjs, print(eval('var x = 7'))
   // returns 'undefined'. To obtain the same behavior with v8, we need
   // to prevent rewriting in that case.
-  if (!node->ignore_completion_value()) Process(node->statements());
+  if (!node->ignore_completion_value()) {
+    BreakableScope scope(this, node->labels() != nullptr);
+    Process(node->statements());
+  }
   replacement_ = node;
 }
 
@@ -140,35 +167,33 @@
 void Processor::VisitIfStatement(IfStatement* node) {
   // Rewrite both branches.
   bool set_after = is_set_;
+
   Visit(node->then_statement());
   node->set_then_statement(replacement_);
   bool set_in_then = is_set_;
+
   is_set_ = set_after;
   Visit(node->else_statement());
   node->set_else_statement(replacement_);
-  is_set_ = is_set_ && set_in_then;
-  replacement_ = node;
 
-  if (!is_set_) {
-    is_set_ = true;
-    replacement_ = AssignUndefinedBefore(node);
-  }
+  replacement_ = set_in_then && is_set_ ? node : AssignUndefinedBefore(node);
+  is_set_ = true;
 }
 
 
 void Processor::VisitIterationStatement(IterationStatement* node) {
-  // Rewrite the body.
-  bool set_after = is_set_;
-  is_set_ = false;  // We are in a loop, so we can't rely on [set_after].
+  // The statement may have to produce a value, so always assign undefined
+  // before.
+  // TODO(verwaest): Omit it if we know that there's no break/continue leaving
+  // it early.
+  DCHECK(breakable_ || !is_set_);
+  BreakableScope scope(this);
+
   Visit(node->body());
   node->set_body(replacement_);
-  is_set_ = is_set_ && set_after;
-  replacement_ = node;
 
-  if (!is_set_) {
-    is_set_ = true;
-    replacement_ = AssignUndefinedBefore(node);
-  }
+  replacement_ = AssignUndefinedBefore(node);
+  is_set_ = true;
 }
 
 
@@ -200,73 +225,72 @@
 void Processor::VisitTryCatchStatement(TryCatchStatement* node) {
   // Rewrite both try and catch block.
   bool set_after = is_set_;
+
   Visit(node->try_block());
   node->set_try_block(static_cast<Block*>(replacement_));
   bool set_in_try = is_set_;
+
   is_set_ = set_after;
   Visit(node->catch_block());
   node->set_catch_block(static_cast<Block*>(replacement_));
-  is_set_ = is_set_ && set_in_try;
-  replacement_ = node;
 
-  if (!is_set_) {
-    is_set_ = true;
-    replacement_ = AssignUndefinedBefore(node);
-  }
+  replacement_ = is_set_ && set_in_try ? node : AssignUndefinedBefore(node);
+  is_set_ = true;
 }
 
 
 void Processor::VisitTryFinallyStatement(TryFinallyStatement* node) {
-  // Rewrite both try and finally block (in reverse order).
-  bool set_after = is_set_;
-  is_set_ = true;  // Don't normally need to assign in finally block.
-  Visit(node->finally_block());
-  node->set_finally_block(replacement_->AsBlock());
-  {  // Save .result value at the beginning of the finally block and restore it
-     // at the end again: ".backup = .result; ...; .result = .backup"
-     // This is necessary because the finally block does not normally contribute
-     // to the completion value.
-     CHECK_NOT_NULL(closure_scope());
-     Variable* backup = closure_scope()->NewTemporary(
-         factory()->ast_value_factory()->dot_result_string());
-     Expression* backup_proxy = factory()->NewVariableProxy(backup);
-     Expression* result_proxy = factory()->NewVariableProxy(result_);
-     Expression* save = factory()->NewAssignment(
-         Token::ASSIGN, backup_proxy, result_proxy, kNoSourcePosition);
-     Expression* restore = factory()->NewAssignment(
-         Token::ASSIGN, result_proxy, backup_proxy, kNoSourcePosition);
-     node->finally_block()->statements()->InsertAt(
-         0, factory()->NewExpressionStatement(save, kNoSourcePosition), zone());
-     node->finally_block()->statements()->Add(
-         factory()->NewExpressionStatement(restore, kNoSourcePosition), zone());
+  // Only rewrite finally if it could contain 'break' or 'continue'. Always
+  // rewrite try.
+  if (breakable_) {
+    bool set_after = is_set_;
+    // Only set result before a 'break' or 'continue'.
+    is_set_ = true;
+    Visit(node->finally_block());
+    node->set_finally_block(replacement_->AsBlock());
+    // Save .result value at the beginning of the finally block and restore it
+    // at the end again: ".backup = .result; ...; .result = .backup"
+    // This is necessary because the finally block does not normally contribute
+    // to the completion value.
+    CHECK_NOT_NULL(closure_scope());
+    Variable* backup = closure_scope()->NewTemporary(
+        factory()->ast_value_factory()->dot_result_string());
+    Expression* backup_proxy = factory()->NewVariableProxy(backup);
+    Expression* result_proxy = factory()->NewVariableProxy(result_);
+    Expression* save = factory()->NewAssignment(
+        Token::ASSIGN, backup_proxy, result_proxy, kNoSourcePosition);
+    Expression* restore = factory()->NewAssignment(
+        Token::ASSIGN, result_proxy, backup_proxy, kNoSourcePosition);
+    node->finally_block()->statements()->InsertAt(
+        0, factory()->NewExpressionStatement(save, kNoSourcePosition), zone());
+    node->finally_block()->statements()->Add(
+        factory()->NewExpressionStatement(restore, kNoSourcePosition), zone());
+    is_set_ = set_after;
   }
-  is_set_ = set_after;
   Visit(node->try_block());
   node->set_try_block(replacement_->AsBlock());
-  replacement_ = node;
 
-  if (!is_set_) {
-    is_set_ = true;
-    replacement_ = AssignUndefinedBefore(node);
-  }
+  replacement_ = is_set_ ? node : AssignUndefinedBefore(node);
+  is_set_ = true;
 }
 
 
 void Processor::VisitSwitchStatement(SwitchStatement* node) {
-  // Rewrite statements in all case clauses (in reverse order).
+  // The statement may have to produce a value, so always assign undefined
+  // before.
+  // TODO(verwaest): Omit it if we know that there's no break/continue leaving
+  // it early.
+  DCHECK(breakable_ || !is_set_);
+  BreakableScope scope(this);
+  // Rewrite statements in all case clauses.
   ZoneList<CaseClause*>* clauses = node->cases();
-  bool set_after = is_set_;
   for (int i = clauses->length() - 1; i >= 0; --i) {
     CaseClause* clause = clauses->at(i);
     Process(clause->statements());
   }
-  is_set_ = is_set_ && set_after;
-  replacement_ = node;
 
-  if (!is_set_) {
-    is_set_ = true;
-    replacement_ = AssignUndefinedBefore(node);
-  }
+  replacement_ = AssignUndefinedBefore(node);
+  is_set_ = true;
 }
 
 
@@ -285,12 +309,9 @@
 void Processor::VisitWithStatement(WithStatement* node) {
   Visit(node->statement());
   node->set_statement(replacement_);
-  replacement_ = node;
 
-  if (!is_set_) {
-    is_set_ = true;
-    replacement_ = AssignUndefinedBefore(node);
-  }
+  replacement_ = is_set_ ? node : AssignUndefinedBefore(node);
+  is_set_ = true;
 }
 
 
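`BreakableScope` above is a plain save/restore RAII helper: entering a breakable construct ORs the flag in, and the destructor restores the previous value, so arbitrarily nested constructs compose correctly. A standalone sketch of just that mechanism (toy `Processor` mirroring the class in the hunk; the visitor logic is omitted):

```cpp
#include <cassert>

class Processor {
 public:
  bool breakable() const { return breakable_; }

  class BreakableScope {
   public:
    explicit BreakableScope(Processor* processor, bool breakable = true)
        : processor_(processor), previous_(processor->breakable_) {
      processor->breakable_ = processor->breakable_ || breakable;
    }
    ~BreakableScope() { processor_->breakable_ = previous_; }

   private:
    Processor* processor_;
    bool previous_;
  };

 private:
  bool breakable_ = false;
};

int main() {
  Processor p;
  assert(!p.breakable());
  {
    // Entering e.g. a labelled block: statements inside may need to assign
    // to .result before a 'break', so the rewriter walks all of them.
    Processor::BreakableScope outer(&p, /*breakable=*/true);
    assert(p.breakable());
    {
      // A nested non-breakable construct must not clear the flag.
      Processor::BreakableScope inner(&p, /*breakable=*/false);
      assert(p.breakable());
    }
  }
  assert(!p.breakable());  // previous value restored on scope exit
  return 0;
}
```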
diff --git a/src/parsing/scanner-character-streams.cc b/src/parsing/scanner-character-streams.cc
index 3f10cfa..f7c7fd5 100644
--- a/src/parsing/scanner-character-streams.cc
+++ b/src/parsing/scanner-character-streams.cc
@@ -14,6 +14,10 @@
 namespace v8 {
 namespace internal {
 
+namespace {
+const unibrow::uchar kUtf8Bom = 0xfeff;
+}  // namespace
+
 // ----------------------------------------------------------------------------
 // BufferedUtf16CharacterStreams
 //
@@ -259,7 +263,9 @@
   while (it < chunk.length && chars < position) {
     unibrow::uchar t =
         unibrow::Utf8::ValueOfIncremental(chunk.data[it], &incomplete_char);
-    if (t != unibrow::Utf8::kIncomplete) {
+    if (t == kUtf8Bom && current_.pos.chars == 0) {
+      // BOM detected at the beginning of the stream. Don't copy it.
+    } else if (t != unibrow::Utf8::kIncomplete) {
       chars++;
       if (t > unibrow::Utf16::kMaxNonSurrogateCharCode) chars++;
     }
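For context, `kUtf8Bom` is U+FEFF, whose UTF-8 encoding is the three bytes EF BB BF; the incremental decoder only yields it once the third byte has arrived, which is why the BOM check sits next to the `kIncomplete` test. A standalone sketch of the same skip-only-a-leading-BOM rule (hand-rolled decoder that assumes well-formed UTF-8; not V8's unibrow API):

```cpp
#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

// Decodes UTF-8 one byte at a time and drops a byte order mark, but only if
// it is the very first code point of the stream.
std::vector<uint32_t> DecodeSkippingBom(const std::string& bytes) {
  const uint32_t kUtf8Bom = 0xfeff;
  std::vector<uint32_t> out;
  uint32_t cp = 0;
  int pending = 0;    // continuation bytes still expected
  bool first = true;  // are we decoding the stream's first code point?
  for (unsigned char b : bytes) {
    if (pending == 0) {
      if (b < 0x80) {
        cp = b;                                // 1-byte sequence
      } else if ((b >> 5) == 0x6) {
        cp = b & 0x1f; pending = 1; continue;  // 2-byte sequence
      } else if ((b >> 4) == 0xe) {
        cp = b & 0x0f; pending = 2; continue;  // 3-byte sequence
      } else {
        cp = b & 0x07; pending = 3; continue;  // 4-byte sequence
      }
    } else {
      cp = (cp << 6) | (b & 0x3f);
      if (--pending != 0) continue;            // still incomplete
    }
    if (!(first && cp == kUtf8Bom)) out.push_back(cp);
    first = false;
  }
  return out;
}

int main() {
  // A leading BOM is skipped...
  std::vector<uint32_t> cps = DecodeSkippingBom("\xEF\xBB\xBFvar");
  assert(cps.size() == 3 && cps[0] == 'v');
  // ...but a BOM later in the stream is an ordinary character and is kept.
  cps = DecodeSkippingBom("a\xEF\xBB\xBF");
  assert(cps.size() == 2 && cps[1] == 0xfeff);
  return 0;
}
```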
@@ -300,8 +306,6 @@
     return;
   }
 
-  static const unibrow::uchar kUtf8Bom = 0xfeff;
-
   unibrow::Utf8::Utf8IncrementalBuffer incomplete_char =
       current_.pos.incomplete_char;
   size_t it;
@@ -349,9 +353,9 @@
 
   // No chunks. Fetch at least one, so we can assume !chunks_.empty() below.
   if (chunks_.empty()) {
-    DCHECK_EQ(current_.chunk_no, 0);
-    DCHECK_EQ(current_.pos.bytes, 0);
-    DCHECK_EQ(current_.pos.chars, 0);
+    DCHECK_EQ(current_.chunk_no, 0u);
+    DCHECK_EQ(current_.pos.bytes, 0u);
+    DCHECK_EQ(current_.pos.chars, 0u);
     FetchChunk();
   }
 
@@ -438,7 +442,8 @@
     FillBufferFromCurrentChunk();
   }
 
-  DCHECK_EQ(current_.pos.chars - position, buffer_end_ - buffer_cursor_);
+  DCHECK_EQ(current_.pos.chars - position,
+            static_cast<size_t>(buffer_end_ - buffer_cursor_));
   return buffer_end_ - buffer_cursor_;
 }
 
@@ -497,7 +502,7 @@
   // let's look at chunks back-to-front.
   size_t chunk_no = chunks.size() - 1;
   while (chunks[chunk_no].byte_pos > position) {
-    DCHECK_NE(chunk_no, 0);
+    DCHECK_NE(chunk_no, 0u);
     chunk_no--;
   }
   DCHECK_LE(chunks[chunk_no].byte_pos, position);
@@ -537,6 +542,7 @@
   return len;
 }
 
+#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64)
 // ----------------------------------------------------------------------------
 // TwoByteExternalStreamingStream
 //
@@ -592,7 +598,7 @@
   // one_char_buffer_ to hold the full character.
   bool lonely_byte = (chunks_[chunk_no].byte_pos == (2 * position + 1));
   if (lonely_byte) {
-    DCHECK_NE(chunk_no, 0);
+    DCHECK_NE(chunk_no, 0u);
     Chunk& previous_chunk = chunks_[chunk_no - 1];
 #ifdef V8_TARGET_BIG_ENDIAN
     uc16 character = current.data[0] |
@@ -630,6 +636,162 @@
   return true;
 }
 
+#else
+
+// ----------------------------------------------------------------------------
+// TwoByteExternalBufferedStream
+//
+// This class is made specifically to address unaligned access to 16-bit data
+// on MIPS and ARM architectures. It replaces class
+// TwoByteExternalStreamingStream, which in some cases performs unaligned
+// accesses to 16-bit data.
+
+class TwoByteExternalBufferedStream : public Utf16CharacterStream {
+ public:
+  explicit TwoByteExternalBufferedStream(
+      ScriptCompiler::ExternalSourceStream* source);
+  ~TwoByteExternalBufferedStream();
+
+ protected:
+  static const size_t kBufferSize = 512;
+
+  bool ReadBlock() override;
+
+  // FillBuffer should read up to kBufferSize characters at position and store
+  // them into buffer_[0..]. It returns the number of characters stored.
+  size_t FillBuffer(size_t position, size_t chunk_no);
+
+  // Fixed-size buffer that this class reads from.
+  // The base class' buffer_start_ should always point to buffer_.
+  uc16 buffer_[kBufferSize];
+
+  Chunks chunks_;
+  ScriptCompiler::ExternalSourceStream* source_;
+};
+
+TwoByteExternalBufferedStream::TwoByteExternalBufferedStream(
+    ScriptCompiler::ExternalSourceStream* source)
+    : Utf16CharacterStream(buffer_, buffer_, buffer_, 0), source_(source) {}
+
+TwoByteExternalBufferedStream::~TwoByteExternalBufferedStream() {
+  DeleteChunks(chunks_);
+}
+
+bool TwoByteExternalBufferedStream::ReadBlock() {
+  size_t position = pos();
+  // Find the chunk in which the position belongs.
+  size_t chunk_no = FindChunk(chunks_, source_, 2 * position + 1);
+
+  // Out of data? Return 0.
+  if (chunks_[chunk_no].byte_length == 0) {
+    buffer_cursor_ = buffer_start_;
+    buffer_end_ = buffer_start_;
+    return false;
+  }
+
+  Chunk& current = chunks_[chunk_no];
+
+  bool odd_start = current.byte_pos % 2;
+  // Common case: character is in current chunk.
+  DCHECK_LE(current.byte_pos, 2 * position + odd_start);
+  DCHECK_LT(2 * position + 1, current.byte_pos + current.byte_length);
+
+  // If the character starts on an odd address, copy the text into the buffer
+  // so that access to characters is always aligned. This is important on MIPS
+  // and ARM architectures. Otherwise, read characters from memory directly.
+  if (!odd_start) {
+    buffer_start_ = reinterpret_cast<const uint16_t*>(current.data);
+    size_t number_chars = current.byte_length / 2;
+    buffer_end_ = buffer_start_ + number_chars;
+    buffer_pos_ = current.byte_pos / 2;
+    buffer_cursor_ = buffer_start_ + (position - buffer_pos_);
+    DCHECK_EQ(position, pos());
+    return true;
+  } else {
+    buffer_start_ = buffer_;
+    buffer_pos_ = position;
+    buffer_cursor_ = buffer_;
+    buffer_end_ = buffer_ + FillBuffer(position, chunk_no);
+    DCHECK_EQ(pos(), position);
+    DCHECK_LE(buffer_end_, buffer_start_ + kBufferSize);
+    return buffer_cursor_ < buffer_end_;
+  }
+}
+
+size_t TwoByteExternalBufferedStream::FillBuffer(size_t position,
+                                                 size_t chunk_no) {
+  DCHECK_EQ(chunks_[chunk_no].byte_pos % 2, 1u);
+  bool odd_start = true;
+  // Align buffer_pos_ to the size of the buffer.
+  {
+    size_t new_pos = position / kBufferSize * kBufferSize;
+    if (new_pos != position) {
+      chunk_no = FindChunk(chunks_, source_, 2 * new_pos + 1);
+      buffer_pos_ = new_pos;
+      buffer_cursor_ = buffer_start_ + (position - buffer_pos_);
+      position = new_pos;
+      odd_start = chunks_[chunk_no].byte_pos % 2;
+    }
+  }
+
+  Chunk* current = &chunks_[chunk_no];
+
+  // Annoying edge case: Chunks may not be 2-byte aligned, meaning that a
+  // character may be split between the previous and the current chunk.
+  // If we find such a lonely byte at the beginning of the chunk, we'll copy
+  // it to the first byte in buffer_.
+  size_t totalLength = 0;
+  bool lonely_byte = (current->byte_pos == (2 * position + 1));
+  if (lonely_byte) {
+    DCHECK_NE(chunk_no, 0u);
+    Chunk& previous_chunk = chunks_[chunk_no - 1];
+    *reinterpret_cast<uint8_t*>(buffer_) =
+        previous_chunk.data[previous_chunk.byte_length - 1];
+    totalLength++;
+  }
+
+  // Common case: character is in current chunk.
+  DCHECK_LE(current->byte_pos, 2 * position + odd_start);
+  DCHECK_LT(2 * position + 1, current->byte_pos + current->byte_length);
+
+  // Copy characters from the current chunk, starting at chunk_pos, to the end
+  // of the buffer or chunk.
+  size_t chunk_pos = position - current->byte_pos / 2;
+  size_t start_offset = odd_start && chunk_pos != 0;
+  size_t bytes_to_move =
+      i::Min(2 * kBufferSize - lonely_byte,
+             current->byte_length - 2 * chunk_pos + start_offset);
+  i::MemMove(reinterpret_cast<uint8_t*>(buffer_) + lonely_byte,
+             current->data + 2 * chunk_pos - start_offset, bytes_to_move);
+
+  // Fill up the rest of the buffer if there is space and data left.
+  totalLength += bytes_to_move;
+  position = (current->byte_pos + current->byte_length) / 2;
+  if (position - buffer_pos_ < kBufferSize) {
+    chunk_no = FindChunk(chunks_, source_, 2 * position + 1);
+    current = &chunks_[chunk_no];
+    odd_start = current->byte_pos % 2;
+    bytes_to_move = i::Min(2 * kBufferSize - totalLength, current->byte_length);
+    while (bytes_to_move) {
+      // Common case: character is in current chunk.
+      DCHECK_LE(current->byte_pos, 2 * position + odd_start);
+      DCHECK_LT(2 * position + 1, current->byte_pos + current->byte_length);
+
+      i::MemMove(reinterpret_cast<uint8_t*>(buffer_) + totalLength,
+                 current->data, bytes_to_move);
+      totalLength += bytes_to_move;
+      position = (current->byte_pos + current->byte_length) / 2;
+      chunk_no = FindChunk(chunks_, source_, 2 * position + 1);
+      current = &chunks_[chunk_no];
+      odd_start = current->byte_pos % 2;
+      bytes_to_move =
+          i::Min(2 * kBufferSize - totalLength, current->byte_length);
+    }
+  }
+  return totalLength / 2;
+}
+#endif
+
 // ----------------------------------------------------------------------------
 // ScannerStream: Create stream instances.
 
@@ -669,7 +831,11 @@
     v8::ScriptCompiler::StreamedSource::Encoding encoding) {
   switch (encoding) {
     case v8::ScriptCompiler::StreamedSource::TWO_BYTE:
+#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64)
       return new TwoByteExternalStreamingStream(source_stream);
+#else
+      return new TwoByteExternalBufferedStream(source_stream);
+#endif
     case v8::ScriptCompiler::StreamedSource::ONE_BYTE:
       return new OneByteExternalStreamingStream(source_stream);
     case v8::ScriptCompiler::StreamedSource::UTF8:
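The buffered stream selected above exists because dereferencing a `uint16_t*` at an odd byte address is undefined behavior in C++, and faults or traps on several MIPS and ARM configurations; moving the bytes into aligned storage first side-steps that. A minimal sketch of the underlying rule (standalone; the asserted values assume a little-endian host):

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

// Reads a uint16_t from p even when p is not 2-byte aligned, by moving the
// bytes into aligned storage first -- the same trick the buffered stream
// applies to whole chunks that start on an odd address.
uint16_t ReadU16(const uint8_t* p) {
  if (reinterpret_cast<uintptr_t>(p) % alignof(uint16_t) == 0) {
    return *reinterpret_cast<const uint16_t*>(p);  // aligned: load directly
  }
  uint16_t aligned;
  std::memcpy(&aligned, p, sizeof(aligned));  // misaligned: byte-wise copy
  return aligned;
}

int main() {
  alignas(uint16_t) uint8_t raw[6] = {0x34, 0x12, 0x78, 0x56, 0xBC, 0x9A};
  assert(ReadU16(raw) == 0x1234);      // even address (little-endian host)
  assert(ReadU16(raw + 1) == 0x7812);  // odd address: goes through memcpy
  return 0;
}
```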
diff --git a/src/parsing/scanner.cc b/src/parsing/scanner.cc
index e41b56f..363ab7d 100644
--- a/src/parsing/scanner.cc
+++ b/src/parsing/scanner.cc
@@ -59,7 +59,7 @@
   } else {
     scanner_->SeekNext(bookmark_);
     scanner_->Next();
-    DCHECK_EQ(scanner_->location().beg_pos, bookmark_);
+    DCHECK_EQ(scanner_->location().beg_pos, static_cast<int>(bookmark_));
   }
   bookmark_ = kBookmarkWasApplied;
 }
@@ -1153,7 +1153,7 @@
 
         if (next_.literal_chars->one_byte_literal().length() <= 10 &&
             value <= Smi::kMaxValue && c0_ != '.' && c0_ != 'e' && c0_ != 'E') {
-          next_.smi_value_ = static_cast<int>(value);
+          next_.smi_value_ = static_cast<uint32_t>(value);
           literal.Complete();
           HandleLeadSurrogate();
 
@@ -1638,7 +1638,7 @@
   // 3, re-scan, by scanning the look-ahead char + 1 token (next_).
   c0_ = source_->Advance();
   Next();
-  DCHECK_EQ(next_.location.beg_pos, position);
+  DCHECK_EQ(next_.location.beg_pos, static_cast<int>(position));
 }
 
 }  // namespace internal
diff --git a/src/parsing/scanner.h b/src/parsing/scanner.h
index b2b1a8a..6f6fab5 100644
--- a/src/parsing/scanner.h
+++ b/src/parsing/scanner.h
@@ -284,7 +284,7 @@
   }
 
   // Returns the value of the last smi that was scanned.
-  int smi_value() const { return current_.smi_value_; }
+  uint32_t smi_value() const { return current_.smi_value_; }
 
   // Seek forward to the given position.  This operation does not
   // work in general, for instance when there are pushed back
@@ -369,14 +369,15 @@
     INLINE(void AddChar(uc32 code_unit)) {
       if (position_ >= backing_store_.length()) ExpandBuffer();
       if (is_one_byte_) {
-        if (code_unit <= unibrow::Latin1::kMaxChar) {
+        if (code_unit <= static_cast<uc32>(unibrow::Latin1::kMaxChar)) {
           backing_store_[position_] = static_cast<byte>(code_unit);
           position_ += kOneByteSize;
           return;
         }
         ConvertToTwoByte();
       }
-      if (code_unit <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
+      if (code_unit <=
+          static_cast<uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
         *reinterpret_cast<uint16_t*>(&backing_store_[position_]) = code_unit;
         position_ += kUC16Size;
       } else {
@@ -487,7 +488,7 @@
     Location location;
     LiteralBuffer* literal_chars;
     LiteralBuffer* raw_literal_chars;
-    int smi_value_;
+    uint32_t smi_value_;
     Token::Value token;
   };
 
diff --git a/src/pending-compilation-error-handler.cc b/src/pending-compilation-error-handler.cc
index 3e88efc..8f7660d 100644
--- a/src/pending-compilation-error-handler.cc
+++ b/src/pending-compilation-error-handler.cc
@@ -13,20 +13,29 @@
 namespace v8 {
 namespace internal {
 
+Handle<String> PendingCompilationErrorHandler::ArgumentString(
+    Isolate* isolate) {
+  if (arg_ != NULL) return arg_->string();
+  if (char_arg_ != NULL) {
+    return isolate->factory()
+        ->NewStringFromUtf8(CStrVector(char_arg_))
+        .ToHandleChecked();
+  }
+  if (!handle_arg_.is_null()) return handle_arg_;
+  return isolate->factory()->undefined_string();
+}
+
+Handle<String> PendingCompilationErrorHandler::FormatMessage(Isolate* isolate) {
+  return MessageTemplate::FormatMessage(isolate, message_,
+                                        ArgumentString(isolate));
+}
+
 void PendingCompilationErrorHandler::ThrowPendingError(Isolate* isolate,
                                                        Handle<Script> script) {
   if (!has_pending_error_) return;
   MessageLocation location(script, start_position_, end_position_);
   Factory* factory = isolate->factory();
-  Handle<String> argument;
-  if (arg_ != NULL) {
-    argument = arg_->string();
-  } else if (char_arg_ != NULL) {
-    argument =
-        factory->NewStringFromUtf8(CStrVector(char_arg_)).ToHandleChecked();
-  } else if (!handle_arg_.is_null()) {
-    argument = handle_arg_;
-  }
+  Handle<String> argument = ArgumentString(isolate);
   isolate->debug()->OnCompileError(script);
 
   Handle<Object> error;
diff --git a/src/pending-compilation-error-handler.h b/src/pending-compilation-error-handler.h
index 6190d49..563bef9 100644
--- a/src/pending-compilation-error-handler.h
+++ b/src/pending-compilation-error-handler.h
@@ -75,8 +75,11 @@
   bool has_pending_error() const { return has_pending_error_; }
 
   void ThrowPendingError(Isolate* isolate, Handle<Script> script);
+  Handle<String> FormatMessage(Isolate* isolate);
 
  private:
+  Handle<String> ArgumentString(Isolate* isolate);
+
   bool has_pending_error_;
   int start_position_;
   int end_position_;
diff --git a/src/perf-jit.cc b/src/perf-jit.cc
index a8c7255..6641a12 100644
--- a/src/perf-jit.cc
+++ b/src/perf-jit.cc
@@ -272,7 +272,7 @@
                       static_cast<size_t>(name_length));
     name_string = std::unique_ptr<char[]>(buffer);
   }
-  DCHECK_EQ(name_length, strlen(name_string.get()));
+  DCHECK_EQ(name_length, static_cast<int>(strlen(name_string.get())));
 
   PerfJitCodeDebugInfo debug_info;
 
@@ -299,7 +299,7 @@
 
   for (SourcePositionTableIterator iterator(code->source_position_table());
        !iterator.done(); iterator.Advance()) {
-    int position = iterator.source_position();
+    int position = iterator.source_position().ScriptOffset();
     int line_number = Script::GetLineNumber(script, position);
     // Compute column.
     int relative_line_number = line_number - script_line_offset;
@@ -356,8 +356,8 @@
   }
 
   char padding_bytes[] = "\0\0\0\0\0\0\0\0";
-  DCHECK_LT(padding_size, sizeof(padding_bytes));
-  LogWriteBytes(padding_bytes, padding_size);
+  DCHECK_LT(padding_size, static_cast<int>(sizeof(padding_bytes)));
+  LogWriteBytes(padding_bytes, static_cast<int>(padding_size));
 }
 
 void PerfJitLogger::CodeMoveEvent(AbstractCode* from, Address to) {
diff --git a/src/ppc/assembler-ppc.h b/src/ppc/assembler-ppc.h
index 7843e2e..f49ac63 100644
--- a/src/ppc/assembler-ppc.h
+++ b/src/ppc/assembler-ppc.h
@@ -1216,7 +1216,8 @@
 
   // Record a deoptimization reason that can be used by a log or cpu profiler.
   // Use --trace-deopt to enable.
-  void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
+  void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
+                         int id);
 
   // Writes a single byte or word of data in the code stream.  Used
   // for inline tables, e.g., jump-tables.
diff --git a/src/ppc/code-stubs-ppc.cc b/src/ppc/code-stubs-ppc.cc
index ce423ea..a48fc06 100644
--- a/src/ppc/code-stubs-ppc.cc
+++ b/src/ppc/code-stubs-ppc.cc
@@ -561,7 +561,7 @@
   // If either is a Smi (we know that not both are), then they can only
   // be strictly equal if the other is a HeapNumber.
   STATIC_ASSERT(kSmiTag == 0);
-  DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+  DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
   __ and_(r5, lhs, rhs);
   __ JumpIfNotSmi(r5, &not_smis);
   // One operand is a smi.  EmitSmiNonsmiComparison generates code that can:
@@ -1576,13 +1576,10 @@
   __ SmiToShortArrayOffset(r4, r4);
   __ addi(r4, r4, Operand(2));
 
-  __ LoadP(r3, MemOperand(sp, kLastMatchInfoOffset));
-  __ JumpIfSmi(r3, &runtime);
-  __ CompareObjectType(r3, r5, r5, JS_OBJECT_TYPE);
-  __ bne(&runtime);
+  // Check that the last match info is a FixedArray.
+  __ LoadP(last_match_info_elements, MemOperand(sp, kLastMatchInfoOffset));
+  __ JumpIfSmi(last_match_info_elements, &runtime);
   // Check that the object has fast elements.
-  __ LoadP(last_match_info_elements,
-           FieldMemOperand(r3, JSArray::kElementsOffset));
   __ LoadP(r3,
            FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
   __ CompareRoot(r3, Heap::kFixedArrayMapRootIndex);
@@ -1591,7 +1588,7 @@
   // additional information.
   __ LoadP(
       r3, FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
-  __ addi(r5, r4, Operand(RegExpImpl::kLastMatchOverhead));
+  __ addi(r5, r4, Operand(RegExpMatchInfo::kLastMatchOverhead));
   __ SmiUntag(r0, r3);
   __ cmp(r5, r0);
   __ bgt(&runtime);
@@ -1601,21 +1598,23 @@
   // Store the capture count.
   __ SmiTag(r5, r4);
   __ StoreP(r5, FieldMemOperand(last_match_info_elements,
-                                RegExpImpl::kLastCaptureCountOffset),
+                                RegExpMatchInfo::kNumberOfCapturesOffset),
             r0);
   // Store last subject and last input.
   __ StoreP(subject, FieldMemOperand(last_match_info_elements,
-                                     RegExpImpl::kLastSubjectOffset),
+                                     RegExpMatchInfo::kLastSubjectOffset),
             r0);
   __ mr(r5, subject);
-  __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastSubjectOffset,
-                      subject, r10, kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ RecordWriteField(last_match_info_elements,
+                      RegExpMatchInfo::kLastSubjectOffset, subject, r10,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs);
   __ mr(subject, r5);
   __ StoreP(subject, FieldMemOperand(last_match_info_elements,
-                                     RegExpImpl::kLastInputOffset),
+                                     RegExpMatchInfo::kLastInputOffset),
             r0);
-  __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastInputOffset,
-                      subject, r10, kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ RecordWriteField(last_match_info_elements,
+                      RegExpMatchInfo::kLastInputOffset, subject, r10,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs);
 
   // Get the static offsets vector filled by the native regexp code.
   ExternalReference address_of_static_offsets_vector =
@@ -1626,10 +1625,10 @@
   // r5: offsets vector
   Label next_capture;
   // Capture register counter starts from number of capture registers and
-  // counts down until wraping after zero.
-  __ addi(
-      r3, last_match_info_elements,
-      Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag - kPointerSize));
+  // counts down until wrapping after zero.
+  __ addi(r3, last_match_info_elements,
+          Operand(RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag -
+                  kPointerSize));
   __ addi(r5, r5, Operand(-kIntSize));  // bias down for lwzu
   __ mtctr(r4);
   __ bind(&next_capture);
@@ -1641,7 +1640,7 @@
   __ bdnz(&next_capture);
 
   // Return last match info.
-  __ LoadP(r3, MemOperand(sp, kLastMatchInfoOffset));
+  __ mr(r3, last_match_info_elements);
   __ addi(sp, sp, Operand(4 * kPointerSize));
   __ Ret();
 
@@ -1873,6 +1872,7 @@
 }
 
 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
+  // r3 - number of arguments
   // r4 - function
   // r6 - slot id
   // r5 - vector
@@ -1881,25 +1881,22 @@
   __ cmp(r4, r8);
   __ bne(miss);
 
-  __ mov(r3, Operand(arg_count()));
-
   // Increment the call count for monomorphic function calls.
   IncrementCallCount(masm, r5, r6, r0);
 
   __ mr(r5, r7);
   __ mr(r6, r4);
-  ArrayConstructorStub stub(masm->isolate(), arg_count());
+  ArrayConstructorStub stub(masm->isolate());
   __ TailCallStub(&stub);
 }
 
 
 void CallICStub::Generate(MacroAssembler* masm) {
+  // r3 - number of arguments
   // r4 - function
   // r6 - slot id (Smi)
   // r5 - vector
   Label extra_checks_or_miss, call, call_function, call_count_incremented;
-  int argc = arg_count();
-  ParameterCount actual(argc);
 
   // The checks. First, does r4 match the recorded monomorphic target?
   __ SmiToPtrArrayOffset(r9, r6);
@@ -1933,7 +1930,6 @@
   // Increment the call count for monomorphic function calls.
   IncrementCallCount(masm, r5, r6, r0);
 
-  __ mov(r3, Operand(argc));
   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
                                                     tail_call_mode()),
           RelocInfo::CODE_TARGET);
@@ -1977,7 +1973,6 @@
   IncrementCallCount(masm, r5, r6, r0);
 
   __ bind(&call_count_incremented);
-  __ mov(r3, Operand(argc));
   __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
           RelocInfo::CODE_TARGET);
 
@@ -2010,13 +2005,12 @@
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
     CreateWeakCellStub create_stub(masm->isolate());
-    __ Push(r5);
-    __ Push(r6);
-    __ Push(cp, r4);
+    __ SmiTag(r3);
+    __ Push(r3, r5, r6, cp, r4);
     __ CallStub(&create_stub);
-    __ Pop(cp, r4);
-    __ Pop(r6);
-    __ Pop(r5);
+    __ Pop(r5, r6, cp, r4);
+    __ Pop(r3);
+    __ SmiUntag(r3);
   }
 
   __ b(&call_function);
@@ -2033,14 +2027,21 @@
 void CallICStub::GenerateMiss(MacroAssembler* masm) {
   FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
 
-  // Push the function and feedback info.
-  __ Push(r4, r5, r6);
+  // Preserve the number of arguments as a Smi.
+  __ SmiTag(r3);
+
+  // Push the number of arguments, the function, and the feedback info.
+  __ Push(r3, r4, r5, r6);
 
   // Call the entry.
   __ CallRuntime(Runtime::kCallIC_Miss);
 
   // Move result to r4 and exit the internal frame.
   __ mr(r4, r3);
+
+  // Restore number of arguments.
+  __ Pop(r3);
+  __ SmiUntag(r3);
 }
 
 
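The `SmiTag`/`SmiUntag` pairing around the runtime call keeps the raw argument count GC-safe while it lives on the stack: tagging shifts the integer left so its low tag bit stays zero (`kSmiTag == 0`, asserted earlier in this file), which is how the collector tells it apart from a heap pointer. A sketch with 1-bit tagging as on 32-bit targets (an assumption for illustration; pointer-size builds may shift further):

```cpp
#include <cassert>
#include <cstdint>

// 1-bit Smi tagging: small integers carry tag 0, heap pointers tag 1, so a
// tagged count can sit on a GC-scanned stack without being mistaken for a
// pointer. (Illustrative; real V8 builds choose the shift per platform.)
constexpr int kSmiTagSize = 1;

constexpr intptr_t SmiTag(intptr_t value) { return value << kSmiTagSize; }
constexpr intptr_t SmiUntag(intptr_t tagged) { return tagged >> kSmiTagSize; }
constexpr bool LooksLikeSmi(intptr_t word) {
  return (word & ((intptr_t{1} << kSmiTagSize) - 1)) == 0;
}

int main() {
  intptr_t argc = 3;
  intptr_t tagged = SmiTag(argc);    // what `__ SmiTag(r3)` does to r3
  assert(LooksLikeSmi(tagged));      // safe for the GC to scan on the stack
  assert(SmiUntag(tagged) == argc);  // restored after the runtime call
  return 0;
}
```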
@@ -3195,21 +3196,6 @@
   __ Ret();
 }
 
-
-void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  LoadICStub stub(isolate());
-  stub.GenerateForTrampoline(masm);
-}
-
-
-void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  KeyedLoadICStub stub(isolate());
-  stub.GenerateForTrampoline(masm);
-}
-
-
 void CallICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(r5);
   CallICStub stub(isolate(), state());
@@ -3217,14 +3203,6 @@
 }
 
 
-void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-
-void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-
 static void HandleArrayCases(MacroAssembler* masm, Register feedback,
                              Register receiver_map, Register scratch1,
                              Register scratch2, bool is_polymorphic,
@@ -3318,184 +3296,12 @@
   __ Jump(ip);
 }
 
-
-void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // r4
-  Register name = LoadWithVectorDescriptor::NameRegister();          // r5
-  Register vector = LoadWithVectorDescriptor::VectorRegister();      // r6
-  Register slot = LoadWithVectorDescriptor::SlotRegister();          // r3
-  Register feedback = r7;
-  Register receiver_map = r8;
-  Register scratch1 = r9;
-
-  __ SmiToPtrArrayOffset(r0, slot);
-  __ add(feedback, vector, r0);
-  __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
-                        scratch1, &compare_map, &load_smi_map, &try_array);
-
-  // Is it a fixed array?
-  __ bind(&try_array);
-  __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
-  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
-  __ bne(&not_array);
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, r10, true, &miss);
-
-  __ bind(&not_array);
-  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
-  __ bne(&miss);
-  masm->isolate()->load_stub_cache()->GenerateProbe(
-      masm, receiver, name, feedback, receiver_map, scratch1, r10);
-
-  __ bind(&miss);
-  LoadIC::GenerateMiss(masm);
-
-  __ bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ b(&compare_map);
-}
-
-
-void KeyedLoadICStub::Generate(MacroAssembler* masm) {
-  GenerateImpl(masm, false);
-}
-
-
-void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-
-void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // r4
-  Register key = LoadWithVectorDescriptor::NameRegister();           // r5
-  Register vector = LoadWithVectorDescriptor::VectorRegister();      // r6
-  Register slot = LoadWithVectorDescriptor::SlotRegister();          // r3
-  Register feedback = r7;
-  Register receiver_map = r8;
-  Register scratch1 = r9;
-
-  __ SmiToPtrArrayOffset(r0, slot);
-  __ add(feedback, vector, r0);
-  __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
-                        scratch1, &compare_map, &load_smi_map, &try_array);
-
-  __ bind(&try_array);
-  // Is it a fixed array?
-  __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
-  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
-  __ bne(&not_array);
-
-  // We have a polymorphic element handler.
-  Label polymorphic, try_poly_name;
-  __ bind(&polymorphic);
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, r10, true, &miss);
-
-  __ bind(&not_array);
-  // Is it generic?
-  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
-  __ bne(&try_poly_name);
-  Handle<Code> megamorphic_stub =
-      KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
-  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
-  __ bind(&try_poly_name);
-  // We might have a name in feedback, and a fixed array in the next slot.
-  __ cmp(key, feedback);
-  __ bne(&miss);
-  // If the name comparison succeeded, we know we have a fixed array with
-  // at least one map/handler pair.
-  __ SmiToPtrArrayOffset(r0, slot);
-  __ add(feedback, vector, r0);
-  __ LoadP(feedback,
-           FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, r10, false, &miss);
-
-  __ bind(&miss);
-  KeyedLoadIC::GenerateMiss(masm);
-
-  __ bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ b(&compare_map);
-}
-
-void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
-  StoreICStub stub(isolate(), state());
-  stub.GenerateForTrampoline(masm);
-}
-
 void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
   KeyedStoreICStub stub(isolate(), state());
   stub.GenerateForTrampoline(masm);
 }
 
-void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // r4
-  Register key = StoreWithVectorDescriptor::NameRegister();           // r5
-  Register vector = StoreWithVectorDescriptor::VectorRegister();      // r6
-  Register slot = StoreWithVectorDescriptor::SlotRegister();          // r7
-  DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r3));          // r3
-  Register feedback = r8;
-  Register receiver_map = r9;
-  Register scratch1 = r10;
-
-  __ SmiToPtrArrayOffset(r0, slot);
-  __ add(feedback, vector, r0);
-  __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
-                        scratch1, &compare_map, &load_smi_map, &try_array);
-
-  // Is it a fixed array?
-  __ bind(&try_array);
-  __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
-  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
-  __ bne(&not_array);
-
-  Register scratch2 = r11;
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
-                   &miss);
-
-  __ bind(&not_array);
-  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
-  __ bne(&miss);
-  masm->isolate()->store_stub_cache()->GenerateProbe(
-      masm, receiver, key, feedback, receiver_map, scratch1, scratch2);
-
-  __ bind(&miss);
-  StoreIC::GenerateMiss(masm);
-
-  __ bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ b(&compare_map);
-}
-
 void KeyedStoreICStub::Generate(MacroAssembler* masm) {
   GenerateImpl(masm, false);
 }
@@ -3862,30 +3668,19 @@
 
 void ArrayConstructorStub::GenerateDispatchToArrayStub(
     MacroAssembler* masm, AllocationSiteOverrideMode mode) {
-  if (argument_count() == ANY) {
-    Label not_zero_case, not_one_case;
-    __ cmpi(r3, Operand::Zero());
-    __ bne(&not_zero_case);
-    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+  Label not_zero_case, not_one_case;
+  __ cmpi(r3, Operand::Zero());
+  __ bne(&not_zero_case);
+  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
 
-    __ bind(&not_zero_case);
-    __ cmpi(r3, Operand(1));
-    __ bgt(&not_one_case);
-    CreateArrayDispatchOneArgument(masm, mode);
+  __ bind(&not_zero_case);
+  __ cmpi(r3, Operand(1));
+  __ bgt(&not_one_case);
+  CreateArrayDispatchOneArgument(masm, mode);
 
-    __ bind(&not_one_case);
-    ArrayNArgumentsConstructorStub stub(masm->isolate());
-    __ TailCallStub(&stub);
-  } else if (argument_count() == NONE) {
-    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-  } else if (argument_count() == ONE) {
-    CreateArrayDispatchOneArgument(masm, mode);
-  } else if (argument_count() == MORE_THAN_ONE) {
-    ArrayNArgumentsConstructorStub stub(masm->isolate());
-    __ TailCallStub(&stub);
-  } else {
-    UNREACHABLE();
-  }
+  __ bind(&not_one_case);
+  ArrayNArgumentsConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
 }
 
 
@@ -3937,23 +3732,9 @@
   GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
 
   __ bind(&subclassing);
-  switch (argument_count()) {
-    case ANY:
-    case MORE_THAN_ONE:
-      __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
-      __ StorePX(r4, MemOperand(sp, r0));
-      __ addi(r3, r3, Operand(3));
-      break;
-    case NONE:
-      __ StoreP(r4, MemOperand(sp, 0 * kPointerSize));
-      __ li(r3, Operand(3));
-      break;
-    case ONE:
-      __ StoreP(r4, MemOperand(sp, 1 * kPointerSize));
-      __ li(r3, Operand(4));
-      break;
-  }
-
+  __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
+  __ StorePX(r4, MemOperand(sp, r0));
+  __ addi(r3, r3, Operand(3));
   __ Push(r6, r5);
   __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
 }
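Note: with the ANY/NONE/ONE/MORE_THAN_ONE argument-count specializations gone
(an upstream simplification; the enum variants no longer appear in this file),
ArrayConstructorStub always dispatches on the live count in r3, and the
subclassing path becomes uniform. A hedged sketch of its calling convention:

    // sp[argc * kPointerSize] <- constructor (r4)
    // push new.target (r6) and the allocation site (r5)
    // argc += 3, then tail-call Runtime::kNewArray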
@@ -4385,7 +4166,7 @@
   const int kParameterMapHeaderSize =
       FixedArray::kHeaderSize + 2 * kPointerSize;
   // If there are no mapped parameters, we do not need the parameter_map.
-  __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
+  __ CmpSmiLiteral(r9, Smi::kZero, r0);
   if (CpuFeatures::IsSupported(ISELECT)) {
     __ SmiToPtrArrayOffset(r11, r9);
     __ addi(r11, r11, Operand(kParameterMapHeaderSize));
@@ -4467,7 +4248,7 @@
   // r9 = mapped parameter count (tagged)
   // Initialize parameter map. If there are no mapped arguments, we're done.
   Label skip_parameter_map;
-  __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
+  __ CmpSmiLiteral(r9, Smi::kZero, r0);
   if (CpuFeatures::IsSupported(ISELECT)) {
     __ isel(eq, r4, r7, r4);
     __ beq(&skip_parameter_map);
@@ -4690,134 +4471,6 @@
   __ TailCallRuntime(Runtime::kNewStrictArguments);
 }
 
-void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
-  Register value = r3;
-  Register slot = r5;
-
-  Register cell = r4;
-  Register cell_details = r6;
-  Register cell_value = r7;
-  Register cell_value_map = r8;
-  Register scratch = r9;
-
-  Register context = cp;
-  Register context_temp = cell;
-
-  Label fast_heapobject_case, fast_smi_case, slow_case;
-
-  if (FLAG_debug_code) {
-    __ CompareRoot(value, Heap::kTheHoleValueRootIndex);
-    __ Check(ne, kUnexpectedValue);
-  }
-
-  // Go up the context chain to the script context.
-  for (int i = 0; i < depth(); i++) {
-    __ LoadP(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
-    context = context_temp;
-  }
-
-  // Load the PropertyCell at the specified slot.
-  __ ShiftLeftImm(r0, slot, Operand(kPointerSizeLog2));
-  __ add(cell, context, r0);
-  __ LoadP(cell, ContextMemOperand(cell));
-
-  // Load PropertyDetails for the cell (actually only the cell_type and kind).
-  __ LoadP(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset));
-  __ SmiUntag(cell_details);
-  __ andi(cell_details, cell_details,
-          Operand(PropertyDetails::PropertyCellTypeField::kMask |
-                  PropertyDetails::KindField::kMask |
-                  PropertyDetails::kAttributesReadOnlyMask));
-
-  // Check if PropertyCell holds mutable data.
-  Label not_mutable_data;
-  __ cmpi(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
-                                    PropertyCellType::kMutable) |
-                                PropertyDetails::KindField::encode(kData)));
-  __ bne(&not_mutable_data);
-  __ JumpIfSmi(value, &fast_smi_case);
-
-  __ bind(&fast_heapobject_case);
-  __ StoreP(value, FieldMemOperand(cell, PropertyCell::kValueOffset), r0);
-  // RecordWriteField clobbers the value register, so we copy it before the
-  // call.
-  __ mr(r6, value);
-  __ RecordWriteField(cell, PropertyCell::kValueOffset, r6, scratch,
-                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ Ret();
-
-  __ bind(&not_mutable_data);
-  // Check if PropertyCell value matches the new value (relevant for Constant,
-  // ConstantType and Undefined cells).
-  Label not_same_value;
-  __ LoadP(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
-  __ cmp(cell_value, value);
-  __ bne(&not_same_value);
-
-  // Make sure the PropertyCell is not marked READ_ONLY.
-  __ andi(r0, cell_details, Operand(PropertyDetails::kAttributesReadOnlyMask));
-  __ bne(&slow_case, cr0);
-
-  if (FLAG_debug_code) {
-    Label done;
-    // This can only be true for Constant, ConstantType and Undefined cells,
-    // because we never store the_hole via this stub.
-    __ cmpi(cell_details,
-            Operand(PropertyDetails::PropertyCellTypeField::encode(
-                        PropertyCellType::kConstant) |
-                    PropertyDetails::KindField::encode(kData)));
-    __ beq(&done);
-    __ cmpi(cell_details,
-            Operand(PropertyDetails::PropertyCellTypeField::encode(
-                        PropertyCellType::kConstantType) |
-                    PropertyDetails::KindField::encode(kData)));
-    __ beq(&done);
-    __ cmpi(cell_details,
-            Operand(PropertyDetails::PropertyCellTypeField::encode(
-                        PropertyCellType::kUndefined) |
-                    PropertyDetails::KindField::encode(kData)));
-    __ Check(eq, kUnexpectedValue);
-    __ bind(&done);
-  }
-  __ Ret();
-  __ bind(&not_same_value);
-
-  // Check if PropertyCell contains data with constant type (and is not
-  // READ_ONLY).
-  __ cmpi(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
-                                    PropertyCellType::kConstantType) |
-                                PropertyDetails::KindField::encode(kData)));
-  __ bne(&slow_case);
-
-  // Now either both old and new values must be smis or both must be heap
-  // objects with same map.
-  Label value_is_heap_object;
-  __ JumpIfNotSmi(value, &value_is_heap_object);
-  __ JumpIfNotSmi(cell_value, &slow_case);
-  // Old and new values are smis, no need for a write barrier here.
-  __ bind(&fast_smi_case);
-  __ StoreP(value, FieldMemOperand(cell, PropertyCell::kValueOffset), r0);
-  __ Ret();
-
-  __ bind(&value_is_heap_object);
-  __ JumpIfSmi(cell_value, &slow_case);
-
-  __ LoadP(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
-  __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
-  __ cmp(cell_value_map, scratch);
-  __ beq(&fast_heapobject_case);
-
-  // Fallback to runtime.
-  __ bind(&slow_case);
-  __ SmiTag(slot);
-  __ Push(slot, value);
-  __ TailCallRuntime(is_strict(language_mode())
-                         ? Runtime::kStoreGlobalViaContext_Strict
-                         : Runtime::kStoreGlobalViaContext_Sloppy);
-}
-
-
 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
   return ref0.address() - ref1.address();
 }
@@ -5113,7 +4766,7 @@
   __ Push(scratch, scratch);
   __ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
   __ Push(scratch, holder);
-  __ Push(Smi::FromInt(0));  // should_throw_on_error -> false
+  __ Push(Smi::kZero);  // should_throw_on_error -> false
   __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
   __ push(scratch);
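Note: the stub bodies deleted from this file (LoadICStub, KeyedLoadICStub,
StoreICStub and their trampolines, StoreGlobalViaContextStub) track upstream's
move of these ICs to platform-independent implementations, so the PPC port only
drops dead code here. The Smi::FromInt(0) to Smi::kZero changes are mechanical;
on this reading, kZero is simply the named tagged-zero constant:

    // Equivalent spellings (V8-internal API of this era, an assumption):
    __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);  // before
    __ CmpSmiLiteral(r9, Smi::kZero, r0);       // after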
 
diff --git a/src/ppc/interface-descriptors-ppc.cc b/src/ppc/interface-descriptors-ppc.cc
index 3ff0fde..74ad564 100644
--- a/src/ppc/interface-descriptors-ppc.cc
+++ b/src/ppc/interface-descriptors-ppc.cc
@@ -29,9 +29,9 @@
 const Register LoadDescriptor::NameRegister() { return r5; }
 const Register LoadDescriptor::SlotRegister() { return r3; }
 
-
 const Register LoadWithVectorDescriptor::VectorRegister() { return r6; }
 
+const Register LoadICProtoArrayDescriptor::HandlerRegister() { return r7; }
 
 const Register StoreDescriptor::ReceiverRegister() { return r4; }
 const Register StoreDescriptor::NameRegister() { return r5; }
@@ -44,10 +44,6 @@
 const Register StoreTransitionDescriptor::VectorRegister() { return r6; }
 const Register StoreTransitionDescriptor::MapRegister() { return r8; }
 
-const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r5; }
-const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r3; }
-
-
 const Register StringCompareDescriptor::LeftRegister() { return r4; }
 const Register StringCompareDescriptor::RightRegister() { return r3; }
 
@@ -157,7 +153,7 @@
 
 void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {r4, r6, r5};
+  Register registers[] = {r4, r3, r6, r5};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
@@ -206,13 +202,6 @@
 }
 
 
-void RegExpConstructResultDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {r5, r4, r3};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
 void TransitionElementsKindDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {r3, r4};
diff --git a/src/ppc/macro-assembler-ppc.cc b/src/ppc/macro-assembler-ppc.cc
index 9b5f80e..6588540 100644
--- a/src/ppc/macro-assembler-ppc.cc
+++ b/src/ppc/macro-assembler-ppc.cc
@@ -1605,90 +1605,6 @@
 }
 
 
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
-                                            Register scratch, Label* miss) {
-  Label same_contexts;
-
-  DCHECK(!holder_reg.is(scratch));
-  DCHECK(!holder_reg.is(ip));
-  DCHECK(!scratch.is(ip));
-
-  // Load current lexical context from the active StandardFrame, which
-  // may require crawling past STUB frames.
-  Label load_context;
-  Label has_context;
-  DCHECK(!ip.is(scratch));
-  mr(ip, fp);
-  bind(&load_context);
-  LoadP(scratch,
-        MemOperand(ip, CommonFrameConstants::kContextOrFrameTypeOffset));
-  JumpIfNotSmi(scratch, &has_context);
-  LoadP(ip, MemOperand(ip, CommonFrameConstants::kCallerFPOffset));
-  b(&load_context);
-  bind(&has_context);
-
-// In debug mode, make sure the lexical context is set.
-#ifdef DEBUG
-  cmpi(scratch, Operand::Zero());
-  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
-#endif
-
-  // Load the native context of the current context.
-  LoadP(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
-
-  // Check the context is a native context.
-  if (emit_debug_code()) {
-    // Cannot use ip as a temporary in this verification code. Due to the fact
-    // that ip is clobbered as part of cmp with an object Operand.
-    push(holder_reg);  // Temporarily save holder on the stack.
-    // Read the first word and compare to the native_context_map.
-    LoadP(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
-    LoadRoot(ip, Heap::kNativeContextMapRootIndex);
-    cmp(holder_reg, ip);
-    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
-    pop(holder_reg);  // Restore holder.
-  }
-
-  // Check if both contexts are the same.
-  LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
-  cmp(scratch, ip);
-  beq(&same_contexts);
-
-  // Check the context is a native context.
-  if (emit_debug_code()) {
-    // Cannot use ip as a temporary in this verification code. Due to the fact
-    // that ip is clobbered as part of cmp with an object Operand.
-    push(holder_reg);    // Temporarily save holder on the stack.
-    mr(holder_reg, ip);  // Move ip to its holding place.
-    LoadRoot(ip, Heap::kNullValueRootIndex);
-    cmp(holder_reg, ip);
-    Check(ne, kJSGlobalProxyContextShouldNotBeNull);
-
-    LoadP(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
-    LoadRoot(ip, Heap::kNativeContextMapRootIndex);
-    cmp(holder_reg, ip);
-    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
-    // Restore ip is not needed. ip is reloaded below.
-    pop(holder_reg);  // Restore holder.
-    // Restore ip to holder's context.
-    LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
-  }
-
-  // Check that the security token in the calling global object is
-  // compatible with the security token in the receiving global
-  // object.
-  int token_offset =
-      Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
-
-  LoadP(scratch, FieldMemOperand(scratch, token_offset));
-  LoadP(ip, FieldMemOperand(ip, token_offset));
-  cmp(scratch, ip);
-  bne(miss);
-
-  bind(&same_contexts);
-}
-
-
 // Compute the hash code from the untagged key.  This must be kept in sync with
 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
 // code-stubs-hydrogen.cc
@@ -1729,86 +1645,6 @@
   ExtractBitRange(t0, t0, 29, 0);
 }
 
-
-void MacroAssembler::LoadFromNumberDictionary(Label* miss, Register elements,
-                                              Register key, Register result,
-                                              Register t0, Register t1,
-                                              Register t2) {
-  // Register use:
-  //
-  // elements - holds the slow-case elements of the receiver on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // key      - holds the smi key on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // result   - holds the result on exit if the load succeeded.
-  //            Allowed to be the same as 'key' or 'result'.
-  //            Unchanged on bailout so 'key' or 'result' can be used
-  //            in further computation.
-  //
-  // Scratch registers:
-  //
-  // t0 - holds the untagged key on entry and holds the hash once computed.
-  //
-  // t1 - used to hold the capacity mask of the dictionary
-  //
-  // t2 - used for the index into the dictionary.
-  Label done;
-
-  GetNumberHash(t0, t1);
-
-  // Compute the capacity mask.
-  LoadP(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
-  SmiUntag(t1);
-  subi(t1, t1, Operand(1));
-
-  // Generate an unrolled loop that performs a few probes before giving up.
-  for (int i = 0; i < kNumberDictionaryProbes; i++) {
-    // Use t2 for index calculations and keep the hash intact in t0.
-    mr(t2, t0);
-    // Compute the masked index: (hash + i + i * i) & mask.
-    if (i > 0) {
-      addi(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
-    }
-    and_(t2, t2, t1);
-
-    // Scale the index by multiplying by the element size.
-    DCHECK(SeededNumberDictionary::kEntrySize == 3);
-    slwi(ip, t2, Operand(1));
-    add(t2, t2, ip);  // t2 = t2 * 3
-
-    // Check if the key is identical to the name.
-    slwi(t2, t2, Operand(kPointerSizeLog2));
-    add(t2, elements, t2);
-    LoadP(ip,
-          FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
-    cmp(key, ip);
-    if (i != kNumberDictionaryProbes - 1) {
-      beq(&done);
-    } else {
-      bne(miss);
-    }
-  }
-
-  bind(&done);
-  // Check that the value is a field property.
-  // t2: elements + (index * kPointerSize)
-  const int kDetailsOffset =
-      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
-  LoadP(t1, FieldMemOperand(t2, kDetailsOffset));
-  LoadSmiLiteral(ip, Smi::FromInt(PropertyDetails::TypeField::kMask));
-  DCHECK_EQ(DATA, 0);
-  and_(r0, t1, ip, SetRC);
-  bne(miss, cr0);
-
-  // Get the value at the masked, scaled index and return.
-  const int kValueOffset =
-      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
-  LoadP(result, FieldMemOperand(t2, kValueOffset));
-}
-
-
 void MacroAssembler::Allocate(int object_size, Register result,
                               Register scratch1, Register scratch2,
                               Label* gc_required, AllocationFlags flags) {
@@ -2234,20 +2070,6 @@
   cmp(obj, r0);
 }
 
-
-void MacroAssembler::CheckFastElements(Register map, Register scratch,
-                                       Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  STATIC_ASSERT(FAST_ELEMENTS == 2);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
-  lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
-  STATIC_ASSERT(Map::kMaximumBitField2FastHoleyElementValue < 0x8000);
-  cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
-  bgt(fail);
-}
-
-
 void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
                                              Label* fail) {
   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
@@ -2525,18 +2347,6 @@
   return has_frame_ || !stub->SometimesSetsUpAFrame();
 }
 
-
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
-  // If the hash field contains an array index pick it out. The assert checks
-  // that the constants for the maximum number of digits for an array index
-  // cached in the hash field and the number of bits reserved for it does not
-  // conflict.
-  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
-         (1 << String::kArrayIndexValueBits));
-  DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
-}
-
-
 void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
   SmiUntag(ip, smi);
   ConvertIntToDouble(ip, value);
@@ -3282,73 +3092,6 @@
   STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
 }
 
-
-void MacroAssembler::CopyBytes(Register src, Register dst, Register length,
-                               Register scratch) {
-  Label align_loop, aligned, word_loop, byte_loop, byte_loop_1, done;
-
-  DCHECK(!scratch.is(r0));
-
-  cmpi(length, Operand::Zero());
-  beq(&done);
-
-  // Check src alignment and length to see whether word_loop is possible
-  andi(scratch, src, Operand(kPointerSize - 1));
-  beq(&aligned, cr0);
-  subfic(scratch, scratch, Operand(kPointerSize * 2));
-  cmp(length, scratch);
-  blt(&byte_loop);
-
-  // Align src before copying in word size chunks.
-  subi(scratch, scratch, Operand(kPointerSize));
-  mtctr(scratch);
-  bind(&align_loop);
-  lbz(scratch, MemOperand(src));
-  addi(src, src, Operand(1));
-  subi(length, length, Operand(1));
-  stb(scratch, MemOperand(dst));
-  addi(dst, dst, Operand(1));
-  bdnz(&align_loop);
-
-  bind(&aligned);
-
-  // Copy bytes in word size chunks.
-  if (emit_debug_code()) {
-    andi(r0, src, Operand(kPointerSize - 1));
-    Assert(eq, kExpectingAlignmentForCopyBytes, cr0);
-  }
-
-  ShiftRightImm(scratch, length, Operand(kPointerSizeLog2));
-  cmpi(scratch, Operand::Zero());
-  beq(&byte_loop);
-
-  mtctr(scratch);
-  bind(&word_loop);
-  LoadP(scratch, MemOperand(src));
-  addi(src, src, Operand(kPointerSize));
-  subi(length, length, Operand(kPointerSize));
-
-  StoreP(scratch, MemOperand(dst));
-  addi(dst, dst, Operand(kPointerSize));
-  bdnz(&word_loop);
-
-  // Copy the last bytes if any left.
-  cmpi(length, Operand::Zero());
-  beq(&done);
-
-  bind(&byte_loop);
-  mtctr(length);
-  bind(&byte_loop_1);
-  lbz(scratch, MemOperand(src));
-  addi(src, src, Operand(1));
-  stb(scratch, MemOperand(dst));
-  addi(dst, dst, Operand(1));
-  bdnz(&byte_loop_1);
-
-  bind(&done);
-}
-
-
 void MacroAssembler::InitializeNFieldsWithFiller(Register current_address,
                                                  Register count,
                                                  Register filler) {
@@ -3451,7 +3194,7 @@
   cmp(index, ip);
   Check(lt, kIndexIsTooLarge);
 
-  DCHECK(Smi::FromInt(0) == 0);
+  DCHECK(Smi::kZero == 0);
   cmpi(index, Operand::Zero());
   Check(ge, kIndexIsNegative);
 
@@ -3828,7 +3571,7 @@
 
   // For all objects but the receiver, check that the cache is empty.
   EnumLength(r6, r4);
-  CmpSmiLiteral(r6, Smi::FromInt(0), r0);
+  CmpSmiLiteral(r6, Smi::kZero, r0);
   bne(call_runtime);
 
   bind(&start);
@@ -4687,7 +4430,8 @@
   ExternalReference new_space_allocation_top_adr =
       ExternalReference::new_space_allocation_top_address(isolate());
   const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
-  const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+  const int kMementoLastWordOffset =
+      kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
   Register mask = scratch2_reg;
 
   DCHECK(!AreAliased(receiver_reg, scratch_reg, mask));
@@ -4697,7 +4441,7 @@
 
   DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
   lis(mask, Operand((~Page::kPageAlignmentMask >> 16)));
-  addi(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+  addi(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
 
   // If the object is in new space, we need to check whether it is on the same
   // page as the current top.
@@ -4718,7 +4462,7 @@
   // we are below top.
   bind(&top_check);
   cmp(scratch_reg, ip);
-  bgt(no_memento_found);
+  bge(no_memento_found);
   // Memento map check.
   bind(&map_check);
   LoadP(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
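Note on the memento check: switching from kMementoEndOffset with bgt to
kMementoLastWordOffset with bge is behavior-preserving for the top comparison
(end <= top iff last_word < top) but matters for the page-mask test above it:
the one-past-the-end address can land on the next page when the memento abuts
a page boundary, while the last-word address always stays on the object's own
page (a hedged reading of the fix). Illustrative arithmetic, offsets as in the
diff:

    // end       = kMementoMapOffset + AllocationMemento::kSize
    // last_word = end - kPointerSize            // still inside the memento
    // (receiver + end) may cross a page; (receiver + last_word) cannot.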
diff --git a/src/ppc/macro-assembler-ppc.h b/src/ppc/macro-assembler-ppc.h
index ba4d277..28eceb1 100644
--- a/src/ppc/macro-assembler-ppc.h
+++ b/src/ppc/macro-assembler-ppc.h
@@ -662,19 +662,8 @@
   // ---------------------------------------------------------------------------
   // Inline caching support
 
-  // Generate code for checking access rights - used for security checks
-  // on access to global objects across environments. The holder register
-  // is left untouched, whereas both scratch registers are clobbered.
-  void CheckAccessGlobalProxy(Register holder_reg, Register scratch,
-                              Label* miss);
-
   void GetNumberHash(Register t0, Register scratch);
 
-  void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
-                                Register result, Register t0, Register t1,
-                                Register t2);
-
-
   inline void MarkCode(NopMarkerTypes type) { nop(type); }
 
   // Check if the given instruction is a 'type' marker.
@@ -769,11 +758,6 @@
                        Register scratch1, Register scratch2,
                        Label* gc_required);
 
-  // Copies a number of bytes from src to dst. All registers are clobbered. On
-  // exit src and dst will point to the place just after where the last byte was
-  // read or written and length will be zero.
-  void CopyBytes(Register src, Register dst, Register length, Register scratch);
-
   // Initialize fields with filler values.  |count| fields starting at
   // |current_address| are overwritten with the value in |filler|.  At the end
   // the loop, |current_address| points at the next uninitialized field.
@@ -819,11 +803,6 @@
   // sets the flags and leaves the object type in the type_reg register.
   void CompareInstanceType(Register map, Register type_reg, InstanceType type);
 
-
-  // Check if a map for a JSObject indicates that the object has fast elements.
-  // Jump to the specified label if it does not.
-  void CheckFastElements(Register map, Register scratch, Label* fail);
-
   // Check if a map for a JSObject indicates that the object can have both smi
   // and HeapObject elements.  Jump to the specified label if it does not.
   void CheckFastObjectElements(Register map, Register scratch, Label* fail);
@@ -912,13 +891,6 @@
     return eq;
   }
 
-
-  // Picks out an array index from the hash field.
-  // Register use:
-  //   hash - holds the index's hash. Clobbered.
-  //   index - holds the overwritten index on exit.
-  void IndexFromHash(Register hash, Register index);
-
   // Get the number of least significant bits from a register
   void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
   void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
diff --git a/src/profiler/cpu-profiler-inl.h b/src/profiler/cpu-profiler-inl.h
index 504c3f6..440c6a1 100644
--- a/src/profiler/cpu-profiler-inl.h
+++ b/src/profiler/cpu-profiler-inl.h
@@ -35,7 +35,7 @@
 
 void CodeDeoptEventRecord::UpdateCodeMap(CodeMap* code_map) {
   CodeEntry* entry = code_map->FindEntry(start);
-  if (entry != NULL) entry->set_deopt_info(deopt_reason, position, deopt_id);
+  if (entry != NULL) entry->set_deopt_info(deopt_reason, deopt_id);
 }
 
 
diff --git a/src/profiler/cpu-profiler.cc b/src/profiler/cpu-profiler.cc
index 7a0cf9c..6821ba6 100644
--- a/src/profiler/cpu-profiler.cc
+++ b/src/profiler/cpu-profiler.cc
@@ -305,7 +305,7 @@
   // Disable logging when using the new implementation.
   saved_is_logging_ = logger->is_logging_;
   logger->is_logging_ = false;
-  generator_.reset(new ProfileGenerator(profiles_.get()));
+  generator_.reset(new ProfileGenerator(isolate_, profiles_.get()));
   processor_.reset(new ProfilerEventsProcessor(isolate_, generator_.get(),
                                                sampling_interval_));
   logger->SetUpProfilerListener();
@@ -326,33 +326,21 @@
   processor_->StartSynchronously();
 }
 
-
 CpuProfile* CpuProfiler::StopProfiling(const char* title) {
   if (!is_profiling_) return nullptr;
   StopProcessorIfLastProfile(title);
-  CpuProfile* result = profiles_->StopProfiling(title);
-  if (result) {
-    result->Print();
-  }
-  return result;
+  return profiles_->StopProfiling(title);
 }
 
-
 CpuProfile* CpuProfiler::StopProfiling(String* title) {
-  if (!is_profiling_) return nullptr;
-  const char* profile_title = profiles_->GetName(title);
-  StopProcessorIfLastProfile(profile_title);
-  return profiles_->StopProfiling(profile_title);
+  return StopProfiling(profiles_->GetName(title));
 }
 
-
 void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
-  if (profiles_->IsLastProfile(title)) {
-    StopProcessor();
-  }
+  if (!profiles_->IsLastProfile(title)) return;
+  StopProcessor();
 }
 
-
 void CpuProfiler::StopProcessor() {
   Logger* logger = isolate_->logger();
   is_profiling_ = false;
diff --git a/src/profiler/cpu-profiler.h b/src/profiler/cpu-profiler.h
index e9ccc57..fa31754 100644
--- a/src/profiler/cpu-profiler.h
+++ b/src/profiler/cpu-profiler.h
@@ -83,7 +83,6 @@
  public:
   Address start;
   const char* deopt_reason;
-  SourcePosition position;
   int deopt_id;
   void* pc;
   int fp_to_sp_delta;
@@ -123,7 +122,7 @@
     CodeEventRecord generic;
 #define DECLARE_CLASS(ignore, type) type type##_;
     CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
-#undef DECLARE_TYPE
+#undef DECLARE_CLASS
   };
 };
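Note: the #undef fix above is real, not cosmetic; the old `#undef DECLARE_TYPE`
named a macro that was never defined, so DECLARE_CLASS leaked past the union.
The X-macro hygiene pattern, in miniature:

    #define DECLARE_CLASS(ignore, type) type type##_;
    CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)  // expand once
    #undef DECLARE_CLASS                  // must undef the name just defined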
 
diff --git a/src/profiler/heap-snapshot-generator.cc b/src/profiler/heap-snapshot-generator.cc
index d0fa2e4..2fd682e 100644
--- a/src/profiler/heap-snapshot-generator.cc
+++ b/src/profiler/heap-snapshot-generator.cc
@@ -1312,7 +1312,7 @@
   HeapObject* obj = shared;
   String* shared_name = shared->DebugName();
   const char* name = NULL;
-  if (shared_name != *heap_->isolate()->factory()->empty_string()) {
+  if (shared_name != heap_->empty_string()) {
     name = names_->GetName(shared_name);
     TagObject(shared->code(), names_->GetFormatted("(code for %s)", name));
   } else {
diff --git a/src/profiler/profile-generator-inl.h b/src/profiler/profile-generator-inl.h
index c50964d..5a7017a 100644
--- a/src/profiler/profile-generator-inl.h
+++ b/src/profiler/profile-generator-inl.h
@@ -25,25 +25,26 @@
       position_(0),
       bailout_reason_(kEmptyBailoutReason),
       deopt_reason_(kNoDeoptReason),
-      deopt_position_(SourcePosition::Unknown()),
       deopt_id_(kNoDeoptimizationId),
       line_info_(line_info),
       instruction_start_(instruction_start) {}
 
-ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry)
+ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry,
+                         ProfileNode* parent)
     : tree_(tree),
       entry_(entry),
       self_ticks_(0),
       children_(CodeEntriesMatch),
+      parent_(parent),
       id_(tree->next_node_id()),
-      line_ticks_(LineTickMatch) {}
-
+      line_ticks_(LineTickMatch) {
+  tree_->EnqueueNode(this);
+}
 
 inline unsigned ProfileNode::function_id() const {
   return tree_->GetFunctionId(this);
 }
 
-
 inline Isolate* ProfileNode::isolate() const { return tree_->isolate(); }
 
 }  // namespace internal
diff --git a/src/profiler/profile-generator.cc b/src/profiler/profile-generator.cc
index 583ef0f..b647670 100644
--- a/src/profiler/profile-generator.cc
+++ b/src/profiler/profile-generator.cc
@@ -10,6 +10,8 @@
 #include "src/global-handles.h"
 #include "src/profiler/cpu-profiler.h"
 #include "src/profiler/profile-generator-inl.h"
+#include "src/tracing/trace-event.h"
+#include "src/tracing/traced-value.h"
 #include "src/unicode.h"
 
 namespace v8 {
@@ -140,11 +142,8 @@
 }
 
 void CodeEntry::AddInlineStack(int pc_offset,
-                               std::vector<CodeEntry*>& inline_stack) {
-  // It's better to use std::move to place the vector into the map,
-  // but it's not supported by the current stdlibc++ on MacOS.
-  inline_locations_.insert(std::make_pair(pc_offset, std::vector<CodeEntry*>()))
-      .first->second.swap(inline_stack);
+                               std::vector<CodeEntry*> inline_stack) {
+  inline_locations_.insert(std::make_pair(pc_offset, std::move(inline_stack)));
 }
 
 const std::vector<CodeEntry*>* CodeEntry::GetInlineStack(int pc_offset) const {
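Note: the swap-into-the-map workaround deleted here existed only because an
older MacOS stdlibc++ lacked usable map move-insertion; both call sites now use
the standard sink-argument idiom. A generic illustration, names hypothetical:

    #include <map>
    #include <utility>
    #include <vector>

    std::map<int, std::vector<int>> table;
    void Add(int key, std::vector<int> value) {  // by value: caller may move
      table.insert(std::make_pair(key, std::move(value)));
    }
    // caller: Add(pc_offset, std::move(stack));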
@@ -153,12 +152,9 @@
 }
 
 void CodeEntry::AddDeoptInlinedFrames(
-    int deopt_id, std::vector<DeoptInlinedFrame>& inlined_frames) {
-  // It's better to use std::move to place the vector into the map,
-  // but it's not supported by the current stdlibc++ on MacOS.
-  deopt_inlined_frames_
-      .insert(std::make_pair(deopt_id, std::vector<DeoptInlinedFrame>()))
-      .first->second.swap(inlined_frames);
+    int deopt_id, std::vector<CpuProfileDeoptFrame> inlined_frames) {
+  deopt_inlined_frames_.insert(
+      std::make_pair(deopt_id, std::move(inlined_frames)));
 }
 
 bool CodeEntry::HasDeoptInlinedFramesFor(int deopt_id) const {
@@ -181,16 +177,9 @@
   DCHECK_NE(kNoDeoptimizationId, deopt_id_);
   if (deopt_inlined_frames_.find(deopt_id_) == deopt_inlined_frames_.end()) {
     info.stack.push_back(CpuProfileDeoptFrame(
-        {script_id_, position_ + deopt_position_.position()}));
+        {script_id_, static_cast<size_t>(std::max(0, position()))}));
   } else {
-    size_t deopt_position = deopt_position_.raw();
-    // Copy stack of inlined frames where the deopt happened.
-    std::vector<DeoptInlinedFrame>& frames = deopt_inlined_frames_[deopt_id_];
-    for (DeoptInlinedFrame& inlined_frame : base::Reversed(frames)) {
-      info.stack.push_back(CpuProfileDeoptFrame(
-          {inlined_frame.script_id, deopt_position + inlined_frame.position}));
-      deopt_position = 0;  // Done with innermost frame.
-    }
+    info.stack = deopt_inlined_frames_[deopt_id_];
   }
   return info;
 }
@@ -214,9 +203,8 @@
   base::HashMap::Entry* map_entry =
       children_.LookupOrInsert(entry, CodeEntryHash(entry));
   ProfileNode* node = reinterpret_cast<ProfileNode*>(map_entry->value);
-  if (node == NULL) {
-    // New node added.
-    node = new ProfileNode(tree_, entry);
+  if (!node) {
+    node = new ProfileNode(tree_, entry, this);
     map_entry->value = node;
     children_list_.Add(node);
   }
@@ -305,7 +293,7 @@
 ProfileTree::ProfileTree(Isolate* isolate)
     : root_entry_(CodeEventListener::FUNCTION_TAG, "(root)"),
       next_node_id_(1),
-      root_(new ProfileNode(this, &root_entry_)),
+      root_(new ProfileNode(this, &root_entry_, nullptr)),
       isolate_(isolate),
       next_function_id_(1),
       function_ids_(ProfileNode::CodeEntriesMatch) {}
@@ -397,13 +385,22 @@
   }
 }
 
+using v8::tracing::TracedValue;
+
 CpuProfile::CpuProfile(CpuProfiler* profiler, const char* title,
                        bool record_samples)
     : title_(title),
       record_samples_(record_samples),
       start_time_(base::TimeTicks::HighResolutionNow()),
       top_down_(profiler->isolate()),
-      profiler_(profiler) {}
+      profiler_(profiler),
+      streaming_next_sample_(0) {
+  auto value = TracedValue::Create();
+  value->SetDouble("startTime",
+                   (start_time_ - base::TimeTicks()).InMicroseconds());
+  TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
+                              "Profile", this, "data", std::move(value));
+}
 
 void CpuProfile::AddPath(base::TimeTicks timestamp,
                          const std::vector<CodeEntry*>& path, int src_line,
@@ -414,10 +411,94 @@
     timestamps_.Add(timestamp);
     samples_.Add(top_frame_node);
   }
+  const int kSamplesFlushCount = 100;
+  const int kNodesFlushCount = 10;
+  if (samples_.length() - streaming_next_sample_ >= kSamplesFlushCount ||
+      top_down_.pending_nodes_count() >= kNodesFlushCount) {
+    StreamPendingTraceEvents();
+  }
 }
 
-void CpuProfile::CalculateTotalTicksAndSamplingRate() {
+namespace {
+
+void BuildNodeValue(const ProfileNode* node, TracedValue* value) {
+  const CodeEntry* entry = node->entry();
+  value->BeginDictionary("callFrame");
+  value->SetString("functionName", entry->name());
+  if (*entry->resource_name()) {
+    value->SetString("url", entry->resource_name());
+  }
+  value->SetInteger("scriptId", entry->script_id());
+  if (entry->line_number()) {
+    value->SetInteger("lineNumber", entry->line_number() - 1);
+  }
+  if (entry->column_number()) {
+    value->SetInteger("columnNumber", entry->column_number() - 1);
+  }
+  value->EndDictionary();
+  value->SetInteger("id", node->id());
+  if (node->parent()) {
+    value->SetInteger("parent", node->parent()->id());
+  }
+  const char* deopt_reason = entry->bailout_reason();
+  if (deopt_reason && deopt_reason[0] && strcmp(deopt_reason, "no reason")) {
+    value->SetString("deoptReason", deopt_reason);
+  }
+}
+
+}  // namespace
+
+void CpuProfile::StreamPendingTraceEvents() {
+  std::vector<const ProfileNode*> pending_nodes = top_down_.TakePendingNodes();
+  if (pending_nodes.empty() && !samples_.length()) return;
+  auto value = TracedValue::Create();
+
+  if (!pending_nodes.empty() || streaming_next_sample_ != samples_.length()) {
+    value->BeginDictionary("cpuProfile");
+    if (!pending_nodes.empty()) {
+      value->BeginArray("nodes");
+      for (auto node : pending_nodes) {
+        value->BeginDictionary();
+        BuildNodeValue(node, value.get());
+        value->EndDictionary();
+      }
+      value->EndArray();
+    }
+    if (streaming_next_sample_ != samples_.length()) {
+      value->BeginArray("samples");
+      for (int i = streaming_next_sample_; i < samples_.length(); ++i) {
+        value->AppendInteger(samples_[i]->id());
+      }
+      value->EndArray();
+    }
+    value->EndDictionary();
+  }
+  if (streaming_next_sample_ != samples_.length()) {
+    value->BeginArray("timeDeltas");
+    base::TimeTicks lastTimestamp =
+        streaming_next_sample_ ? timestamps_[streaming_next_sample_ - 1]
+                               : start_time();
+    for (int i = streaming_next_sample_; i < timestamps_.length(); ++i) {
+      value->AppendInteger(
+          static_cast<int>((timestamps_[i] - lastTimestamp).InMicroseconds()));
+      lastTimestamp = timestamps_[i];
+    }
+    value->EndArray();
+    DCHECK(samples_.length() == timestamps_.length());
+    streaming_next_sample_ = samples_.length();
+  }
+
+  TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
+                              "ProfileChunk", this, "data", std::move(value));
+}
+
+void CpuProfile::FinishProfile() {
   end_time_ = base::TimeTicks::HighResolutionNow();
+  StreamPendingTraceEvents();
+  auto value = TracedValue::Create();
+  value->SetDouble("endTime", (end_time_ - base::TimeTicks()).InMicroseconds());
+  TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
+                              "ProfileChunk", this, "data", std::move(value));
 }
 
 void CpuProfile::Print() {
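Note: the streaming added above emits one "Profile" trace event at construction
and then "ProfileChunk" events carrying incremental nodes, samples, and
timeDeltas; the flush policy in AddPath bounds buffering. Its condition,
isolated as a sketch with the same constants:

    bool ShouldFlushChunk(int num_samples, int next_streamed_sample,
                          size_t pending_nodes_count) {
      const int kSamplesFlushCount = 100;
      const int kNodesFlushCount = 10;
      return num_samples - next_streamed_sample >= kSamplesFlushCount ||
             pending_nodes_count >= static_cast<size_t>(kNodesFlushCount);
    }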
@@ -504,7 +585,7 @@
 
 CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
   const int title_len = StrLength(title);
-  CpuProfile* profile = NULL;
+  CpuProfile* profile = nullptr;
   current_profiles_semaphore_.Wait();
   for (int i = current_profiles_.length() - 1; i >= 0; --i) {
     if (title_len == 0 || strcmp(current_profiles_[i]->title(), title) == 0) {
@@ -514,8 +595,8 @@
   }
   current_profiles_semaphore_.Signal();
 
-  if (profile == NULL) return NULL;
-  profile->CalculateTotalTicksAndSamplingRate();
+  if (!profile) return nullptr;
+  profile->FinishProfile();
   finished_profiles_.Add(profile);
   return profile;
 }
@@ -554,8 +635,9 @@
   current_profiles_semaphore_.Signal();
 }
 
-ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
-    : profiles_(profiles) {}
+ProfileGenerator::ProfileGenerator(Isolate* isolate,
+                                   CpuProfilesCollection* profiles)
+    : isolate_(isolate), profiles_(profiles) {}
 
 void ProfileGenerator::RecordTickSample(const TickSample& sample) {
   std::vector<CodeEntry*> entries;
@@ -576,16 +658,14 @@
       // Don't use PC when in external callback code, as it can point
       // inside callback's code, and we will erroneously report
       // that a callback calls itself.
-      entries.push_back(code_map_.FindEntry(
-          reinterpret_cast<Address>(sample.external_callback_entry)));
+      entries.push_back(FindEntry(sample.external_callback_entry));
     } else {
-      CodeEntry* pc_entry =
-          code_map_.FindEntry(reinterpret_cast<Address>(sample.pc));
+      CodeEntry* pc_entry = FindEntry(sample.pc);
       // If there is no pc_entry we're likely in native code.
       // Find out, if top of stack was pointing inside a JS function
       // meaning that we have encountered a frameless invocation.
       if (!pc_entry && !sample.has_external_callback) {
-        pc_entry = code_map_.FindEntry(reinterpret_cast<Address>(sample.tos));
+        pc_entry = FindEntry(sample.tos);
       }
       // If pc is in the function code before it set up stack frame or after the
       // frame was destroyed SafeStackFrameIterator incorrectly thinks that
@@ -618,8 +698,7 @@
 
     for (unsigned i = 0; i < sample.frames_count; ++i) {
       Address stack_pos = reinterpret_cast<Address>(sample.stack[i]);
-      CodeEntry* entry = code_map_.FindEntry(stack_pos);
-
+      CodeEntry* entry = FindEntry(stack_pos);
       if (entry) {
         // Find out if the entry has an inlining stack associated.
         int pc_offset =
@@ -662,6 +741,22 @@
                                       sample.update_stats);
 }
 
+CodeEntry* ProfileGenerator::FindEntry(void* address) {
+  CodeEntry* entry = code_map_.FindEntry(reinterpret_cast<Address>(address));
+  if (!entry) {
+    RuntimeCallStats* rcs = isolate_->counters()->runtime_call_stats();
+    void* start = reinterpret_cast<void*>(rcs);
+    void* end = reinterpret_cast<void*>(rcs + 1);
+    if (start <= address && address < end) {
+      RuntimeCallCounter* counter =
+          reinterpret_cast<RuntimeCallCounter*>(address);
+      entry = new CodeEntry(CodeEventListener::FUNCTION_TAG, counter->name,
+                            CodeEntry::kEmptyNamePrefix, "native V8Runtime");
+      code_map_.AddCode(reinterpret_cast<Address>(address), entry, 1);
+    }
+  }
+  return entry;
+}
 
 CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
   switch (tag) {
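Note: FindEntry above lazily synthesizes a CodeEntry when a sampled address
points into the isolate's RuntimeCallStats block (see the tick-sample.cc change
below, which plants counter addresses in the frame list). The containment test
is plain pointer-range arithmetic; a sketch with names from the diff:

    bool IsInsideRuntimeCallStats(RuntimeCallStats* rcs, void* address) {
      void* start = reinterpret_cast<void*>(rcs);
      void* end = reinterpret_cast<void*>(rcs + 1);  // one past the struct
      return start <= address && address < end;
    }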
diff --git a/src/profiler/profile-generator.h b/src/profiler/profile-generator.h
index 179d411..1b3cad6 100644
--- a/src/profiler/profile-generator.h
+++ b/src/profiler/profile-generator.h
@@ -49,13 +49,6 @@
                    Address instruction_start = NULL);
   ~CodeEntry();
 
-  // Container describing inlined frames at eager deopt points. Is eventually
-  // being translated into v8::CpuProfileDeoptFrame by the profiler.
-  struct DeoptInlinedFrame {
-    int position;
-    int script_id;
-  };
-
   const char* name_prefix() const { return name_prefix_; }
   bool has_name_prefix() const { return name_prefix_[0] != '\0'; }
   const char* name() const { return name_; }
@@ -72,18 +65,15 @@
   }
   const char* bailout_reason() const { return bailout_reason_; }
 
-  void set_deopt_info(const char* deopt_reason, SourcePosition position,
-                      int deopt_id) {
+  void set_deopt_info(const char* deopt_reason, int deopt_id) {
     DCHECK(!has_deopt_info());
     deopt_reason_ = deopt_reason;
-    deopt_position_ = position;
     deopt_id_ = deopt_id;
   }
   CpuProfileDeoptInfo GetDeoptInfo();
   bool has_deopt_info() const { return deopt_id_ != kNoDeoptimizationId; }
   void clear_deopt_info() {
     deopt_reason_ = kNoDeoptReason;
-    deopt_position_ = SourcePosition::Unknown();
     deopt_id_ = kNoDeoptimizationId;
   }
 
@@ -99,10 +89,10 @@
 
   int GetSourceLine(int pc_offset) const;
 
-  void AddInlineStack(int pc_offset, std::vector<CodeEntry*>& inline_stack);
+  void AddInlineStack(int pc_offset, std::vector<CodeEntry*> inline_stack);
   const std::vector<CodeEntry*>* GetInlineStack(int pc_offset) const;
 
-  void AddDeoptInlinedFrames(int deopt_id, std::vector<DeoptInlinedFrame>&);
+  void AddDeoptInlinedFrames(int deopt_id, std::vector<CpuProfileDeoptFrame>);
   bool HasDeoptInlinedFramesFor(int deopt_id) const;
 
   Address instruction_start() const { return instruction_start_; }
@@ -167,13 +157,12 @@
   int position_;
   const char* bailout_reason_;
   const char* deopt_reason_;
-  SourcePosition deopt_position_;
   int deopt_id_;
   JITLineInfoTable* line_info_;
   Address instruction_start_;
   // Should be an unordered_map, but it doesn't currently work on Win & MacOS.
   std::map<int, std::vector<CodeEntry*>> inline_locations_;
-  std::map<int, std::vector<DeoptInlinedFrame>> deopt_inlined_frames_;
+  std::map<int, std::vector<CpuProfileDeoptFrame>> deopt_inlined_frames_;
 
   DISALLOW_COPY_AND_ASSIGN(CodeEntry);
 };
@@ -183,7 +172,7 @@
 
 class ProfileNode {
  public:
-  inline ProfileNode(ProfileTree* tree, CodeEntry* entry);
+  inline ProfileNode(ProfileTree* tree, CodeEntry* entry, ProfileNode* parent);
 
   ProfileNode* FindChild(CodeEntry* entry);
   ProfileNode* FindOrAddChild(CodeEntry* entry);
@@ -196,6 +185,7 @@
   const List<ProfileNode*>* children() const { return &children_list_; }
   unsigned id() const { return id_; }
   unsigned function_id() const;
+  ProfileNode* parent() const { return parent_; }
   unsigned int GetHitLineCount() const { return line_ticks_.occupancy(); }
   bool GetLineTicks(v8::CpuProfileNode::LineTick* entries,
                     unsigned int length) const;
@@ -223,6 +213,7 @@
   // Mapping from CodeEntry* to ProfileNode*
   base::CustomMatcherHashMap children_;
   List<ProfileNode*> children_list_;
+  ProfileNode* parent_;
   unsigned id_;
   base::CustomMatcherHashMap line_ticks_;
 
@@ -251,10 +242,18 @@
 
   Isolate* isolate() const { return isolate_; }
 
+  void EnqueueNode(const ProfileNode* node) { pending_nodes_.push_back(node); }
+  size_t pending_nodes_count() const { return pending_nodes_.size(); }
+  std::vector<const ProfileNode*> TakePendingNodes() {
+    return std::move(pending_nodes_);
+  }
+
  private:
   template <typename Callback>
   void TraverseDepthFirst(Callback* callback);
 
+  std::vector<const ProfileNode*> pending_nodes_;
+
   CodeEntry root_entry_;
   unsigned next_node_id_;
   ProfileNode* root_;
@@ -274,7 +273,7 @@
   // Add pc -> ... -> main() call path to the profile.
   void AddPath(base::TimeTicks timestamp, const std::vector<CodeEntry*>& path,
                int src_line, bool update_stats);
-  void CalculateTotalTicksAndSamplingRate();
+  void FinishProfile();
 
   const char* title() const { return title_; }
   const ProfileTree* top_down() const { return &top_down_; }
@@ -294,6 +293,8 @@
   void Print();
 
  private:
+  void StreamPendingTraceEvents();
+
   const char* title_;
   bool record_samples_;
   base::TimeTicks start_time_;
@@ -302,6 +303,7 @@
   List<base::TimeTicks> timestamps_;
   ProfileTree top_down_;
   CpuProfiler* const profiler_;
+  int streaming_next_sample_;
 
   DISALLOW_COPY_AND_ASSIGN(CpuProfile);
 };
@@ -366,15 +368,17 @@
 
 class ProfileGenerator {
  public:
-  explicit ProfileGenerator(CpuProfilesCollection* profiles);
+  ProfileGenerator(Isolate* isolate, CpuProfilesCollection* profiles);
 
   void RecordTickSample(const TickSample& sample);
 
   CodeMap* code_map() { return &code_map_; }
 
  private:
+  CodeEntry* FindEntry(void* address);
   CodeEntry* EntryForVMState(StateTag tag);
 
+  Isolate* isolate_;
   CpuProfilesCollection* profiles_;
   CodeMap code_map_;
 
diff --git a/src/profiler/profiler-listener.cc b/src/profiler/profiler-listener.cc
index 4bceac2..640f967 100644
--- a/src/profiler/profiler-listener.cc
+++ b/src/profiler/profiler-listener.cc
@@ -90,18 +90,13 @@
     line_table = new JITLineInfoTable();
     int offset = abstract_code->IsCode() ? Code::kHeaderSize
                                          : BytecodeArray::kHeaderSize;
-    int start_position = shared->start_position();
-    int end_position = shared->end_position();
     for (SourcePositionTableIterator it(abstract_code->source_position_table());
          !it.done(); it.Advance()) {
-      int position = it.source_position();
-      // TODO(alph): in case of inlining the position may correspond to an
-      // inlined function source code. Do not collect positions that fall
-      // beyond the function source code. There's however a chance the
-      // inlined function has similar positions but in another script. So
-      // the proper fix is to store script_id in some form along with the
-      // inlined function positions.
-      if (position < start_position || position >= end_position) continue;
+      // TODO(alph,tebbi) Skipping inlined positions for now, because they might
+      // refer to a different script.
+      if (it.source_position().InliningId() != SourcePosition::kNotInlined)
+        continue;
+      int position = it.source_position().ScriptOffset();
       int line_number = script->GetLineNumber(position) + 1;
       int pc_offset = it.code_offset() + offset;
       line_table->SetPosition(pc_offset, line_number);
@@ -156,7 +151,6 @@
   Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(code, pc);
   rec->start = code->address();
   rec->deopt_reason = DeoptimizeReasonToString(info.deopt_reason);
-  rec->position = info.position;
   rec->deopt_id = info.deopt_id;
   rec->pc = reinterpret_cast<void*>(pc);
   rec->fp_to_sp_delta = fp_to_sp_delta;
@@ -245,8 +239,7 @@
       inline_stack.push_back(inline_entry);
     }
     if (!inline_stack.empty()) {
-      entry->AddInlineStack(pc_offset, inline_stack);
-      DCHECK(inline_stack.empty());
+      entry->AddInlineStack(pc_offset, std::move(inline_stack));
     }
   }
 }
@@ -254,55 +247,36 @@
 void ProfilerListener::RecordDeoptInlinedFrames(CodeEntry* entry,
                                                 AbstractCode* abstract_code) {
   if (abstract_code->kind() != AbstractCode::OPTIMIZED_FUNCTION) return;
-  Code* code = abstract_code->GetCode();
-  DeoptimizationInputData* deopt_input_data =
-      DeoptimizationInputData::cast(code->deoptimization_data());
-  int const mask = RelocInfo::ModeMask(RelocInfo::DEOPT_ID);
-  for (RelocIterator rit(code, mask); !rit.done(); rit.next()) {
-    RelocInfo* reloc_info = rit.rinfo();
-    DCHECK(RelocInfo::IsDeoptId(reloc_info->rmode()));
-    int deopt_id = static_cast<int>(reloc_info->data());
-    int translation_index =
-        deopt_input_data->TranslationIndex(deopt_id)->value();
-    TranslationIterator it(deopt_input_data->TranslationByteArray(),
-                           translation_index);
-    Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
-    DCHECK_EQ(Translation::BEGIN, opcode);
-    it.Skip(Translation::NumberOfOperandsFor(opcode));
-    std::vector<CodeEntry::DeoptInlinedFrame> inlined_frames;
-    while (it.HasNext() &&
-           Translation::BEGIN !=
-               (opcode = static_cast<Translation::Opcode>(it.Next()))) {
-      if (opcode != Translation::JS_FRAME &&
-          opcode != Translation::INTERPRETED_FRAME) {
-        it.Skip(Translation::NumberOfOperandsFor(opcode));
-        continue;
-      }
-      BailoutId ast_id = BailoutId(it.Next());
-      int shared_info_id = it.Next();
-      it.Next();  // Skip height
-      SharedFunctionInfo* shared = SharedFunctionInfo::cast(
-          deopt_input_data->LiteralArray()->get(shared_info_id));
-      int source_position;
-      if (opcode == Translation::INTERPRETED_FRAME) {
-        source_position =
-            Deoptimizer::ComputeSourcePositionFromBytecodeArray(shared, ast_id);
-      } else {
-        DCHECK(opcode == Translation::JS_FRAME);
-        source_position =
-            Deoptimizer::ComputeSourcePositionFromBaselineCode(shared, ast_id);
-      }
-      int script_id = v8::UnboundScript::kNoScriptId;
-      if (shared->script()->IsScript()) {
-        Script* script = Script::cast(shared->script());
-        script_id = script->id();
-      }
-      CodeEntry::DeoptInlinedFrame frame = {source_position, script_id};
-      inlined_frames.push_back(frame);
+  Handle<Code> code(abstract_code->GetCode());
+
+  SourcePosition last_position = SourcePosition::Unknown();
+  int mask = RelocInfo::ModeMask(RelocInfo::DEOPT_ID) |
+             RelocInfo::ModeMask(RelocInfo::DEOPT_SCRIPT_OFFSET) |
+             RelocInfo::ModeMask(RelocInfo::DEOPT_INLINING_ID);
+  for (RelocIterator it(*code, mask); !it.done(); it.next()) {
+    RelocInfo* info = it.rinfo();
+    if (info->rmode() == RelocInfo::DEOPT_SCRIPT_OFFSET) {
+      int script_offset = static_cast<int>(info->data());
+      it.next();
+      DCHECK(it.rinfo()->rmode() == RelocInfo::DEOPT_INLINING_ID);
+      int inlining_id = static_cast<int>(it.rinfo()->data());
+      last_position = SourcePosition(script_offset, inlining_id);
+      continue;
     }
-    if (!inlined_frames.empty() && !entry->HasDeoptInlinedFramesFor(deopt_id)) {
-      entry->AddDeoptInlinedFrames(deopt_id, inlined_frames);
-      DCHECK(inlined_frames.empty());
+    if (info->rmode() == RelocInfo::DEOPT_ID) {
+      int deopt_id = static_cast<int>(info->data());
+      DCHECK(last_position.IsKnown());
+      std::vector<CpuProfileDeoptFrame> inlined_frames;
+      for (SourcePositionInfo& pos_info : last_position.InliningStack(code)) {
+        DCHECK(pos_info.position.ScriptOffset() != kNoSourcePosition);
+        size_t offset = static_cast<size_t>(pos_info.position.ScriptOffset());
+        int script_id = Script::cast(pos_info.function->script())->id();
+        inlined_frames.push_back(CpuProfileDeoptFrame({script_id, offset}));
+      }
+      if (!inlined_frames.empty() &&
+          !entry->HasDeoptInlinedFramesFor(deopt_id)) {
+        entry->AddDeoptInlinedFrames(deopt_id, std::move(inlined_frames));
+      }
     }
   }
 }
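Note: the rewritten RecordDeoptInlinedFrames no longer walks translation
records; it reads the reloc stream, which (per the DCHECK above) encodes each
deopt point as an ordered triple:

    // Expected reloc order consumed by the loop (a hedged reading):
    //   DEOPT_SCRIPT_OFFSET   -> script offset of the deopt position
    //   DEOPT_INLINING_ID     -> index into the code's inlining metadata
    //   DEOPT_ID              -> bailout id; SourcePosition(offset, id)
    //                            .InliningStack(code) expands the frames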
diff --git a/src/profiler/sampling-heap-profiler.cc b/src/profiler/sampling-heap-profiler.cc
index 3b2ca63..f2a3d4a 100644
--- a/src/profiler/sampling-heap-profiler.cc
+++ b/src/profiler/sampling-heap-profiler.cc
@@ -65,7 +65,7 @@
       stack_depth_(stack_depth),
       rate_(rate),
       flags_(flags) {
-  CHECK_GT(rate_, 0);
+  CHECK_GT(rate_, 0u);
   heap->new_space()->AddAllocationObserver(new_space_observer_.get());
   AllSpaces spaces(heap);
   for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
diff --git a/src/profiler/tick-sample.cc b/src/profiler/tick-sample.cc
index ecb2bf4..e1c84c4 100644
--- a/src/profiler/tick-sample.cc
+++ b/src/profiler/tick-sample.cc
@@ -5,6 +5,7 @@
 #include "src/profiler/tick-sample.h"
 
 #include "include/v8-profiler.h"
+#include "src/counters.h"
 #include "src/frames-inl.h"
 #include "src/msan.h"
 #include "src/simulator.h"
@@ -237,7 +238,15 @@
        it.top_frame_type() == internal::StackFrame::BUILTIN_EXIT)) {
     frames[i++] = isolate->c_function();
   }
+  i::RuntimeCallTimer* timer =
+      isolate->counters()->runtime_call_stats()->current_timer();
   for (; !it.done() && i < frames_limit; it.Advance()) {
+    while (timer && reinterpret_cast<i::Address>(timer) < it.frame()->fp() &&
+           i < frames_limit) {
+      frames[i++] = reinterpret_cast<i::Address>(timer->counter());
+      timer = timer->parent();
+    }
+    if (i == frames_limit) break;
     if (!it.frame()->is_interpreted()) {
       frames[i++] = it.frame()->pc();
       continue;
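
The tick-sample change above interleaves stack-allocated RuntimeCallTimer
entries with the walked JS frames by comparing their addresses against each
frame pointer: on a downward-growing stack, a lower address means a more
recent entry. A standalone sketch of that merge, with plain integers standing
in for timer and frame addresses (both sequences ordered from most recent,
lowest address, to oldest):

    #include <cstdint>
    #include <vector>

    std::vector<uintptr_t> InterleaveSample(
        const std::vector<uintptr_t>& frame_fps,
        const std::vector<uintptr_t>& timer_addrs, size_t frames_limit) {
      std::vector<uintptr_t> frames;
      size_t t = 0;
      for (uintptr_t fp : frame_fps) {
        // Emit every timer that sits above this frame on the C++ stack,
        // i.e. was entered more recently than the frame itself.
        while (t < timer_addrs.size() && timer_addrs[t] < fp &&
               frames.size() < frames_limit) {
          frames.push_back(timer_addrs[t++]);
        }
        if (frames.size() == frames_limit) break;
        frames.push_back(fp);
      }
      return frames;
    }
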
diff --git a/src/profiler/tracing-cpu-profiler.cc b/src/profiler/tracing-cpu-profiler.cc
index b24ca2f..8b31225 100644
--- a/src/profiler/tracing-cpu-profiler.cc
+++ b/src/profiler/tracing-cpu-profiler.cc
@@ -4,8 +4,13 @@
 
 #include "src/profiler/tracing-cpu-profiler.h"
 
+#include "src/profiler/cpu-profiler.h"
+#include "src/tracing/trace-event.h"
 #include "src/v8.h"
 
+#define PROFILER_TRACE_CATEGORY_ENABLED(cat) \
+  (*TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT(cat)))
+
 namespace v8 {
 
 std::unique_ptr<TracingCpuProfiler> TracingCpuProfiler::Create(
@@ -17,9 +22,57 @@
 
 namespace internal {
 
-TracingCpuProfilerImpl::TracingCpuProfilerImpl(Isolate* isolate) {}
+TracingCpuProfilerImpl::TracingCpuProfilerImpl(Isolate* isolate)
+    : isolate_(isolate), profiling_enabled_(false) {
+  // Make sure tracing system notices profiler categories.
+  // (i.e. make sure the tracing system registers both category strings.)
+  PROFILER_TRACE_CATEGORY_ENABLED("v8.cpu_profiler");
+  PROFILER_TRACE_CATEGORY_ENABLED("v8.cpu_profiler.hires");
+  V8::GetCurrentPlatform()->AddTraceStateObserver(this);
+}
 
-TracingCpuProfilerImpl::~TracingCpuProfilerImpl() {}
+TracingCpuProfilerImpl::~TracingCpuProfilerImpl() {
+  StopProfiling();
+  V8::GetCurrentPlatform()->RemoveTraceStateObserver(this);
+}
+
+void TracingCpuProfilerImpl::OnTraceEnabled() {
+  if (!PROFILER_TRACE_CATEGORY_ENABLED("v8.cpu_profiler")) return;
+  profiling_enabled_ = true;
+  isolate_->RequestInterrupt(
+      [](v8::Isolate*, void* data) {
+        reinterpret_cast<TracingCpuProfilerImpl*>(data)->StartProfiling();
+      },
+      this);
+}
+
+void TracingCpuProfilerImpl::OnTraceDisabled() {
+  base::LockGuard<base::Mutex> lock(&mutex_);
+  if (!profiling_enabled_) return;
+  profiling_enabled_ = false;
+  isolate_->RequestInterrupt(
+      [](v8::Isolate*, void* data) {
+        reinterpret_cast<TracingCpuProfilerImpl*>(data)->StopProfiling();
+      },
+      this);
+}
+
+void TracingCpuProfilerImpl::StartProfiling() {
+  base::LockGuard<base::Mutex> lock(&mutex_);
+  if (!profiling_enabled_ || profiler_) return;
+  int sampling_interval_us =
+      PROFILER_TRACE_CATEGORY_ENABLED("v8.cpu_profiler.hires") ? 100 : 1000;
+  profiler_.reset(new CpuProfiler(isolate_));
+  profiler_->set_sampling_interval(
+      base::TimeDelta::FromMicroseconds(sampling_interval_us));
+  profiler_->StartProfiling("", true);
+}
+
+void TracingCpuProfilerImpl::StopProfiling() {
+  base::LockGuard<base::Mutex> lock(&mutex_);
+  if (!profiler_) return;
+  profiler_->StopProfiling("");
+  profiler_.reset();
+}
 
 }  // namespace internal
 }  // namespace v8
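
The new TracingCpuProfilerImpl is a trace-state observer: a profiler is
created lazily when the "v8.cpu_profiler" category turns on and torn down when
tracing stops, with a mutex guarding the two paths (the real code additionally
bounces start/stop through an isolate interrupt so they run on the isolate's
thread). A simplified sketch of that shape, with stand-in types rather than
the v8::Platform API:

    #include <memory>
    #include <mutex>

    struct Profiler {
      explicit Profiler(int interval_us) : interval_us(interval_us) {}
      int interval_us;
    };

    class TracingProfilerObserver {
     public:
      void OnTraceEnabled(bool hires) {
        std::lock_guard<std::mutex> lock(mutex_);
        if (profiler_) return;
        // The hi-res category requests a 100us sampling interval, else 1ms,
        // matching the intervals chosen in StartProfiling above.
        profiler_ = std::make_unique<Profiler>(hires ? 100 : 1000);
      }
      void OnTraceDisabled() {
        std::lock_guard<std::mutex> lock(mutex_);
        profiler_.reset();
      }

     private:
      std::mutex mutex_;
      std::unique_ptr<Profiler> profiler_;
    };
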
diff --git a/src/profiler/tracing-cpu-profiler.h b/src/profiler/tracing-cpu-profiler.h
index 80f1bdc..a512a94 100644
--- a/src/profiler/tracing-cpu-profiler.h
+++ b/src/profiler/tracing-cpu-profiler.h
@@ -5,18 +5,37 @@
 #ifndef V8_PROFILER_TRACING_CPU_PROFILER_H
 #define V8_PROFILER_TRACING_CPU_PROFILER_H
 
+#include "include/v8-platform.h"
 #include "include/v8-profiler.h"
+#include "src/base/atomic-utils.h"
 #include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
 
 namespace v8 {
 namespace internal {
 
-class TracingCpuProfilerImpl final : public TracingCpuProfiler {
+class CpuProfiler;
+class Isolate;
+
+class TracingCpuProfilerImpl final : public TracingCpuProfiler,
+                                     private v8::Platform::TraceStateObserver {
  public:
   explicit TracingCpuProfilerImpl(Isolate*);
   ~TracingCpuProfilerImpl();
 
+  // v8::Platform::TraceStateObserver
+  void OnTraceEnabled() final;
+  void OnTraceDisabled() final;
+
  private:
+  void StartProfiling();
+  void StopProfiling();
+
+  Isolate* isolate_;
+  std::unique_ptr<CpuProfiler> profiler_;
+  bool profiling_enabled_;
+  base::Mutex mutex_;
+
   DISALLOW_COPY_AND_ASSIGN(TracingCpuProfilerImpl);
 };
 
diff --git a/src/promise-utils.cc b/src/promise-utils.cc
new file mode 100644
index 0000000..607dbe8
--- /dev/null
+++ b/src/promise-utils.cc
@@ -0,0 +1,75 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/promise-utils.h"
+
+#include "src/factory.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+enum PromiseResolvingFunctionContextSlot {
+  kAlreadyVisitedSlot = Context::MIN_CONTEXT_SLOTS,
+  kPromiseSlot,
+  kDebugEventSlot,
+  kPromiseContextLength,
+};
+
+JSObject* PromiseUtils::GetPromise(Handle<Context> context) {
+  return JSObject::cast(context->get(kPromiseSlot));
+}
+
+Object* PromiseUtils::GetDebugEvent(Handle<Context> context) {
+  return context->get(kDebugEventSlot);
+}
+
+bool PromiseUtils::HasAlreadyVisited(Handle<Context> context) {
+  return Smi::cast(context->get(kAlreadyVisitedSlot))->value() != 0;
+}
+
+void PromiseUtils::SetAlreadyVisited(Handle<Context> context) {
+  context->set(kAlreadyVisitedSlot, Smi::FromInt(1));
+}
+
+void PromiseUtils::CreateResolvingFunctions(Isolate* isolate,
+                                            Handle<JSObject> promise,
+                                            Handle<Object> debug_event,
+                                            Handle<JSFunction>* resolve,
+                                            Handle<JSFunction>* reject) {
+  DCHECK(debug_event->IsTrue(isolate) || debug_event->IsFalse(isolate));
+  Handle<Context> context =
+      isolate->factory()->NewPromiseResolvingFunctionContext(
+          kPromiseContextLength);
+  context->set_native_context(*isolate->native_context());
+  // We set the closure to be an empty function, same as native context.
+  context->set_closure(isolate->native_context()->closure());
+  context->set(kAlreadyVisitedSlot, Smi::kZero);
+  context->set(kPromiseSlot, *promise);
+  context->set(kDebugEventSlot, *debug_event);
+
+  Handle<SharedFunctionInfo> resolve_shared_fun(
+      isolate->native_context()->promise_resolve_shared_fun(), isolate);
+  Handle<JSFunction> resolve_fun =
+      isolate->factory()->NewFunctionFromSharedFunctionInfo(
+          isolate->sloppy_function_without_prototype_map(), resolve_shared_fun,
+          isolate->native_context(), TENURED);
+
+  Handle<SharedFunctionInfo> reject_shared_fun(
+      isolate->native_context()->promise_reject_shared_fun(), isolate);
+  Handle<JSFunction> reject_fun =
+      isolate->factory()->NewFunctionFromSharedFunctionInfo(
+          isolate->sloppy_function_without_prototype_map(), reject_shared_fun,
+          isolate->native_context(), TENURED);
+
+  resolve_fun->set_context(*context);
+  reject_fun->set_context(*context);
+
+  *resolve = resolve_fun;
+  *reject = reject_fun;
+}
+
+}  // namespace internal
+}  // namespace v8
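
The resolving functions created above share a single context whose
kAlreadyVisitedSlot implements the spec's [[AlreadyResolved]] flag: whichever
of resolve/reject runs first flips it, and the other becomes a no-op. A tiny
stand-in sketch of that flag, using a flat array in place of a real Context:

    #include <array>

    enum Slot { kAlreadyVisited, kPromise, kDebugEvent, kContextLength };
    using ResolvingContext = std::array<int, kContextLength>;

    // Both resolve and reject call this first; only the first caller
    // proceeds, making resolution idempotent.
    bool TryVisit(ResolvingContext& ctx) {
      if (ctx[kAlreadyVisited] != 0) return false;
      ctx[kAlreadyVisited] = 1;
      return true;
    }
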
diff --git a/src/promise-utils.h b/src/promise-utils.h
new file mode 100644
index 0000000..6ed6fcd
--- /dev/null
+++ b/src/promise-utils.h
@@ -0,0 +1,32 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PROMISE_UTILS_H_
+#define V8_PROMISE_UTILS_H_
+
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+// Helper methods for Promise builtins.
+class PromiseUtils : public AllStatic {
+ public:
+  // These get and set the slots on the PromiseResolvingContext, which
+  // is used by the resolve/reject promise callbacks.
+  static JSObject* GetPromise(Handle<Context> context);
+  static Object* GetDebugEvent(Handle<Context> context);
+  static bool HasAlreadyVisited(Handle<Context> context);
+  static void SetAlreadyVisited(Handle<Context> context);
+
+  static void CreateResolvingFunctions(Isolate* isolate,
+                                       Handle<JSObject> promise,
+                                       Handle<Object> debug_event,
+                                       Handle<JSFunction>* resolve,
+                                       Handle<JSFunction>* reject);
+};
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_PROMISE_UTILS_H_
diff --git a/src/property.h b/src/property.h
index ebe7d3b..233233c 100644
--- a/src/property.h
+++ b/src/property.h
@@ -32,7 +32,7 @@
   PropertyDetails details_;
 
  protected:
-  Descriptor() : details_(Smi::FromInt(0)) {}
+  Descriptor() : details_(Smi::kZero) {}
 
   void Init(Handle<Name> key, Handle<Object> value, PropertyDetails details) {
     DCHECK(key->IsUniqueName());
diff --git a/src/prototype.h b/src/prototype.h
index 032d9b6..38d6cab 100644
--- a/src/prototype.h
+++ b/src/prototype.h
@@ -32,9 +32,9 @@
   PrototypeIterator(Isolate* isolate, Handle<JSReceiver> receiver,
                     WhereToStart where_to_start = kStartAtPrototype,
                     WhereToEnd where_to_end = END_AT_NULL)
-      : object_(NULL),
+      : isolate_(isolate),
+        object_(NULL),
         handle_(receiver),
-        isolate_(isolate),
         where_to_end_(where_to_end),
         is_at_end_(false),
         seen_proxies_(0) {
@@ -45,28 +45,43 @@
   PrototypeIterator(Isolate* isolate, JSReceiver* receiver,
                     WhereToStart where_to_start = kStartAtPrototype,
                     WhereToEnd where_to_end = END_AT_NULL)
-      : object_(receiver),
-        isolate_(isolate),
+      : isolate_(isolate),
+        object_(receiver),
         where_to_end_(where_to_end),
         is_at_end_(false),
         seen_proxies_(0) {
     if (where_to_start == kStartAtPrototype) Advance();
   }
 
-  explicit PrototypeIterator(Map* receiver_map)
-      : object_(receiver_map->prototype()),
-        isolate_(receiver_map->GetIsolate()),
-        where_to_end_(END_AT_NULL),
+  explicit PrototypeIterator(Map* receiver_map,
+                             WhereToEnd where_to_end = END_AT_NULL)
+      : isolate_(receiver_map->GetIsolate()),
+        object_(receiver_map->GetPrototypeChainRootMap(isolate_)->prototype()),
+        where_to_end_(where_to_end),
         is_at_end_(object_->IsNull(isolate_)),
-        seen_proxies_(0) {}
+        seen_proxies_(0) {
+    if (!is_at_end_ && where_to_end_ == END_AT_NON_HIDDEN) {
+      DCHECK(object_->IsJSReceiver());
+      Map* map = JSReceiver::cast(object_)->map();
+      is_at_end_ = !map->has_hidden_prototype();
+    }
+  }
 
-  explicit PrototypeIterator(Handle<Map> receiver_map)
-      : object_(NULL),
-        handle_(handle(receiver_map->prototype(), receiver_map->GetIsolate())),
-        isolate_(receiver_map->GetIsolate()),
-        where_to_end_(END_AT_NULL),
+  explicit PrototypeIterator(Handle<Map> receiver_map,
+                             WhereToEnd where_to_end = END_AT_NULL)
+      : isolate_(receiver_map->GetIsolate()),
+        object_(NULL),
+        handle_(receiver_map->GetPrototypeChainRootMap(isolate_)->prototype(),
+                isolate_),
+        where_to_end_(where_to_end),
         is_at_end_(handle_->IsNull(isolate_)),
-        seen_proxies_(0) {}
+        seen_proxies_(0) {
+    if (!is_at_end_ && where_to_end_ == END_AT_NON_HIDDEN) {
+      DCHECK(handle_->IsJSReceiver());
+      Map* map = JSReceiver::cast(*handle_)->map();
+      is_at_end_ = !map->has_hidden_prototype();
+    }
+  }
 
   ~PrototypeIterator() {}
 
@@ -161,9 +176,9 @@
   bool IsAtEnd() const { return is_at_end_; }
 
  private:
+  Isolate* isolate_;
   Object* object_;
   Handle<Object> handle_;
-  Isolate* isolate_;
   WhereToEnd where_to_end_;
   bool is_at_end_;
   int seen_proxies_;
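
The END_AT_NON_HIDDEN mode added above stops the walk as soon as the object
just reached no longer has a hidden prototype behind it. A stand-in sketch of
the loop, with plain structs instead of real Maps:

    #include <vector>

    struct Obj {
      Obj* prototype = nullptr;
      bool has_hidden_prototype = false;
    };

    std::vector<Obj*> WalkHiddenPrototypes(Obj* receiver) {
      std::vector<Obj*> visited;
      Obj* current = receiver->prototype;  // kStartAtPrototype
      while (current != nullptr) {
        visited.push_back(current);
        if (!current->has_hidden_prototype) break;  // END_AT_NON_HIDDEN
        current = current->prototype;
      }
      return visited;
    }
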
diff --git a/src/regexp/OWNERS b/src/regexp/OWNERS
index d9d588d..c493afa 100644
--- a/src/regexp/OWNERS
+++ b/src/regexp/OWNERS
@@ -1,6 +1,4 @@
 set noparent
 
-jochen@chromium.org
-marja@chromium.org
-ulan@chromium.org
+jgruber@chromium.org
 yangguo@chromium.org
diff --git a/src/regexp/jsregexp.cc b/src/regexp/jsregexp.cc
index 96a778c..f0abc9a 100644
--- a/src/regexp/jsregexp.cc
+++ b/src/regexp/jsregexp.cc
@@ -136,16 +136,12 @@
                                         Handle<String> pattern,
                                         JSRegExp::Flags flags) {
   Isolate* isolate = re->GetIsolate();
-  Zone zone(isolate->allocator());
+  Zone zone(isolate->allocator(), ZONE_NAME);
   CompilationCache* compilation_cache = isolate->compilation_cache();
   MaybeHandle<FixedArray> maybe_cached =
       compilation_cache->LookupRegExp(pattern, flags);
   Handle<FixedArray> cached;
-  bool in_cache = maybe_cached.ToHandle(&cached);
-  LOG(isolate, RegExpCompileEvent(re, in_cache));
-
-  Handle<Object> result;
-  if (in_cache) {
+  if (maybe_cached.ToHandle(&cached)) {
     re->set_data(*cached);
     return re;
   }
@@ -194,7 +190,7 @@
 
 MaybeHandle<Object> RegExpImpl::Exec(Handle<JSRegExp> regexp,
                                      Handle<String> subject, int index,
-                                     Handle<JSObject> last_match_info) {
+                                     Handle<RegExpMatchInfo> last_match_info) {
   switch (regexp->TypeTag()) {
     case JSRegExp::ATOM:
       return AtomExec(regexp, subject, index, last_match_info);
@@ -222,17 +218,14 @@
                                                  match_pattern);
 }
 
-
-static void SetAtomLastCapture(FixedArray* array,
-                               String* subject,
-                               int from,
-                               int to) {
-  SealHandleScope shs(array->GetIsolate());
-  RegExpImpl::SetLastCaptureCount(array, 2);
-  RegExpImpl::SetLastSubject(array, subject);
-  RegExpImpl::SetLastInput(array, subject);
-  RegExpImpl::SetCapture(array, 0, from);
-  RegExpImpl::SetCapture(array, 1, to);
+static void SetAtomLastCapture(Handle<RegExpMatchInfo> last_match_info,
+                               String* subject, int from, int to) {
+  SealHandleScope shs(last_match_info->GetIsolate());
+  last_match_info->SetNumberOfCaptureRegisters(2);
+  last_match_info->SetLastSubject(subject);
+  last_match_info->SetLastInput(subject);
+  last_match_info->SetCapture(0, from);
+  last_match_info->SetCapture(1, to);
 }
 
 
@@ -289,7 +282,7 @@
 
 Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re, Handle<String> subject,
                                     int index,
-                                    Handle<JSObject> last_match_info) {
+                                    Handle<RegExpMatchInfo> last_match_info) {
   Isolate* isolate = re->GetIsolate();
 
   static const int kNumRegisters = 2;
@@ -302,8 +295,8 @@
 
   DCHECK_EQ(res, RegExpImpl::RE_SUCCESS);
   SealHandleScope shs(isolate);
-  FixedArray* array = FixedArray::cast(last_match_info->elements());
-  SetAtomLastCapture(array, *subject, output_registers[0], output_registers[1]);
+  SetAtomLastCapture(last_match_info, *subject, output_registers[0],
+                     output_registers[1]);
   return last_match_info;
 }
 
@@ -343,7 +336,7 @@
                                  bool is_one_byte) {
   // Compile the RegExp.
   Isolate* isolate = re->GetIsolate();
-  Zone zone(isolate->allocator());
+  Zone zone(isolate->allocator(), ZONE_NAME);
   PostponeInterruptsScope postpone(isolate);
   // If compilation failed the last time, the error is saved at the
   // saved code index.
@@ -417,7 +410,7 @@
 void RegExpImpl::SetIrregexpCaptureNameMap(FixedArray* re,
                                            Handle<FixedArray> value) {
   if (value.is_null()) {
-    re->set(JSRegExp::kIrregexpCaptureNameMapIndex, Smi::FromInt(0));
+    re->set(JSRegExp::kIrregexpCaptureNameMapIndex, Smi::kZero);
   } else {
     re->set(JSRegExp::kIrregexpCaptureNameMapIndex, *value);
   }
@@ -566,10 +559,9 @@
 #endif  // V8_INTERPRETED_REGEXP
 }
 
-MaybeHandle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> regexp,
-                                             Handle<String> subject,
-                                             int previous_index,
-                                             Handle<JSObject> last_match_info) {
+MaybeHandle<Object> RegExpImpl::IrregexpExec(
+    Handle<JSRegExp> regexp, Handle<String> subject, int previous_index,
+    Handle<RegExpMatchInfo> last_match_info) {
   Isolate* isolate = regexp->GetIsolate();
   DCHECK_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
 
@@ -613,31 +605,40 @@
   return isolate->factory()->null_value();
 }
 
-static void EnsureSize(Handle<JSObject> array, uint32_t minimum_size) {
-  if (static_cast<uint32_t>(array->elements()->length()) < minimum_size) {
-    array->GetElementsAccessor()->GrowCapacityAndConvert(array, minimum_size);
-  }
-}
+Handle<RegExpMatchInfo> RegExpImpl::SetLastMatchInfo(
+    Handle<RegExpMatchInfo> last_match_info, Handle<String> subject,
+    int capture_count, int32_t* match) {
+  // This is the only place where match infos can grow. If, after executing the
+  // regexp, RegExpExecStub finds that the match info is too small, it restarts
+  // execution in RegExpImpl::Exec, which finally grows the match info right
+  // here.
 
-Handle<JSObject> RegExpImpl::SetLastMatchInfo(Handle<JSObject> last_match_info,
-                                              Handle<String> subject,
-                                              int capture_count,
-                                              int32_t* match) {
-  DCHECK(last_match_info->HasFastObjectElements());
   int capture_register_count = (capture_count + 1) * 2;
-  EnsureSize(last_match_info, capture_register_count + kLastMatchOverhead);
-  DisallowHeapAllocation no_allocation;
-  FixedArray* array = FixedArray::cast(last_match_info->elements());
-  if (match != NULL) {
-    for (int i = 0; i < capture_register_count; i += 2) {
-      SetCapture(array, i, match[i]);
-      SetCapture(array, i + 1, match[i + 1]);
+  Handle<RegExpMatchInfo> result =
+      RegExpMatchInfo::ReserveCaptures(last_match_info, capture_register_count);
+  result->SetNumberOfCaptureRegisters(capture_register_count);
+
+  if (*result != *last_match_info) {
+    // The match info has been reallocated, update the corresponding reference
+    // on the native context.
+    Isolate* isolate = last_match_info->GetIsolate();
+    if (*last_match_info == *isolate->regexp_last_match_info()) {
+      isolate->native_context()->set_regexp_last_match_info(*result);
+    } else if (*last_match_info == *isolate->regexp_internal_match_info()) {
+      isolate->native_context()->set_regexp_internal_match_info(*result);
     }
   }
-  SetLastCaptureCount(array, capture_register_count);
-  SetLastSubject(array, *subject);
-  SetLastInput(array, *subject);
-  return last_match_info;
+
+  DisallowHeapAllocation no_allocation;
+  if (match != NULL) {
+    for (int i = 0; i < capture_register_count; i += 2) {
+      result->SetCapture(i, match[i]);
+      result->SetCapture(i + 1, match[i + 1]);
+    }
+  }
+  result->SetLastSubject(*subject);
+  result->SetLastInput(*subject);
+  return result;
 }
 
 
@@ -6781,10 +6782,10 @@
                                    FixedArray** last_match_cache,
                                    ResultsCacheType type) {
   FixedArray* cache;
-  if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
+  if (!key_string->IsInternalizedString()) return Smi::kZero;
   if (type == STRING_SPLIT_SUBSTRINGS) {
     DCHECK(key_pattern->IsString());
-    if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
+    if (!key_pattern->IsInternalizedString()) return Smi::kZero;
     cache = heap->string_split_cache();
   } else {
     DCHECK(type == REGEXP_MULTIPLE_INDICES);
@@ -6801,7 +6802,7 @@
         ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
     if (cache->get(index + kStringOffset) != key_string ||
         cache->get(index + kPatternOffset) != key_pattern) {
-      return Smi::FromInt(0);
+      return Smi::kZero;
     }
   }
 
@@ -6831,7 +6832,7 @@
   uint32_t hash = key_string->Hash();
   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
                     ~(kArrayEntriesPerCacheEntry - 1));
-  if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
+  if (cache->get(index + kStringOffset) == Smi::kZero) {
     cache->set(index + kStringOffset, *key_string);
     cache->set(index + kPatternOffset, *key_pattern);
     cache->set(index + kArrayOffset, *value_array);
@@ -6839,16 +6840,16 @@
   } else {
     uint32_t index2 =
         ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
-    if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
+    if (cache->get(index2 + kStringOffset) == Smi::kZero) {
       cache->set(index2 + kStringOffset, *key_string);
       cache->set(index2 + kPatternOffset, *key_pattern);
       cache->set(index2 + kArrayOffset, *value_array);
       cache->set(index2 + kLastMatchOffset, *last_match_cache);
     } else {
-      cache->set(index2 + kStringOffset, Smi::FromInt(0));
-      cache->set(index2 + kPatternOffset, Smi::FromInt(0));
-      cache->set(index2 + kArrayOffset, Smi::FromInt(0));
-      cache->set(index2 + kLastMatchOffset, Smi::FromInt(0));
+      cache->set(index2 + kStringOffset, Smi::kZero);
+      cache->set(index2 + kPatternOffset, Smi::kZero);
+      cache->set(index2 + kArrayOffset, Smi::kZero);
+      cache->set(index2 + kLastMatchOffset, Smi::kZero);
       cache->set(index + kStringOffset, *key_string);
       cache->set(index + kPatternOffset, *key_pattern);
       cache->set(index + kArrayOffset, *value_array);
@@ -6865,13 +6866,13 @@
     }
   }
   // Convert backing store to a copy-on-write array.
-  value_array->set_map_no_write_barrier(*factory->fixed_cow_array_map());
+  value_array->set_map_no_write_barrier(isolate->heap()->fixed_cow_array_map());
 }
 
 
 void RegExpResultsCache::Clear(FixedArray* cache) {
   for (int i = 0; i < kRegExpResultsCacheSize; i++) {
-    cache->set(i, Smi::FromInt(0));
+    cache->set(i, Smi::kZero);
   }
 }
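
SetLastMatchInfo now reserves capture capacity up front, and if
ReserveCaptures reallocates the match info, the canonical per-context
reference has to be redirected to the replacement object. A sketch of that
republish-on-grow pattern with stand-in types:

    #include <memory>
    #include <vector>

    using MatchInfo = std::vector<int>;

    std::shared_ptr<MatchInfo> EnsureCapacity(
        std::shared_ptr<MatchInfo> info,
        std::shared_ptr<MatchInfo>* canonical_slot, size_t registers) {
      if (info->size() >= registers) return info;
      auto grown = std::make_shared<MatchInfo>(registers, -1);
      // If the old object was the canonical match info, republish the
      // grown one so later lookups see the new backing store.
      if (info == *canonical_slot) *canonical_slot = grown;
      return grown;
    }
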
 
diff --git a/src/regexp/jsregexp.h b/src/regexp/jsregexp.h
index 8118889..b2e84ba 100644
--- a/src/regexp/jsregexp.h
+++ b/src/regexp/jsregexp.h
@@ -48,7 +48,7 @@
   // This function calls the garbage collector if necessary.
   V8_EXPORT_PRIVATE MUST_USE_RESULT static MaybeHandle<Object> Exec(
       Handle<JSRegExp> regexp, Handle<String> subject, int index,
-      Handle<JSObject> lastMatchInfo);
+      Handle<RegExpMatchInfo> last_match_info);
 
   // Prepares a JSRegExp object with Irregexp-specific data.
   static void IrregexpInitialize(Handle<JSRegExp> re,
@@ -71,7 +71,7 @@
 
   static Handle<Object> AtomExec(Handle<JSRegExp> regexp,
                                  Handle<String> subject, int index,
-                                 Handle<JSObject> lastMatchInfo);
+                                 Handle<RegExpMatchInfo> last_match_info);
 
   enum IrregexpResult { RE_FAILURE = 0, RE_SUCCESS = 1, RE_EXCEPTION = -1 };
 
@@ -103,12 +103,12 @@
   // Returns an empty handle in case of an exception.
   MUST_USE_RESULT static MaybeHandle<Object> IrregexpExec(
       Handle<JSRegExp> regexp, Handle<String> subject, int index,
-      Handle<JSObject> lastMatchInfo);
+      Handle<RegExpMatchInfo> last_match_info);
 
   // Set last match info.  If match is NULL, then setting captures is omitted.
-  static Handle<JSObject> SetLastMatchInfo(Handle<JSObject> last_match_info,
-                                           Handle<String> subject,
-                                           int capture_count, int32_t* match);
+  static Handle<RegExpMatchInfo> SetLastMatchInfo(
+      Handle<RegExpMatchInfo> last_match_info, Handle<String> subject,
+      int capture_count, int32_t* match);
 
   class GlobalCache {
    public:
@@ -142,49 +142,6 @@
     Handle<String> subject_;
   };
 
-
-  // Array index in the lastMatchInfo array.
-  static const int kLastCaptureCount = 0;
-  static const int kLastSubject = 1;
-  static const int kLastInput = 2;
-  static const int kFirstCapture = 3;
-  static const int kLastMatchOverhead = 3;
-
-  // Direct offset into the lastMatchInfo array.
-  static const int kLastCaptureCountOffset =
-      FixedArray::kHeaderSize + kLastCaptureCount * kPointerSize;
-  static const int kLastSubjectOffset =
-      FixedArray::kHeaderSize + kLastSubject * kPointerSize;
-  static const int kLastInputOffset =
-      FixedArray::kHeaderSize + kLastInput * kPointerSize;
-  static const int kFirstCaptureOffset =
-      FixedArray::kHeaderSize + kFirstCapture * kPointerSize;
-
-  // Used to access the lastMatchInfo array.
-  static int GetCapture(FixedArray* array, int index) {
-    return Smi::cast(array->get(index + kFirstCapture))->value();
-  }
-
-  static void SetLastCaptureCount(FixedArray* array, int to) {
-    array->set(kLastCaptureCount, Smi::FromInt(to));
-  }
-
-  static void SetLastSubject(FixedArray* array, String* to) {
-    array->set(kLastSubject, to);
-  }
-
-  static void SetLastInput(FixedArray* array, String* to) {
-    array->set(kLastInput, to);
-  }
-
-  static void SetCapture(FixedArray* array, int index, int to) {
-    array->set(index + kFirstCapture, Smi::FromInt(to));
-  }
-
-  static int GetLastCaptureCount(FixedArray* array) {
-    return Smi::cast(array->get(kLastCaptureCount))->value();
-  }
-
   // For acting on the JSRegExp data FixedArray.
   static int IrregexpMaxRegisterCount(FixedArray* re);
   static void SetIrregexpMaxRegisterCount(FixedArray* re, int value);
diff --git a/src/regexp/regexp-macro-assembler-tracer.cc b/src/regexp/regexp-macro-assembler-tracer.cc
index ec86526..abdf577 100644
--- a/src/regexp/regexp-macro-assembler-tracer.cc
+++ b/src/regexp/regexp-macro-assembler-tracer.cc
@@ -12,9 +12,9 @@
 RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer(
     Isolate* isolate, RegExpMacroAssembler* assembler)
     : RegExpMacroAssembler(isolate, assembler->zone()), assembler_(assembler) {
-  unsigned int type = assembler->Implementation();
-  DCHECK(type < 8);
-  const char* impl_names[] = {"IA32", "ARM", "ARM64", "MIPS",
+  IrregexpImplementation type = assembler->Implementation();
+  DCHECK_LT(type, 9);
+  const char* impl_names[] = {"IA32", "ARM", "ARM64", "MIPS",    "S390",
                               "PPC",  "X64", "X87",   "Bytecode"};
   PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
 }
diff --git a/src/regexp/regexp-parser.cc b/src/regexp/regexp-parser.cc
index dba81ae..fd3123f 100644
--- a/src/regexp/regexp-parser.cc
+++ b/src/regexp/regexp-parser.cc
@@ -1607,7 +1607,7 @@
 
 
 void RegExpBuilder::AddUnicodeCharacter(uc32 c) {
-  if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
+  if (c > static_cast<uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
     DCHECK(unicode());
     AddLeadSurrogate(unibrow::Utf16::LeadSurrogate(c));
     AddTrailSurrogate(unibrow::Utf16::TrailSurrogate(c));
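
AddUnicodeCharacter splits supplementary-plane code points into a UTF-16
surrogate pair. For reference, a standalone version of the standard encoding
that the Lead/TrailSurrogate helpers implement:

    #include <cassert>
    #include <cstdint>

    void EncodeUtf16(uint32_t c, uint16_t* lead, uint16_t* trail) {
      assert(c > 0xFFFF && c <= 0x10FFFF);  // supplementary plane only
      c -= 0x10000;
      *lead = static_cast<uint16_t>(0xD800 + (c >> 10));     // top 10 bits
      *trail = static_cast<uint16_t>(0xDC00 + (c & 0x3FF));  // low 10 bits
    }
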
diff --git a/src/regexp/regexp-utils.cc b/src/regexp/regexp-utils.cc
new file mode 100644
index 0000000..62daf3f
--- /dev/null
+++ b/src/regexp/regexp-utils.cc
@@ -0,0 +1,192 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/regexp/regexp-utils.h"
+
+#include "src/factory.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/regexp/jsregexp.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<String> RegExpUtils::GenericCaptureGetter(
+    Isolate* isolate, Handle<RegExpMatchInfo> match_info, int capture,
+    bool* ok) {
+  const int index = capture * 2;
+  if (index >= match_info->NumberOfCaptureRegisters()) {
+    if (ok != nullptr) *ok = false;
+    return isolate->factory()->empty_string();
+  }
+
+  const int match_start = match_info->Capture(index);
+  const int match_end = match_info->Capture(index + 1);
+  if (match_start == -1 || match_end == -1) {
+    if (ok != nullptr) *ok = false;
+    return isolate->factory()->empty_string();
+  }
+
+  if (ok != nullptr) *ok = true;
+  Handle<String> last_subject(match_info->LastSubject());
+  return isolate->factory()->NewSubString(last_subject, match_start, match_end);
+}
+
+namespace {
+
+V8_INLINE bool HasInitialRegExpMap(Isolate* isolate, Handle<JSReceiver> recv) {
+  return recv->map() == isolate->regexp_function()->initial_map();
+}
+
+}  // namespace
+
+MaybeHandle<Object> RegExpUtils::SetLastIndex(Isolate* isolate,
+                                              Handle<JSReceiver> recv,
+                                              int value) {
+  if (HasInitialRegExpMap(isolate, recv)) {
+    JSRegExp::cast(*recv)->SetLastIndex(value);
+    return recv;
+  } else {
+    return Object::SetProperty(recv, isolate->factory()->lastIndex_string(),
+                               handle(Smi::FromInt(value), isolate), STRICT);
+  }
+}
+
+MaybeHandle<Object> RegExpUtils::GetLastIndex(Isolate* isolate,
+                                              Handle<JSReceiver> recv) {
+  if (HasInitialRegExpMap(isolate, recv)) {
+    return handle(JSRegExp::cast(*recv)->LastIndex(), isolate);
+  } else {
+    return Object::GetProperty(recv, isolate->factory()->lastIndex_string());
+  }
+}
+
+// ES#sec-regexpexec Runtime Semantics: RegExpExec ( R, S )
+// Also takes an optional exec method in case our caller
+// has already fetched exec.
+MaybeHandle<Object> RegExpUtils::RegExpExec(Isolate* isolate,
+                                            Handle<JSReceiver> regexp,
+                                            Handle<String> string,
+                                            Handle<Object> exec) {
+  if (exec->IsUndefined(isolate)) {
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate, exec,
+        Object::GetProperty(regexp, isolate->factory()->exec_string()), Object);
+  }
+
+  if (exec->IsCallable()) {
+    const int argc = 1;
+    ScopedVector<Handle<Object>> argv(argc);
+    argv[0] = string;
+
+    Handle<Object> result;
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate, result,
+        Execution::Call(isolate, exec, regexp, argc, argv.start()), Object);
+
+    if (!result->IsJSReceiver() && !result->IsNull(isolate)) {
+      THROW_NEW_ERROR(isolate,
+                      NewTypeError(MessageTemplate::kInvalidRegExpExecResult),
+                      Object);
+    }
+    return result;
+  }
+
+  if (!regexp->IsJSRegExp()) {
+    THROW_NEW_ERROR(isolate,
+                    NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+                                 isolate->factory()->NewStringFromAsciiChecked(
+                                     "RegExp.prototype.exec"),
+                                 regexp),
+                    Object);
+  }
+
+  {
+    Handle<JSFunction> regexp_exec = isolate->regexp_exec_function();
+
+    const int argc = 1;
+    ScopedVector<Handle<Object>> argv(argc);
+    argv[0] = string;
+
+    return Execution::Call(isolate, regexp_exec, regexp, argc, argv.start());
+  }
+}
+
+Maybe<bool> RegExpUtils::IsRegExp(Isolate* isolate, Handle<Object> object) {
+  if (!object->IsJSReceiver()) return Just(false);
+
+  Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
+
+  if (isolate->regexp_function()->initial_map() == receiver->map()) {
+    // Fast-path for unmodified JSRegExp instances.
+    // TODO(ishell): Adapt for new fast-path logic.
+    return Just(true);
+  }
+
+  Handle<Object> match;
+  ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+      isolate, match,
+      JSObject::GetProperty(receiver, isolate->factory()->match_symbol()),
+      Nothing<bool>());
+
+  if (!match->IsUndefined(isolate)) return Just(match->BooleanValue());
+  return Just(object->IsJSRegExp());
+}
+
+bool RegExpUtils::IsUnmodifiedRegExp(Isolate* isolate, Handle<Object> obj) {
+  // TODO(ishell): Update this check once map changes for constant field
+  // tracking are landing.
+
+  if (!obj->IsJSReceiver()) return false;
+
+  JSReceiver* recv = JSReceiver::cast(*obj);
+
+  // Check the receiver's map.
+  Handle<JSFunction> regexp_function = isolate->regexp_function();
+  if (recv->map() != regexp_function->initial_map()) return false;
+
+  // Check the receiver's prototype's map.
+  Object* proto = recv->map()->prototype();
+  if (!proto->IsJSReceiver()) return false;
+
+  Handle<Map> initial_proto_initial_map = isolate->regexp_prototype_map();
+  return (JSReceiver::cast(proto)->map() == *initial_proto_initial_map);
+}
+
+int RegExpUtils::AdvanceStringIndex(Isolate* isolate, Handle<String> string,
+                                    int index, bool unicode) {
+  if (unicode && index < string->length()) {
+    const uint16_t first = string->Get(index);
+    if (first >= 0xD800 && first <= 0xDBFF && string->length() > index + 1) {
+      const uint16_t second = string->Get(index + 1);
+      if (second >= 0xDC00 && second <= 0xDFFF) {
+        return index + 2;
+      }
+    }
+  }
+
+  return index + 1;
+}
+
+MaybeHandle<Object> RegExpUtils::SetAdvancedStringIndex(
+    Isolate* isolate, Handle<JSReceiver> regexp, Handle<String> string,
+    bool unicode) {
+  Handle<Object> last_index_obj;
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, last_index_obj,
+      Object::GetProperty(regexp, isolate->factory()->lastIndex_string()),
+      Object);
+
+  ASSIGN_RETURN_ON_EXCEPTION(isolate, last_index_obj,
+                             Object::ToLength(isolate, last_index_obj), Object);
+
+  const int last_index = Handle<Smi>::cast(last_index_obj)->value();
+  const int new_last_index =
+      AdvanceStringIndex(isolate, string, last_index, unicode);
+
+  return SetLastIndex(isolate, regexp, new_last_index);
+}
+
+}  // namespace internal
+}  // namespace v8
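
AdvanceStringIndex above implements ES#sec-advancestringindex: in unicode mode
a well-formed surrogate pair advances the index by two code units instead of
one. A self-contained version over raw UTF-16 code units:

    #include <cstdint>
    #include <vector>

    int AdvanceStringIndex(const std::vector<uint16_t>& s, int index,
                           bool unicode) {
      if (unicode && index < static_cast<int>(s.size()) - 1) {
        const uint16_t first = s[index];
        const uint16_t second = s[index + 1];
        // Lead surrogate followed by trail surrogate: skip both units.
        if (first >= 0xD800 && first <= 0xDBFF && second >= 0xDC00 &&
            second <= 0xDFFF) {
          return index + 2;
        }
      }
      return index + 1;
    }
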
diff --git a/src/regexp/regexp-utils.h b/src/regexp/regexp-utils.h
new file mode 100644
index 0000000..eff1ed7
--- /dev/null
+++ b/src/regexp/regexp-utils.h
@@ -0,0 +1,52 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGEXP_REGEXP_UTILS_H_
+#define V8_REGEXP_REGEXP_UTILS_H_
+
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+// Helper methods for C++ regexp builtins.
+class RegExpUtils : public AllStatic {
+ public:
+  // Last match info accessors.
+  static Handle<String> GenericCaptureGetter(Isolate* isolate,
+                                             Handle<RegExpMatchInfo> match_info,
+                                             int capture, bool* ok = nullptr);
+
+  // Last index (RegExp.lastIndex) accessors.
+  static MUST_USE_RESULT MaybeHandle<Object> SetLastIndex(
+      Isolate* isolate, Handle<JSReceiver> regexp, int value);
+  static MUST_USE_RESULT MaybeHandle<Object> GetLastIndex(
+      Isolate* isolate, Handle<JSReceiver> recv);
+
+  // ES#sec-regexpexec Runtime Semantics: RegExpExec ( R, S )
+  static MUST_USE_RESULT MaybeHandle<Object> RegExpExec(
+      Isolate* isolate, Handle<JSReceiver> regexp, Handle<String> string,
+      Handle<Object> exec);
+
+  // ES#sec-isregexp IsRegExp ( argument )
+  // Includes checking of the match property.
+  static Maybe<bool> IsRegExp(Isolate* isolate, Handle<Object> object);
+
+  // Checks whether the given object is an unmodified JSRegExp instance.
+  // Neither the object's map, nor its prototype's map may be modified.
+  static bool IsUnmodifiedRegExp(Isolate* isolate, Handle<Object> obj);
+
+  // ES#sec-advancestringindex
+  // AdvanceStringIndex ( S, index, unicode )
+  static int AdvanceStringIndex(Isolate* isolate, Handle<String> string,
+                                int index, bool unicode);
+  static MUST_USE_RESULT MaybeHandle<Object> SetAdvancedStringIndex(
+      Isolate* isolate, Handle<JSReceiver> regexp, Handle<String> string,
+      bool unicode);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_REGEXP_REGEXP_UTILS_H_
diff --git a/src/register-configuration.cc b/src/register-configuration.cc
index 1a198ea..28d0ab2 100644
--- a/src/register-configuration.cc
+++ b/src/register-configuration.cc
@@ -70,15 +70,12 @@
 #if V8_TARGET_ARCH_IA32
             kMaxAllocatableGeneralRegisterCount,
             kMaxAllocatableDoubleRegisterCount,
-            kMaxAllocatableDoubleRegisterCount,
 #elif V8_TARGET_ARCH_X87
             kMaxAllocatableGeneralRegisterCount,
             compiler == TURBOFAN ? 1 : kMaxAllocatableDoubleRegisterCount,
-            compiler == TURBOFAN ? 1 : kMaxAllocatableDoubleRegisterCount,
 #elif V8_TARGET_ARCH_X64
             kMaxAllocatableGeneralRegisterCount,
             kMaxAllocatableDoubleRegisterCount,
-            kMaxAllocatableDoubleRegisterCount,
 #elif V8_TARGET_ARCH_ARM
             FLAG_enable_embedded_constant_pool
                 ? (kMaxAllocatableGeneralRegisterCount - 1)
@@ -86,27 +83,21 @@
             CpuFeatures::IsSupported(VFP32DREGS)
                 ? kMaxAllocatableDoubleRegisterCount
                 : (ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(REGISTER_COUNT) 0),
-            ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(REGISTER_COUNT) 0,
 #elif V8_TARGET_ARCH_ARM64
             kMaxAllocatableGeneralRegisterCount,
             kMaxAllocatableDoubleRegisterCount,
-            kMaxAllocatableDoubleRegisterCount,
 #elif V8_TARGET_ARCH_MIPS
             kMaxAllocatableGeneralRegisterCount,
             kMaxAllocatableDoubleRegisterCount,
-            kMaxAllocatableDoubleRegisterCount,
 #elif V8_TARGET_ARCH_MIPS64
             kMaxAllocatableGeneralRegisterCount,
             kMaxAllocatableDoubleRegisterCount,
-            kMaxAllocatableDoubleRegisterCount,
 #elif V8_TARGET_ARCH_PPC
             kMaxAllocatableGeneralRegisterCount,
             kMaxAllocatableDoubleRegisterCount,
-            kMaxAllocatableDoubleRegisterCount,
 #elif V8_TARGET_ARCH_S390
             kMaxAllocatableGeneralRegisterCount,
             kMaxAllocatableDoubleRegisterCount,
-            kMaxAllocatableDoubleRegisterCount,
 #else
 #error Unsupported target architecture.
 #endif
@@ -145,7 +136,6 @@
 RegisterConfiguration::RegisterConfiguration(
     int num_general_registers, int num_double_registers,
     int num_allocatable_general_registers, int num_allocatable_double_registers,
-    int num_allocatable_aliased_double_registers,
     const int* allocatable_general_codes, const int* allocatable_double_codes,
     AliasingKind fp_aliasing_kind, const char* const* general_register_names,
     const char* const* float_register_names,
@@ -158,8 +148,6 @@
       num_allocatable_general_registers_(num_allocatable_general_registers),
       num_allocatable_float_registers_(0),
       num_allocatable_double_registers_(num_allocatable_double_registers),
-      num_allocatable_aliased_double_registers_(
-          num_allocatable_aliased_double_registers),
       num_allocatable_simd128_registers_(0),
       allocatable_general_codes_mask_(0),
       allocatable_float_codes_mask_(0),
diff --git a/src/register-configuration.h b/src/register-configuration.h
index 2cb7c91..946c82e 100644
--- a/src/register-configuration.h
+++ b/src/register-configuration.h
@@ -6,6 +6,7 @@
 #define V8_COMPILER_REGISTER_CONFIGURATION_H_
 
 #include "src/base/macros.h"
+#include "src/globals.h"
 #include "src/machine-type.h"
 
 namespace v8 {
@@ -13,7 +14,7 @@
 
 // An architecture independent representation of the sets of registers available
 // for instruction creation.
-class RegisterConfiguration {
+class V8_EXPORT_PRIVATE RegisterConfiguration {
  public:
   enum AliasingKind {
     // Registers alias a single register of every other size (e.g. Intel).
@@ -35,7 +36,6 @@
   RegisterConfiguration(int num_general_registers, int num_double_registers,
                         int num_allocatable_general_registers,
                         int num_allocatable_double_registers,
-                        int num_allocatable_aliased_double_registers,
                         const int* allocatable_general_codes,
                         const int* allocatable_double_codes,
                         AliasingKind fp_aliasing_kind,
@@ -57,12 +57,6 @@
   int num_allocatable_double_registers() const {
     return num_allocatable_double_registers_;
   }
-  // TODO(bbudge): This is a temporary work-around required because our
-  // register allocator does not yet support the aliasing of single/double
-  // registers on ARM.
-  int num_allocatable_aliased_double_registers() const {
-    return num_allocatable_aliased_double_registers_;
-  }
   int num_allocatable_simd128_registers() const {
     return num_allocatable_simd128_registers_;
   }
@@ -142,7 +136,6 @@
   int num_allocatable_general_registers_;
   int num_allocatable_float_registers_;
   int num_allocatable_double_registers_;
-  int num_allocatable_aliased_double_registers_;
   int num_allocatable_simd128_registers_;
   int32_t allocatable_general_codes_mask_;
   int32_t allocatable_float_codes_mask_;
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index b1e640c..0de9e1c 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -22,7 +22,7 @@
 
 // Number of times a function has to be seen on the stack before it is
 // compiled for baseline.
-static const int kProfilerTicksBeforeBaseline = 1;
+static const int kProfilerTicksBeforeBaseline = 0;
 // Number of times a function has to be seen on the stack before it is
 // optimized.
 static const int kProfilerTicksBeforeOptimization = 2;
@@ -43,17 +43,25 @@
 static const int kOSRCodeSizeAllowanceBase =
     100 * FullCodeGenerator::kCodeSizeMultiplier;
 static const int kOSRCodeSizeAllowanceBaseIgnition =
-    100 * interpreter::Interpreter::kCodeSizeMultiplier;
+    10 * interpreter::Interpreter::kCodeSizeMultiplier;
 
 static const int kOSRCodeSizeAllowancePerTick =
     4 * FullCodeGenerator::kCodeSizeMultiplier;
 static const int kOSRCodeSizeAllowancePerTickIgnition =
-    4 * interpreter::Interpreter::kCodeSizeMultiplier;
+    2 * interpreter::Interpreter::kCodeSizeMultiplier;
 
 // Maximum size in bytes of generated code for a function to be optimized
 // the very first time it is seen on the stack.
 static const int kMaxSizeEarlyOpt =
     5 * FullCodeGenerator::kCodeSizeMultiplier;
+static const int kMaxSizeEarlyOptIgnition =
+    5 * interpreter::Interpreter::kCodeSizeMultiplier;
+
+// Certain functions are simply too big to be worth optimizing.
+// We aren't using the code size multiplier here because there is no
+// "kMaxSizeOpt" with which we would need to normalize. This constant is
+// only for optimization decisions coming into TurboFan from Ignition.
+static const int kMaxSizeOptIgnition = 250 * 1024;
 
 #define OPTIMIZATION_REASON_LIST(V)                            \
   V(DoNotOptimize, "do not optimize")                          \
@@ -108,14 +116,10 @@
   // Harvest vector-ics as well
   TypeFeedbackVector* vector = function->feedback_vector();
   int with = 0, gen = 0, type_vector_ic_count = 0;
-  const bool is_interpreted =
-      function->shared()->code()->is_interpreter_trampoline_builtin();
+  const bool is_interpreted = function->shared()->IsInterpreted();
 
   vector->ComputeCounts(&with, &gen, &type_vector_ic_count, is_interpreted);
-  if (is_interpreted) {
-    DCHECK_EQ(*ic_total_count, 0);
-    *ic_total_count = type_vector_ic_count;
-  }
+  *ic_total_count += type_vector_ic_count;
   *ic_with_type_info_count += with;
   *ic_generic_count += gen;
 
@@ -158,11 +162,7 @@
                                OptimizationReason reason) {
   DCHECK_NE(reason, OptimizationReason::kDoNotOptimize);
   TraceRecompile(function, OptimizationReasonToString(reason), "baseline");
-
-  // TODO(4280): Fix this to check function is compiled for the interpreter
-  // once we have a standard way to check that. For now function will only
-  // have a bytecode array if compiled for the interpreter.
-  DCHECK(function->shared()->HasBytecodeArray());
+  DCHECK(function->shared()->IsInterpreted());
   function->MarkForBaseline();
 }
 
@@ -264,7 +264,7 @@
     }
     return;
   }
-  if (function->IsOptimized()) return;
+  if (frame->is_optimized()) return;
 
   int ticks = shared_code->profiler_ticks();
 
@@ -358,7 +358,7 @@
     return;
   }
 
-  if (function->IsOptimized()) return;
+  if (frame->is_optimized()) return;
 
   OptimizationReason reason = ShouldOptimizeIgnition(function, frame);
 
@@ -369,8 +369,6 @@
 
 bool RuntimeProfiler::MaybeOSRIgnition(JSFunction* function,
                                        JavaScriptFrame* frame) {
-  if (!FLAG_ignition_osr) return false;
-
   SharedFunctionInfo* shared = function->shared();
   int ticks = shared->profiler_ticks();
 
@@ -402,6 +400,10 @@
   SharedFunctionInfo* shared = function->shared();
   int ticks = shared->profiler_ticks();
 
+  if (shared->bytecode_array()->Size() > kMaxSizeOptIgnition) {
+    return OptimizationReason::kDoNotOptimize;
+  }
+
   if (ticks >= kProfilerTicksBeforeOptimization) {
     int typeinfo, generic, total, type_percentage, generic_percentage;
     GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
@@ -422,9 +424,18 @@
       }
       return OptimizationReason::kDoNotOptimize;
     }
+  } else if (!any_ic_changed_ &&
+             shared->bytecode_array()->Size() < kMaxSizeEarlyOptIgnition) {
+    // If no IC was patched since the last tick and this function is very
+    // small, optimistically optimize it now.
+    int typeinfo, generic, total, type_percentage, generic_percentage;
+    GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
+                &generic_percentage);
+    if (type_percentage >= FLAG_type_info_threshold &&
+        generic_percentage <= FLAG_generic_ic_threshold) {
+      return OptimizationReason::kSmallFunction;
+    }
   }
-  // TODO(rmcilroy): Consider whether we should optimize small functions when
-  // they are first seen on the stack (e.g., kMaxSizeEarlyOpt).
   return OptimizationReason::kDoNotOptimize;
 }
 
@@ -446,19 +457,9 @@
     JavaScriptFrame* frame = it.frame();
     JSFunction* function = frame->function();
 
-    List<JSFunction*> functions(4);
-    frame->GetFunctions(&functions);
-    for (int i = functions.length(); --i >= 0; ) {
-      SharedFunctionInfo* shared_function_info = functions[i]->shared();
-      int ticks = shared_function_info->profiler_ticks();
-      if (ticks < Smi::kMaxValue) {
-        shared_function_info->set_profiler_ticks(ticks + 1);
-      }
-    }
-
     Compiler::CompilationTier next_tier =
         Compiler::NextCompilationTier(function);
-    if (function->shared()->code()->is_interpreter_trampoline_builtin()) {
+    if (function->shared()->IsInterpreted()) {
       if (next_tier == Compiler::BASELINE) {
         MaybeBaselineIgnition(function, frame);
       } else {
@@ -469,6 +470,19 @@
       DCHECK_EQ(next_tier, Compiler::OPTIMIZED);
       MaybeOptimizeFullCodegen(function, frame, frame_count);
     }
+
+    // Update shared function info ticks after checking whether functions
+    // should be optimized to keep FCG (which updates ticks on code) and
+    // Ignition (which updates ticks on shared function info) in sync.
+    List<JSFunction*> functions(4);
+    frame->GetFunctions(&functions);
+    for (int i = functions.length(); --i >= 0;) {
+      SharedFunctionInfo* shared_function_info = functions[i]->shared();
+      int ticks = shared_function_info->profiler_ticks();
+      if (ticks < Smi::kMaxValue) {
+        shared_function_info->set_profiler_ticks(ticks + 1);
+      }
+    }
   }
   any_ic_changed_ = false;
 }
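
The runtime-profiler changes give Ignition its own early-optimization path and
an absolute bytecode-size cap. A condensed sketch of the resulting decision;
the flag defaults used here are assumptions for illustration, and the size
constants come from the diff above:

    enum class Decision { kDoNotOptimize, kHotAndStable, kSmallFunction };

    Decision ShouldOptimizeIgnition(int ticks, int bytecode_size,
                                    bool any_ic_changed, int type_percentage,
                                    int generic_percentage) {
      const int kTicksBeforeOptimization = 2;   // kProfilerTicksBeforeOptimization
      const int kMaxSizeOpt = 250 * 1024;       // kMaxSizeOptIgnition
      const int kMaxSizeEarlyOpt = 5 * 100;     // illustrative multiplier only
      const int kTypeInfoThreshold = 25;        // assumed flag default
      const int kGenericThreshold = 30;         // assumed flag default
      if (bytecode_size > kMaxSizeOpt) return Decision::kDoNotOptimize;
      const bool ics_healthy = type_percentage >= kTypeInfoThreshold &&
                               generic_percentage <= kGenericThreshold;
      if (ticks >= kTicksBeforeOptimization && ics_healthy)
        return Decision::kHotAndStable;
      // New: very small, IC-stable functions are optimized optimistically.
      if (!any_ic_changed && bytecode_size < kMaxSizeEarlyOpt && ics_healthy)
        return Decision::kSmallFunction;
      return Decision::kDoNotOptimize;
    }
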
diff --git a/src/runtime/runtime-array.cc b/src/runtime/runtime-array.cc
index cbde8f3..1a2d957 100644
--- a/src/runtime/runtime-array.cc
+++ b/src/runtime/runtime-array.cc
@@ -28,11 +28,13 @@
   // This is necessary to enable fast checks for absence of elements
   // on Array.prototype and below.
   prototype->set_elements(isolate->heap()->empty_fixed_array());
-  return Smi::FromInt(0);
+  return Smi::kZero;
 }
 
-static void InstallCode(Isolate* isolate, Handle<JSObject> holder,
-                        const char* name, Handle<Code> code, int argc = -1) {
+static void InstallCode(
+    Isolate* isolate, Handle<JSObject> holder, const char* name,
+    Handle<Code> code, int argc = -1,
+    BuiltinFunctionId id = static_cast<BuiltinFunctionId>(-1)) {
   Handle<String> key = isolate->factory()->InternalizeUtf8String(name);
   Handle<JSFunction> optimized =
       isolate->factory()->NewFunctionWithoutPrototype(key, code);
@@ -41,15 +43,19 @@
   } else {
     optimized->shared()->set_internal_formal_parameter_count(argc);
   }
+  if (id >= 0) {
+    optimized->shared()->set_builtin_function_id(id);
+  }
   JSObject::AddProperty(holder, key, optimized, NONE);
 }
 
-static void InstallBuiltin(Isolate* isolate, Handle<JSObject> holder,
-                           const char* name, Builtins::Name builtin_name,
-                           int argc = -1) {
+static void InstallBuiltin(
+    Isolate* isolate, Handle<JSObject> holder, const char* name,
+    Builtins::Name builtin_name, int argc = -1,
+    BuiltinFunctionId id = static_cast<BuiltinFunctionId>(-1)) {
   InstallCode(isolate, holder, name,
-              handle(isolate->builtins()->builtin(builtin_name), isolate),
-              argc);
+              handle(isolate->builtins()->builtin(builtin_name), isolate), argc,
+              id);
 }
 
 RUNTIME_FUNCTION(Runtime_SpecialArrayFunctions) {
@@ -71,6 +77,12 @@
   InstallBuiltin(isolate, holder, "splice", Builtins::kArraySplice);
   InstallBuiltin(isolate, holder, "includes", Builtins::kArrayIncludes, 2);
   InstallBuiltin(isolate, holder, "indexOf", Builtins::kArrayIndexOf, 2);
+  InstallBuiltin(isolate, holder, "keys", Builtins::kArrayPrototypeKeys, 0,
+                 kArrayKeys);
+  InstallBuiltin(isolate, holder, "values", Builtins::kArrayPrototypeValues, 0,
+                 kArrayValues);
+  InstallBuiltin(isolate, holder, "entries", Builtins::kArrayPrototypeEntries,
+                 0, kArrayEntries);
 
   return *holder;
 }
@@ -140,7 +152,7 @@
   to->set_length(from->length());
 
   JSObject::ResetElements(from);
-  from->set_length(Smi::FromInt(0));
+  from->set_length(Smi::kZero);
 
   JSObject::ValidateElements(to);
   return *to;
@@ -376,7 +388,7 @@
 
   if (index >= capacity) {
     if (!object->GetElementsAccessor()->GrowCapacity(object, index)) {
-      return Smi::FromInt(0);
+      return Smi::kZero;
     }
   }
 
@@ -423,21 +435,6 @@
   return isolate->heap()->ToBoolean(obj->IsJSArray());
 }
 
-RUNTIME_FUNCTION(Runtime_HasCachedArrayIndex) {
-  SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
-  return isolate->heap()->false_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetCachedArrayIndex) {
-  // This can never be reached, because Runtime_HasCachedArrayIndex always
-  // returns false.
-  UNIMPLEMENTED();
-  return nullptr;
-}
-
-
 RUNTIME_FUNCTION(Runtime_ArraySpeciesConstructor) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
@@ -639,5 +636,48 @@
   return Smi::FromInt(-1);
 }
 
+RUNTIME_FUNCTION(Runtime_SpreadIterablePrepare) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(Object, spread, 0);
+
+  if (spread->IsJSArray()) {
+    // Check that the spread arg has fast elements
+    Handle<JSArray> spread_array = Handle<JSArray>::cast(spread);
+    ElementsKind array_kind = spread_array->GetElementsKind();
+
+    // And that it has the original ArrayPrototype
+    JSObject* array_proto = JSObject::cast(spread_array->map()->prototype());
+    Map* iterator_map = isolate->initial_array_iterator_prototype()->map();
+
+    // Check that the iterator acts as expected.
+    // If IsArrayIteratorLookupChainIntact(), then we know that the initial
+    // ArrayIterator is being used. If the map of the prototype has changed,
+    // then take the slow path.
+
+    if (isolate->is_initial_array_prototype(array_proto) &&
+        isolate->IsArrayIteratorLookupChainIntact() &&
+        isolate->is_initial_array_iterator_prototype_map(iterator_map)) {
+      if (IsFastPackedElementsKind(array_kind)) {
+        return *spread;
+      }
+      if (IsFastHoleyElementsKind(array_kind) &&
+          isolate->IsFastArrayConstructorPrototypeChainIntact()) {
+        return *spread;
+      }
+    }
+  }
+
+  Handle<JSFunction> spread_iterable_function = isolate->spread_iterable();
+
+  Handle<Object> spreaded;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, spreaded,
+      Execution::Call(isolate, spread_iterable_function,
+                      isolate->factory()->undefined_value(), 1, &spread));
+
+  return *spreaded;
+}
+
 }  // namespace internal
 }  // namespace v8
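
Runtime_SpreadIterablePrepare only skips the iterator protocol when nothing
observable could differ from running it: the receiver must be a fast JSArray
with the initial Array.prototype, the array-iterator lookup chain must be
intact, and holey arrays additionally need an unmodified prototype chain so
holes cannot be shadowed by prototype elements. A boolean sketch of that
guard, with the individual checks abstracted into flags:

    struct SpreadChecks {
      bool is_js_array;
      bool has_fast_packed_elements;
      bool has_fast_holey_elements;
      bool initial_array_prototype;
      bool array_iterator_chain_intact;
      bool fast_prototype_chain_intact;
    };

    bool CanSpreadWithoutIterating(const SpreadChecks& c) {
      if (!c.is_js_array || !c.initial_array_prototype ||
          !c.array_iterator_chain_intact) {
        return false;
      }
      if (c.has_fast_packed_elements) return true;
      // Holey kinds also require the Array.prototype chain to be pristine.
      return c.has_fast_holey_elements && c.fast_prototype_chain_intact;
    }
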
diff --git a/src/runtime/runtime-collections.cc b/src/runtime/runtime-collections.cc
index b25a5ef..57e5d98 100644
--- a/src/runtime/runtime-collections.cc
+++ b/src/runtime/runtime-collections.cc
@@ -95,7 +95,7 @@
         kind == JSSetIterator::kKindEntries);
   Handle<OrderedHashSet> table(OrderedHashSet::cast(set->table()));
   holder->set_table(*table);
-  holder->set_index(Smi::FromInt(0));
+  holder->set_index(Smi::kZero);
   holder->set_kind(Smi::FromInt(kind));
   return isolate->heap()->undefined_value();
 }
@@ -191,7 +191,7 @@
         kind == JSMapIterator::kKindEntries);
   Handle<OrderedHashMap> table(OrderedHashMap::cast(map->table()));
   holder->set_table(*table);
-  holder->set_index(Smi::FromInt(0));
+  holder->set_index(Smi::kZero);
   holder->set_kind(Smi::FromInt(kind));
   return isolate->heap()->undefined_value();
 }
diff --git a/src/runtime/runtime-compiler.cc b/src/runtime/runtime-compiler.cc
index 01ec73d..472e076 100644
--- a/src/runtime/runtime-compiler.cc
+++ b/src/runtime/runtime-compiler.cc
@@ -123,7 +123,7 @@
     function->shared()->ReplaceCode(
         isolate->builtins()->builtin(Builtins::kCompileLazy));
   }
-  return Smi::FromInt(0);
+  return Smi::kZero;
 }
 
 RUNTIME_FUNCTION(Runtime_NotifyStubFailure) {
diff --git a/src/runtime/runtime-debug.cc b/src/runtime/runtime-debug.cc
index 2d217b8..824ea92 100644
--- a/src/runtime/runtime-debug.cc
+++ b/src/runtime/runtime-debug.cc
@@ -16,8 +16,8 @@
 #include "src/interpreter/interpreter.h"
 #include "src/isolate-inl.h"
 #include "src/runtime/runtime.h"
-#include "src/wasm/wasm-debug.h"
 #include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
 
 namespace v8 {
 namespace internal {
@@ -47,7 +47,7 @@
   isolate->debug()->Break(it.frame());
 
   // If live-edit has dropped frames, we are not going back to dispatch.
-  if (LiveEdit::SetAfterBreakTarget(isolate->debug())) return Smi::FromInt(0);
+  if (LiveEdit::SetAfterBreakTarget(isolate->debug())) return Smi::kZero;
 
   // Return the handler from the original bytecode array.
   DCHECK(it.frame()->is_interpreted());
@@ -256,14 +256,14 @@
     const char* status = "rejected";
     int status_val = Handle<Smi>::cast(status_obj)->value();
     switch (status_val) {
-      case +1:
+      case kPromiseFulfilled:
         status = "resolved";
         break;
-      case 0:
+      case kPromisePending:
         status = "pending";
         break;
       default:
-        DCHECK_EQ(-1, status_val);
+        DCHECK_EQ(kPromiseRejected, status_val);
     }
 
     Handle<FixedArray> result = factory->NewFixedArray(2 * 2);
@@ -457,7 +457,7 @@
   StackFrame::Id id = isolate->debug()->break_frame_id();
   if (id == StackFrame::NO_ID) {
     // If there is no JavaScript stack frame count is 0.
-    return Smi::FromInt(0);
+    return Smi::kZero;
   }
 
   for (StackTraceFrameIterator it(isolate, id); !it.done(); it.Advance()) {
@@ -551,10 +551,10 @@
     details->set(kFrameDetailsFrameIdIndex, *frame_id);
 
     // Add the function name.
-    Handle<Object> wasm_obj(it.wasm_frame()->wasm_obj(), isolate);
+    Handle<Object> wasm_instance(it.wasm_frame()->wasm_instance(), isolate);
     int func_index = it.wasm_frame()->function_index();
     Handle<String> func_name =
-        wasm::GetWasmFunctionName(isolate, wasm_obj, func_index);
+        wasm::GetWasmFunctionName(isolate, wasm_instance, func_index);
     details->set(kFrameDetailsFunctionIndex, *func_name);
 
     // Add the script wrapper
@@ -563,14 +563,26 @@
     details->set(kFrameDetailsScriptIndex, *script_wrapper);
 
     // Add the arguments count.
-    details->set(kFrameDetailsArgumentCountIndex, Smi::FromInt(0));
+    details->set(kFrameDetailsArgumentCountIndex, Smi::kZero);
 
     // Add the locals count
-    details->set(kFrameDetailsLocalCountIndex, Smi::FromInt(0));
+    details->set(kFrameDetailsLocalCountIndex, Smi::kZero);
 
     // Add the source position.
+    // For wasm, it is function-local, so translate it to a module-relative
+    // position, such that together with the script it uniquely identifies the
+    // position.
     if (position != kNoSourcePosition) {
-      details->set(kFrameDetailsSourcePositionIndex, Smi::FromInt(position));
+      int translated_position = position;
+      if (!wasm::WasmIsAsmJs(*wasm_instance, isolate)) {
+        Handle<WasmCompiledModule> compiled_module(
+            wasm::GetCompiledModule(JSObject::cast(*wasm_instance)), isolate);
+        translated_position +=
+            wasm::GetFunctionCodeOffset(compiled_module, func_index);
+      }
+      details->set(kFrameDetailsSourcePositionIndex,
+                   Smi::FromInt(translated_position));
     }
 
     // Add the constructor information.
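
The new block above turns a function-local wasm source position into a module-relative one by adding the function's code offset within the module, so the (script, position) pair stays unique. A standalone arithmetic sketch of the translation; the offset table is made up for illustration:

    #include <cassert>

    // Hypothetical table: byte offset of each function's code inside the module.
    // A position local to function i maps to offsets[i] + position.
    int TranslatePosition(const int* function_code_offsets, int func_index,
                          int local_position) {
      return function_code_offsets[func_index] + local_position;
    }

    int main() {
      const int function_code_offsets[] = {0, 120, 340};
      // Position 7 inside function 2 becomes module-relative position 347.
      assert(TranslatePosition(function_code_offsets, 2, 7) == 347);
      return 0;
    }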
@@ -929,7 +941,7 @@
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
 
-  if (!args[0]->IsJSGeneratorObject()) return Smi::FromInt(0);
+  if (!args[0]->IsJSGeneratorObject()) return Smi::kZero;
 
   // Check arguments.
   CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, gen, 0);
@@ -948,7 +960,7 @@
   DCHECK(args.length() == 2);
 
   if (!args[0]->IsJSGeneratorObject()) {
-    return *isolate->factory()->undefined_value();
+    return isolate->heap()->undefined_value();
   }
 
   // Check arguments.
@@ -1429,6 +1441,7 @@
 
 
 // Patches script source (should be called upon BeforeCompile event).
+// TODO(5530): Remove once uses in debug.js are gone.
 RUNTIME_FUNCTION(Runtime_DebugSetScriptSource) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 2);
@@ -1569,6 +1582,7 @@
   return *Script::GetWrapper(found);
 }
 
+// TODO(5530): Remove once uses in debug.js are gone.
 RUNTIME_FUNCTION(Runtime_ScriptLineCount) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
@@ -1583,6 +1597,7 @@
   return Smi::FromInt(line_ends_array->length());
 }
 
+// TODO(5530): Remove once uses in debug.js are gone.
 RUNTIME_FUNCTION(Runtime_ScriptLineStartPosition) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 2);
@@ -1601,7 +1616,7 @@
   if (line < 0 || line > line_count) {
     return Smi::FromInt(-1);
   } else if (line == 0) {
-    return Smi::FromInt(0);
+    return Smi::kZero;
   } else {
     DCHECK(0 < line && line <= line_count);
     const int pos = Smi::cast(line_ends_array->get(line - 1))->value() + 1;
@@ -1609,6 +1624,7 @@
   }
 }
 
+// TODO(5530): Remove once uses in debug.js are gone.
 RUNTIME_FUNCTION(Runtime_ScriptLineEndPosition) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 2);
@@ -1634,7 +1650,7 @@
                                         Script::OffsetFlag offset_flag,
                                         Isolate* isolate) {
   Script::PositionInfo info;
-  if (!script->GetPositionInfo(position, &info, offset_flag)) {
+  if (!Script::GetPositionInfo(script, position, &info, offset_flag)) {
     return isolate->factory()->null_value();
   }
 
@@ -1661,62 +1677,49 @@
   return jsinfo;
 }
 
-// Get information on a specific source line and column possibly offset by a
-// fixed source position. This function is used to find a source position from
-// a line and column position. The fixed source position offset is typically
-// used to find a source position in a function based on a line and column in
-// the source for the function alone. The offset passed will then be the
-// start position of the source for the function within the full script source.
-// Note that incoming line and column parameters may be undefined, and are
-// assumed to be passed *with* offsets.
-RUNTIME_FUNCTION(Runtime_ScriptLocationFromLine) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 4);
-  CONVERT_ARG_CHECKED(JSValue, script, 0);
+namespace {
 
-  CHECK(script->value()->IsScript());
-  Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
-
+Handle<Object> ScriptLocationFromLine(Isolate* isolate, Handle<Script> script,
+                                      Handle<Object> opt_line,
+                                      Handle<Object> opt_column,
+                                      int32_t offset) {
   // Line and column are possibly undefined and we need to handle these cases,
   // additionally subtracting corresponding offsets.
 
   int32_t line;
-  if (args[1]->IsNull(isolate) || args[1]->IsUndefined(isolate)) {
+  if (opt_line->IsNull(isolate) || opt_line->IsUndefined(isolate)) {
     line = 0;
   } else {
-    CHECK(args[1]->IsNumber());
-    line = NumberToInt32(args[1]) - script_handle->line_offset();
+    CHECK(opt_line->IsNumber());
+    line = NumberToInt32(*opt_line) - script->line_offset();
   }
 
   int32_t column;
-  if (args[2]->IsNull(isolate) || args[2]->IsUndefined(isolate)) {
+  if (opt_column->IsNull(isolate) || opt_column->IsUndefined(isolate)) {
     column = 0;
   } else {
-    CHECK(args[2]->IsNumber());
-    column = NumberToInt32(args[2]);
-    if (line == 0) column -= script_handle->column_offset();
+    CHECK(opt_column->IsNumber());
+    column = NumberToInt32(*opt_column);
+    if (line == 0) column -= script->column_offset();
   }
 
-  CONVERT_NUMBER_CHECKED(int32_t, offset_position, Int32, args[3]);
-
-  if (line < 0 || column < 0 || offset_position < 0) {
-    return isolate->heap()->null_value();
+  if (line < 0 || column < 0 || offset < 0) {
+    return isolate->factory()->null_value();
   }
 
-  Script::InitLineEnds(script_handle);
+  Script::InitLineEnds(script);
 
-  FixedArray* line_ends_array = FixedArray::cast(script_handle->line_ends());
+  FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
   const int line_count = line_ends_array->length();
 
   int position;
   if (line == 0) {
-    position = offset_position + column;
+    position = offset + column;
   } else {
     Script::PositionInfo info;
-    if (!script_handle->GetPositionInfo(offset_position, &info,
-                                        Script::NO_OFFSET) ||
+    if (!Script::GetPositionInfo(script, offset, &info, Script::NO_OFFSET) ||
         info.line + line >= line_count) {
-      return isolate->heap()->null_value();
+      return isolate->factory()->null_value();
     }
 
     const int offset_line = info.line + line;
@@ -1727,10 +1730,65 @@
     position = offset_line_position + column;
   }
 
-  return *GetJSPositionInfo(script_handle, position, Script::NO_OFFSET,
-                            isolate);
+  return GetJSPositionInfo(script, position, Script::NO_OFFSET, isolate);
 }
 
+// Slow traversal over all scripts on the heap.
+bool GetScriptById(Isolate* isolate, int needle, Handle<Script>* result) {
+  Script::Iterator iterator(isolate);
+  Script* script = NULL;
+  while ((script = iterator.Next()) != NULL) {
+    if (script->id() == needle) {
+      *result = handle(script);
+      return true;
+    }
+  }
+
+  return false;
+}
+
+}  // namespace
+
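
ScriptLocationFromLine above resolves a line/column pair, possibly shifted by a fixed start offset, against the script's table of line-end positions. A standalone sketch of the core lookup, assuming line_ends holds the offset of each line's terminating newline:

    #include <cassert>
    #include <vector>

    // Absolute position of (line, column): one past the previous line's
    // newline, plus the column. Line 0 starts at position 0.
    int PositionFromLineColumn(const std::vector<int>& line_ends, int line,
                               int column) {
      const int line_start = line == 0 ? 0 : line_ends[line - 1] + 1;
      return line_start + column;
    }

    int main() {
      // For the source "ab\ncd\n", the newlines sit at offsets 2 and 5.
      std::vector<int> line_ends = {2, 5};
      assert(PositionFromLineColumn(line_ends, 0, 1) == 1);  // 'b'
      assert(PositionFromLineColumn(line_ends, 1, 0) == 3);  // 'c'
      return 0;
    }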
+// Get information on a specific source line and column possibly offset by a
+// fixed source position. This function is used to find a source position from
+// a line and column position. The fixed source position offset is typically
+// used to find a source position in a function based on a line and column in
+// the source for the function alone. The offset passed will then be the
+// start position of the source for the function within the full script source.
+// Note that incoming line and column parameters may be undefined, and are
+// assumed to be passed *with* offsets.
+// TODO(5530): Remove once uses in debug.js are gone.
+RUNTIME_FUNCTION(Runtime_ScriptLocationFromLine) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 4);
+  CONVERT_ARG_HANDLE_CHECKED(JSValue, script, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, opt_line, 1);
+  CONVERT_ARG_HANDLE_CHECKED(Object, opt_column, 2);
+  CONVERT_NUMBER_CHECKED(int32_t, offset, Int32, args[3]);
+
+  CHECK(script->value()->IsScript());
+  Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
+
+  return *ScriptLocationFromLine(isolate, script_handle, opt_line, opt_column,
+                                 offset);
+}
+
+// TODO(5530): Rename once conflicting function has been deleted.
+RUNTIME_FUNCTION(Runtime_ScriptLocationFromLine2) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 4);
+  CONVERT_NUMBER_CHECKED(int32_t, scriptid, Int32, args[0]);
+  CONVERT_ARG_HANDLE_CHECKED(Object, opt_line, 1);
+  CONVERT_ARG_HANDLE_CHECKED(Object, opt_column, 2);
+  CONVERT_NUMBER_CHECKED(int32_t, offset, Int32, args[3]);
+
+  Handle<Script> script;
+  CHECK(GetScriptById(isolate, scriptid, &script));
+
+  return *ScriptLocationFromLine(isolate, script, opt_line, opt_column, offset);
+}
+
+// TODO(5530): Remove once uses in debug.js are gone.
 RUNTIME_FUNCTION(Runtime_ScriptPositionInfo) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 3);
@@ -1748,6 +1806,7 @@
 
 // Returns the given line as a string, or null if line is out of bounds.
 // The parameter line is expected to include the script's line offset.
+// TODO(5530): Remove once uses in debug.js are gone.
 RUNTIME_FUNCTION(Runtime_ScriptSourceLine) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 2);
@@ -1822,12 +1881,19 @@
   return isolate->heap()->undefined_value();
 }
 
+RUNTIME_FUNCTION(Runtime_DebugNextMicrotaskId) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 0);
+  return Smi::FromInt(isolate->GetNextDebugMicrotaskId());
+}
 
 RUNTIME_FUNCTION(Runtime_DebugAsyncTaskEvent) {
-  DCHECK(args.length() == 1);
+  DCHECK(args.length() == 3);
   HandleScope scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, data, 0);
-  isolate->debug()->OnAsyncTaskEvent(data);
+  CONVERT_ARG_HANDLE_CHECKED(String, type, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, id, 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 2);
+  isolate->debug()->OnAsyncTaskEvent(type, id, name);
   return isolate->heap()->undefined_value();
 }
 
@@ -1843,34 +1909,5 @@
   return NULL;
 }
 
-RUNTIME_FUNCTION(Runtime_GetWasmFunctionOffsetTable) {
-  DCHECK(args.length() == 1);
-  HandleScope scope(isolate);
-  CONVERT_ARG_CHECKED(JSValue, script_val, 0);
-
-  CHECK(script_val->value()->IsScript());
-  Handle<Script> script = Handle<Script>(Script::cast(script_val->value()));
-
-  Handle<wasm::WasmDebugInfo> debug_info =
-      wasm::GetDebugInfo(handle(script->wasm_object(), isolate));
-  Handle<FixedArray> elements = wasm::WasmDebugInfo::GetFunctionOffsetTable(
-      debug_info, script->wasm_function_index());
-  return *isolate->factory()->NewJSArrayWithElements(elements);
-}
-
-RUNTIME_FUNCTION(Runtime_DisassembleWasmFunction) {
-  DCHECK(args.length() == 1);
-  HandleScope scope(isolate);
-  CONVERT_ARG_CHECKED(JSValue, script_val, 0);
-
-  CHECK(script_val->value()->IsScript());
-  Handle<Script> script = Handle<Script>(Script::cast(script_val->value()));
-
-  Handle<wasm::WasmDebugInfo> debug_info =
-      wasm::GetDebugInfo(handle(script->wasm_object(), isolate));
-  return *wasm::WasmDebugInfo::DisassembleFunction(
-      debug_info, script->wasm_function_index());
-}
-
 }  // namespace internal
 }  // namespace v8
diff --git a/src/runtime/runtime-function.cc b/src/runtime/runtime-function.cc
index fa50941..a91ab28 100644
--- a/src/runtime/runtime-function.cc
+++ b/src/runtime/runtime-function.cc
@@ -55,7 +55,7 @@
   return isolate->heap()->undefined_value();
 }
 
-
+// TODO(5530): Remove once uses in debug.js are gone.
 RUNTIME_FUNCTION(Runtime_FunctionGetScript) {
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
@@ -71,6 +71,20 @@
   return isolate->heap()->undefined_value();
 }
 
+RUNTIME_FUNCTION(Runtime_FunctionGetScriptId) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
+
+  if (function->IsJSFunction()) {
+    Handle<Object> script(
+        Handle<JSFunction>::cast(function)->shared()->script(), isolate);
+    if (script->IsScript()) {
+      return Smi::FromInt(Handle<Script>::cast(script)->id());
+    }
+  }
+  return Smi::FromInt(-1);
+}
 
 RUNTIME_FUNCTION(Runtime_FunctionGetSourceCode) {
   HandleScope scope(isolate);
diff --git a/src/runtime/runtime-i18n.cc b/src/runtime/runtime-i18n.cc
index 7fcb802..75e0952 100644
--- a/src/runtime/runtime-i18n.cc
+++ b/src/runtime/runtime-i18n.cc
@@ -926,7 +926,7 @@
   if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
     return *isolate->factory()->NewStringFromStaticChars("none");
   } else if (status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT) {
-    return *isolate->factory()->number_string();
+    return isolate->heap()->number_string();
   } else if (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT) {
     return *isolate->factory()->NewStringFromStaticChars("letter");
   } else if (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) {
@@ -939,55 +939,10 @@
 }
 
 namespace {
-void ConvertCaseWithTransliterator(icu::UnicodeString* input,
-                                   const char* transliterator_id) {
-  UErrorCode status = U_ZERO_ERROR;
-  std::unique_ptr<icu::Transliterator> translit(
-      icu::Transliterator::createInstance(
-          icu::UnicodeString(transliterator_id, -1, US_INV), UTRANS_FORWARD,
-          status));
-  if (U_FAILURE(status)) return;
-  translit->transliterate(*input);
-}
-
 MUST_USE_RESULT Object* LocaleConvertCase(Handle<String> s, Isolate* isolate,
                                           bool is_to_upper, const char* lang) {
-  int32_t src_length = s->length();
-
-  // Greek uppercasing has to be done via transliteration.
-  // TODO(jshin): Drop this special-casing once ICU's regular case conversion
-  // API supports Greek uppercasing. See
-  // http://bugs.icu-project.org/trac/ticket/10582 .
-  // In the meantime, if there's no Greek character in |s|, call this
-  // function again with the root locale (lang="").
-  // ICU's C API for transliteration is nasty and we just use C++ API.
-  if (V8_UNLIKELY(is_to_upper && lang[0] == 'e' && lang[1] == 'l')) {
-    icu::UnicodeString converted;
-    std::unique_ptr<uc16[]> sap;
-    {
-      DisallowHeapAllocation no_gc;
-      String::FlatContent flat = s->GetFlatContent();
-      const UChar* src = GetUCharBufferFromFlat(flat, &sap, src_length);
-      // Starts with the source string (read-only alias with copy-on-write
-      // semantics) and will be modified to contain the converted result.
-      // Using read-only alias at first saves one copy operation if
-      // transliteration does not change the input, which is rather rare.
-      // Moreover, transliteration takes rather long so that saving one copy
-      // helps only a little bit.
-      converted.setTo(false, src, src_length);
-      ConvertCaseWithTransliterator(&converted, "el-Upper");
-      // If no change is made, just return |s|.
-      if (converted.getBuffer() == src) return *s;
-    }
-    RETURN_RESULT_OR_FAILURE(
-        isolate,
-        isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
-            reinterpret_cast<const uint16_t*>(converted.getBuffer()),
-            converted.length())));
-  }
-
   auto case_converter = is_to_upper ? u_strToUpper : u_strToLower;
-
+  int32_t src_length = s->length();
   int32_t dest_length = src_length;
   UErrorCode status;
   Handle<SeqTwoByteString> result;
@@ -1065,7 +1020,7 @@
 inline uint16_t ToLatin1Upper(uint16_t ch) {
   DCHECK(ch != 0xDF && ch != 0xB5 && ch != 0xFF);
   return ch &
-         ~(((ch >= 'a' && ch <= 'z') || (((ch & 0xE0) == 0xE0) && ch != 0xE7))
+         ~(((ch >= 'a' && ch <= 'z') || (((ch & 0xE0) == 0xE0) && ch != 0xF7))
            << 5);
 }
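
The one-character fix above matters because 0xF7 is the Latin-1 division sign, a non-letter sitting inside the 0xE0-0xFF block that the mask targets, while 0xE7 (c-cedilla) is a letter that must keep uppercasing to 0xC7. A standalone check of the masking trick, mirroring the expression in the hunk:

    #include <cassert>
    #include <cstdint>

    // Clear bit 5 (0x20) only for ASCII a-z and for the Latin-1 lowercase
    // block 0xE0-0xFF, excluding 0xF7 (the division sign).
    uint16_t ToLatin1Upper(uint16_t ch) {
      return ch &
             ~(((ch >= 'a' && ch <= 'z') || (((ch & 0xE0) == 0xE0) && ch != 0xF7))
               << 5);
    }

    int main() {
      assert(ToLatin1Upper('a') == 'A');
      assert(ToLatin1Upper(0xE7) == 0xC7);  // c-cedilla uppercases
      assert(ToLatin1Upper(0xF7) == 0xF7);  // division sign is left alone
      return 0;
    }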
 
@@ -1138,7 +1093,7 @@
   s = String::Flatten(s);
   // First scan the string for uppercase and non-ASCII characters:
   if (s->HasOnlyOneByteChars()) {
-    unsigned first_index_to_lower = length;
+    int first_index_to_lower = length;
     for (int index = 0; index < length; ++index) {
       // Blink specializes this path for one-byte strings, so it
       // does not need to do a generic get, but can do the equivalent
@@ -1165,14 +1120,16 @@
     String::FlatContent flat = s->GetFlatContent();
     if (flat.IsOneByte()) {
       const uint8_t* src = flat.ToOneByteVector().start();
-      CopyChars(result->GetChars(), src, first_index_to_lower);
+      CopyChars(result->GetChars(), src,
+                static_cast<size_t>(first_index_to_lower));
       for (int index = first_index_to_lower; index < length; ++index) {
         uint16_t ch = static_cast<uint16_t>(src[index]);
         result->SeqOneByteStringSet(index, ToLatin1Lower(ch));
       }
     } else {
       const uint16_t* src = flat.ToUC16Vector().start();
-      CopyChars(result->GetChars(), src, first_index_to_lower);
+      CopyChars(result->GetChars(), src,
+                static_cast<size_t>(first_index_to_lower));
       for (int index = first_index_to_lower; index < length; ++index) {
         uint16_t ch = src[index];
         result->SeqOneByteStringSet(index, ToLatin1Lower(ch));
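
The CopyChars changes above belong to a two-pass lowercasing strategy: scan for the first character that actually needs conversion, bulk-copy the untouched prefix, and convert only the remainder. A standalone ASCII-only sketch of the same idea:

    #include <cassert>
    #include <cctype>
    #include <cstring>
    #include <string>

    std::string ToLowerAscii(const std::string& s) {
      size_t first_index_to_lower = s.size();
      for (size_t i = 0; i < s.size(); ++i) {
        if (s[i] >= 'A' && s[i] <= 'Z') {
          first_index_to_lower = i;
          break;
        }
      }
      std::string result(s.size(), '\0');
      // Bulk-copy the prefix that is already lowercase.
      std::memcpy(&result[0], s.data(), first_index_to_lower);
      for (size_t i = first_index_to_lower; i < s.size(); ++i) {
        result[i] = static_cast<char>(std::tolower(static_cast<unsigned char>(s[i])));
      }
      return result;
    }

    int main() {
      assert(ToLowerAscii("abCD") == "abcd");
      assert(ToLowerAscii("abcd") == "abcd");
      return 0;
    }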
@@ -1283,7 +1240,7 @@
   if (!isolate->eternal_handles()->Exists(EternalHandles::DATE_CACHE_VERSION)) {
     Handle<FixedArray> date_cache_version =
         isolate->factory()->NewFixedArray(1, TENURED);
-    date_cache_version->set(0, Smi::FromInt(0));
+    date_cache_version->set(0, Smi::kZero);
     isolate->eternal_handles()->CreateSingleton(
         isolate, *date_cache_version, EternalHandles::DATE_CACHE_VERSION);
   }
diff --git a/src/runtime/runtime-internal.cc b/src/runtime/runtime-internal.cc
index 26882b5..621f335 100644
--- a/src/runtime/runtime-internal.cc
+++ b/src/runtime/runtime-internal.cc
@@ -100,12 +100,29 @@
   return isolate->StackOverflow();
 }
 
+RUNTIME_FUNCTION(Runtime_ThrowTypeError) {
+  HandleScope scope(isolate);
+  DCHECK_LE(1, args.length());
+  CONVERT_SMI_ARG_CHECKED(message_id_smi, 0);
+
+  Handle<Object> undefined = isolate->factory()->undefined_value();
+  Handle<Object> arg0 = (args.length() > 1) ? args.at<Object>(1) : undefined;
+  Handle<Object> arg1 = (args.length() > 2) ? args.at<Object>(2) : undefined;
+  Handle<Object> arg2 = (args.length() > 3) ? args.at<Object>(3) : undefined;
+
+  MessageTemplate::Template message_id =
+      static_cast<MessageTemplate::Template>(message_id_smi);
+
+  THROW_NEW_ERROR_RETURN_FAILURE(isolate,
+                                 NewTypeError(message_id, arg0, arg1, arg2));
+}
+
 RUNTIME_FUNCTION(Runtime_ThrowWasmError) {
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
   CONVERT_SMI_ARG_CHECKED(message_id, 0);
   CONVERT_SMI_ARG_CHECKED(byte_offset, 1);
-  Handle<Object> error_obj = isolate->factory()->NewError(
+  Handle<Object> error_obj = isolate->factory()->NewWasmRuntimeError(
       static_cast<MessageTemplate::Template>(message_id));
 
   // For wasm traps, the byte offset (a.k.a. source position) cannot be
@@ -270,64 +287,6 @@
       isolate, NewTypeError(MessageTemplate::kApplyNonFunction, object, type));
 }
 
-namespace {
-
-void PromiseRejectEvent(Isolate* isolate, Handle<JSObject> promise,
-                        Handle<Object> rejected_promise, Handle<Object> value,
-                        bool debug_event) {
-  if (isolate->debug()->is_active() && debug_event) {
-    isolate->debug()->OnPromiseReject(rejected_promise, value);
-  }
-  Handle<Symbol> key = isolate->factory()->promise_has_handler_symbol();
-  // Do not report if we actually have a handler.
-  if (JSReceiver::GetDataProperty(promise, key)->IsUndefined(isolate)) {
-    isolate->ReportPromiseReject(promise, value,
-                                 v8::kPromiseRejectWithNoHandler);
-  }
-}
-
-}  // namespace
-
-RUNTIME_FUNCTION(Runtime_PromiseRejectEvent) {
-  DCHECK(args.length() == 3);
-  HandleScope scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
-  CONVERT_BOOLEAN_ARG_CHECKED(debug_event, 2);
-
-  PromiseRejectEvent(isolate, promise, promise, value, debug_event);
-  return isolate->heap()->undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_PromiseRejectEventFromStack) {
-  DCHECK(args.length() == 2);
-  HandleScope scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
-
-  Handle<Object> rejected_promise = promise;
-  if (isolate->debug()->is_active()) {
-    // If the Promise.reject call is caught, then this will return
-    // undefined, which will be interpreted by PromiseRejectEvent
-    // as being a caught exception event.
-    rejected_promise = isolate->GetPromiseOnStackOnThrow();
-  }
-  PromiseRejectEvent(isolate, promise, rejected_promise, value, true);
-  return isolate->heap()->undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_PromiseRevokeReject) {
-  DCHECK(args.length() == 1);
-  HandleScope scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
-  Handle<Symbol> key = isolate->factory()->promise_has_handler_symbol();
-  // At this point, no revocation has been issued before
-  CHECK(JSReceiver::GetDataProperty(promise, key)->IsUndefined(isolate));
-  isolate->ReportPromiseReject(promise, Handle<Object>(),
-                               v8::kPromiseHandlerAddedAfterReject);
-  return isolate->heap()->undefined_value();
-}
-
 
 RUNTIME_FUNCTION(Runtime_StackGuard) {
   SealHandleScope shs(isolate);
@@ -430,10 +389,10 @@
 Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object) {
   MessageLocation location;
   if (ComputeLocation(isolate, &location)) {
-    Zone zone(isolate->allocator());
+    Zone zone(isolate->allocator(), ZONE_NAME);
     std::unique_ptr<ParseInfo> info(
         location.function()->shared()->is_function()
-            ? new ParseInfo(&zone, location.function())
+            ? new ParseInfo(&zone, handle(location.function()->shared()))
             : new ParseInfo(&zone, location.script()));
     if (Parser::ParseStatic(info.get())) {
       CallPrinter printer(isolate, location.function()->shared()->IsBuiltin());
@@ -554,36 +513,6 @@
   }
 }
 
-RUNTIME_FUNCTION(Runtime_EnqueuePromiseResolveThenableJob) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 6);
-  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, resolution, 0);
-  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, then, 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, resolve, 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, reject, 3);
-  CONVERT_ARG_HANDLE_CHECKED(Object, before_debug_event, 4);
-  CONVERT_ARG_HANDLE_CHECKED(Object, after_debug_event, 5);
-  Handle<PromiseContainer> container = isolate->factory()->NewPromiseContainer(
-      resolution, then, resolve, reject, before_debug_event, after_debug_event);
-  isolate->EnqueueMicrotask(container);
-  return isolate->heap()->undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_EnqueueMicrotask) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, microtask, 0);
-  isolate->EnqueueMicrotask(microtask);
-  return isolate->heap()->undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_RunMicrotasks) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
-  isolate->RunMicrotasks();
-  return isolate->heap()->undefined_value();
-}
-
 RUNTIME_FUNCTION(Runtime_OrdinaryHasInstance) {
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
@@ -593,13 +522,13 @@
       isolate, Object::OrdinaryHasInstance(isolate, callable, object));
 }
 
-RUNTIME_FUNCTION(Runtime_IsWasmObject) {
+RUNTIME_FUNCTION(Runtime_IsWasmInstance) {
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_CHECKED(Object, object, 0);
-  bool is_wasm_object =
-      object->IsJSObject() && wasm::IsWasmObject(JSObject::cast(object));
-  return *isolate->factory()->ToBoolean(is_wasm_object);
+  bool is_wasm_instance =
+      object->IsJSObject() && wasm::IsWasmInstance(JSObject::cast(object));
+  return *isolate->factory()->ToBoolean(is_wasm_instance);
 }
 
 RUNTIME_FUNCTION(Runtime_Typeof) {
diff --git a/src/runtime/runtime-interpreter.cc b/src/runtime/runtime-interpreter.cc
index ce71e2c..62eee17 100644
--- a/src/runtime/runtime-interpreter.cc
+++ b/src/runtime/runtime-interpreter.cc
@@ -171,5 +171,19 @@
   return isolate->heap()->undefined_value();
 }
 
+RUNTIME_FUNCTION(Runtime_InterpreterAdvanceBytecodeOffset) {
+  SealHandleScope shs(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(BytecodeArray, bytecode_array, 0);
+  CONVERT_SMI_ARG_CHECKED(bytecode_offset, 1);
+  interpreter::BytecodeArrayIterator it(bytecode_array);
+  int offset = bytecode_offset - BytecodeArray::kHeaderSize + kHeapObjectTag;
+  while (it.current_offset() < offset) it.Advance();
+  DCHECK_EQ(offset, it.current_offset());
+  it.Advance();  // Advance by one bytecode.
+  offset = it.current_offset() + BytecodeArray::kHeaderSize - kHeapObjectTag;
+  return Smi::FromInt(offset);
+}
+
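
Runtime_InterpreterAdvanceBytecodeOffset converts the incoming offset, which is measured from the tagged BytecodeArray pointer, to a raw offset into the bytecode stream, advances one bytecode, and converts back. A standalone arithmetic sketch of the round-trip; the header size and tag are illustrative stand-ins for BytecodeArray::kHeaderSize and kHeapObjectTag:

    #include <cassert>

    const int kHeaderSize = 16;    // hypothetical object header size
    const int kHeapObjectTag = 1;  // low tag bit set on heap object pointers

    int ToRawOffset(int tagged_offset) {
      return tagged_offset - kHeaderSize + kHeapObjectTag;
    }
    int ToTaggedOffset(int raw_offset) {
      return raw_offset + kHeaderSize - kHeapObjectTag;
    }

    int main() {
      const int tagged = 20;
      assert(ToTaggedOffset(ToRawOffset(tagged)) == tagged);  // round-trips
      return 0;
    }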
 }  // namespace internal
 }  // namespace v8
diff --git a/src/runtime/runtime-literals.cc b/src/runtime/runtime-literals.cc
index ebdf04c..8bb4522 100644
--- a/src/runtime/runtime-literals.cc
+++ b/src/runtime/runtime-literals.cc
@@ -86,7 +86,7 @@
     if (key->ToArrayIndex(&element_index)) {
       // Array index (uint32).
       if (value->IsUninitialized(isolate)) {
-        value = handle(Smi::FromInt(0), isolate);
+        value = handle(Smi::kZero, isolate);
       }
       maybe_result = JSObject::SetOwnElementIgnoreAttributes(
           boilerplate, element_index, value, NONE);
diff --git a/src/runtime/runtime-maths.cc b/src/runtime/runtime-maths.cc
index 47e560d..404305a 100644
--- a/src/runtime/runtime-maths.cc
+++ b/src/runtime/runtime-maths.cc
@@ -15,58 +15,49 @@
 
 RUNTIME_FUNCTION(Runtime_GenerateRandomNumbers) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
-  if (isolate->serializer_enabled()) {
-    // Random numbers in the snapshot are not really that random. And we cannot
-    // return a typed array as it cannot be serialized. To make calling
-    // Math.random possible when creating a custom startup snapshot, we simply
-    // return a normal array with a single random number.
-    Handle<HeapNumber> random_number = isolate->factory()->NewHeapNumber(
-        isolate->random_number_generator()->NextDouble());
-    Handle<FixedArray> array_backing = isolate->factory()->NewFixedArray(1);
-    array_backing->set(0, *random_number);
-    return *isolate->factory()->NewJSArrayWithElements(array_backing);
-  }
+  DCHECK(args.length() == 0);
 
-  static const int kState0Offset = 0;
-  static const int kState1Offset = 1;
-  static const int kRandomBatchSize = 64;
-  CONVERT_ARG_HANDLE_CHECKED(Object, maybe_typed_array, 0);
-  Handle<JSTypedArray> typed_array;
-  // Allocate typed array if it does not yet exist.
-  if (maybe_typed_array->IsJSTypedArray()) {
-    typed_array = Handle<JSTypedArray>::cast(maybe_typed_array);
+  Handle<Context> native_context = isolate->native_context();
+  DCHECK_EQ(0, native_context->math_random_index()->value());
+
+  static const int kCacheSize = 64;
+  static const int kState0Offset = kCacheSize - 1;
+  static const int kState1Offset = kState0Offset - 1;
+  // The index is decremented before being used to access the cache.
+  static const int kInitialIndex = kState1Offset;
+
+  Handle<FixedDoubleArray> cache;
+  uint64_t state0 = 0;
+  uint64_t state1 = 0;
+  if (native_context->math_random_cache()->IsFixedDoubleArray()) {
+    cache = Handle<FixedDoubleArray>(
+        FixedDoubleArray::cast(native_context->math_random_cache()), isolate);
+    state0 = double_to_uint64(cache->get_scalar(kState0Offset));
+    state1 = double_to_uint64(cache->get_scalar(kState1Offset));
   } else {
-    static const int kByteLength = kRandomBatchSize * kDoubleSize;
-    Handle<JSArrayBuffer> buffer =
-        isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared, TENURED);
-    JSArrayBuffer::SetupAllocatingData(buffer, isolate, kByteLength, true,
-                                       SharedFlag::kNotShared);
-    typed_array = isolate->factory()->NewJSTypedArray(
-        kExternalFloat64Array, buffer, 0, kRandomBatchSize);
+    cache = Handle<FixedDoubleArray>::cast(
+        isolate->factory()->NewFixedDoubleArray(kCacheSize, TENURED));
+    native_context->set_math_random_cache(*cache);
+    // Initialize state if not yet initialized.
+    while (state0 == 0 || state1 == 0) {
+      isolate->random_number_generator()->NextBytes(&state0, sizeof(state0));
+      isolate->random_number_generator()->NextBytes(&state1, sizeof(state1));
+    }
   }
 
   DisallowHeapAllocation no_gc;
-  double* array =
-      reinterpret_cast<double*>(typed_array->GetBuffer()->backing_store());
-  // Fetch existing state.
-  uint64_t state0 = double_to_uint64(array[kState0Offset]);
-  uint64_t state1 = double_to_uint64(array[kState1Offset]);
-  // Initialize state if not yet initialized.
-  while (state0 == 0 || state1 == 0) {
-    isolate->random_number_generator()->NextBytes(&state0, sizeof(state0));
-    isolate->random_number_generator()->NextBytes(&state1, sizeof(state1));
-  }
+  FixedDoubleArray* raw_cache = *cache;
   // Create random numbers.
-  for (int i = kState1Offset + 1; i < kRandomBatchSize; i++) {
+  for (int i = 0; i < kInitialIndex; i++) {
     // Generate random numbers using xorshift128+.
     base::RandomNumberGenerator::XorShift128(&state0, &state1);
-    array[i] = base::RandomNumberGenerator::ToDouble(state0, state1);
+    raw_cache->set(i, base::RandomNumberGenerator::ToDouble(state0, state1));
   }
+
   // Persist current state.
-  array[kState0Offset] = uint64_to_double(state0);
-  array[kState1Offset] = uint64_to_double(state1);
-  return *typed_array;
+  raw_cache->set(kState0Offset, uint64_to_double(state0));
+  raw_cache->set(kState1Offset, uint64_to_double(state1));
+  return Smi::FromInt(kInitialIndex);
 }
 }  // namespace internal
 }  // namespace v8
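
The rewritten Runtime_GenerateRandomNumbers keeps the Math.random state and a batch of precomputed doubles in a per-context FixedDoubleArray instead of a typed array, returning the initial cache index. The generator is xorshift128+; a standalone sketch of one step and of a common way to map 64 random bits onto [0, 1) (the ToDouble details here may differ from V8's exact helper):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // One xorshift128+ step, per Vigna's published recurrence.
    void XorShift128(uint64_t* state0, uint64_t* state1) {
      uint64_t s1 = *state0;
      const uint64_t s0 = *state1;
      *state0 = s0;
      s1 ^= s1 << 23;
      s1 ^= s1 >> 17;
      s1 ^= s0;
      s1 ^= s0 >> 26;
      *state1 = s1;
    }

    // Build a double in [1, 2) from 52 random mantissa bits, then shift to [0, 1).
    double ToDouble(uint64_t bits) {
      const uint64_t kExponentBits = UINT64_C(0x3FF0000000000000);
      const uint64_t random = (bits >> 12) | kExponentBits;
      double d;
      std::memcpy(&d, &random, sizeof(d));
      return d - 1.0;
    }

    int main() {
      uint64_t state0 = 1, state1 = 2;  // any nonzero seed will do
      for (int i = 0; i < 64; i++) {
        XorShift128(&state0, &state1);
        const double x = ToDouble(state0);
        assert(x >= 0.0 && x < 1.0);
      }
      return 0;
    }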
diff --git a/src/runtime/runtime-module.cc b/src/runtime/runtime-module.cc
new file mode 100644
index 0000000..2b81343
--- /dev/null
+++ b/src/runtime/runtime-module.cc
@@ -0,0 +1,39 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/runtime/runtime-utils.h"
+
+#include "src/arguments.h"
+
+namespace v8 {
+namespace internal {
+
+RUNTIME_FUNCTION(Runtime_GetModuleNamespace) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_SMI_ARG_CHECKED(module_request, 0);
+  Handle<Module> module(isolate->context()->module());
+  return *Module::GetModuleNamespace(module, module_request);
+}
+
+RUNTIME_FUNCTION(Runtime_LoadModuleVariable) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_SMI_ARG_CHECKED(index, 0);
+  Handle<Module> module(isolate->context()->module());
+  return *Module::LoadVariable(module, index);
+}
+
+RUNTIME_FUNCTION(Runtime_StoreModuleVariable) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 2);
+  CONVERT_SMI_ARG_CHECKED(index, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+  Handle<Module> module(isolate->context()->module());
+  Module::StoreVariable(module, index, value);
+  return isolate->heap()->undefined_value();
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/runtime/runtime-numbers.cc b/src/runtime/runtime-numbers.cc
index 9f43c0a..bfe8763 100644
--- a/src/runtime/runtime-numbers.cc
+++ b/src/runtime/runtime-numbers.cc
@@ -33,28 +33,40 @@
 // ES6 18.2.5 parseInt(string, radix) slow path
 RUNTIME_FUNCTION(Runtime_StringParseInt) {
   HandleScope handle_scope(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
-  CONVERT_NUMBER_CHECKED(int, radix, Int32, args[1]);
-  // Step 8.a. is already handled in the JS function.
-  CHECK(radix == 0 || (2 <= radix && radix <= 36));
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(Object, string, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, radix, 1);
 
+  // Convert {string} to a String first, and flatten it.
+  Handle<String> subject;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, subject,
+                                     Object::ToString(isolate, string));
   subject = String::Flatten(subject);
-  double value;
 
+  // Convert {radix} to Int32.
+  if (!radix->IsNumber()) {
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, radix, Object::ToNumber(radix));
+  }
+  int radix32 = DoubleToInt32(radix->Number());
+  if (radix32 != 0 && (radix32 < 2 || radix32 > 36)) {
+    return isolate->heap()->nan_value();
+  }
+
+  double result;
   {
     DisallowHeapAllocation no_gc;
     String::FlatContent flat = subject->GetFlatContent();
 
     if (flat.IsOneByte()) {
-      value =
-          StringToInt(isolate->unicode_cache(), flat.ToOneByteVector(), radix);
+      result = StringToInt(isolate->unicode_cache(), flat.ToOneByteVector(),
+                           radix32);
     } else {
-      value = StringToInt(isolate->unicode_cache(), flat.ToUC16Vector(), radix);
+      result =
+          StringToInt(isolate->unicode_cache(), flat.ToUC16Vector(), radix32);
     }
   }
 
-  return *isolate->factory()->NewNumber(value);
+  return *isolate->factory()->NewNumber(result);
 }
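
The reworked Runtime_StringParseInt performs the ToString and ToNumber coercions itself and validates the radix in C++: 0 means detect the base from the string prefix, and anything else outside [2, 36] yields NaN. A standalone sketch of the validity rule:

    #include <cassert>

    // Radix 0 means "detect from the string prefix"; otherwise only 2..36 parse.
    bool IsValidParseIntRadix(int radix32) {
      return radix32 == 0 || (2 <= radix32 && radix32 <= 36);
    }

    int main() {
      assert(IsValidParseIntRadix(0));
      assert(IsValidParseIntRadix(16));
      assert(!IsValidParseIntRadix(1));
      assert(!IsValidParseIntRadix(37));
      return 0;
    }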
 
 
diff --git a/src/runtime/runtime-object.cc b/src/runtime/runtime-object.cc
index 70ed23b..c7e9cf3 100644
--- a/src/runtime/runtime-object.cc
+++ b/src/runtime/runtime-object.cc
@@ -207,6 +207,70 @@
   return isolate->heap()->false_value();
 }
 
+// ES6 section 19.1.2.2 Object.create ( O [ , Properties ] )
+// TODO(verwaest): Support the common cases with precached map directly in
+// an Object.create stub.
+RUNTIME_FUNCTION(Runtime_ObjectCreate) {
+  HandleScope scope(isolate);
+  Handle<Object> prototype = args.at<Object>(0);
+  if (!prototype->IsNull(isolate) && !prototype->IsJSReceiver()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, prototype));
+  }
+
+  // Generate the map with the specified {prototype} based on the Object
+  // function's initial map from the current native context.
+  // TODO(bmeurer): Use a dedicated cache for Object.create; think about
+  // slack tracking for Object.create.
+  Handle<Map> map(isolate->native_context()->object_function()->initial_map(),
+                  isolate);
+  if (map->prototype() != *prototype) {
+    if (prototype->IsNull(isolate)) {
+      map = isolate->slow_object_with_null_prototype_map();
+    } else if (prototype->IsJSObject()) {
+      Handle<JSObject> js_prototype = Handle<JSObject>::cast(prototype);
+      if (!js_prototype->map()->is_prototype_map()) {
+        JSObject::OptimizeAsPrototype(js_prototype, FAST_PROTOTYPE);
+      }
+      Handle<PrototypeInfo> info =
+          Map::GetOrCreatePrototypeInfo(js_prototype, isolate);
+      // TODO(verwaest): Use inobject slack tracking for this map.
+      if (info->HasObjectCreateMap()) {
+        map = handle(info->ObjectCreateMap(), isolate);
+      } else {
+        map = Map::CopyInitialMap(map);
+        Map::SetPrototype(map, prototype, FAST_PROTOTYPE);
+        PrototypeInfo::SetObjectCreateMap(info, map);
+      }
+    } else {
+      map = Map::TransitionToPrototype(map, prototype, REGULAR_PROTOTYPE);
+    }
+  }
+
+  bool is_dictionary_map = map->is_dictionary_map();
+  Handle<FixedArray> object_properties;
+  if (is_dictionary_map) {
+    // Allocate the actual properties dictionary up front to avoid invalid object
+    // state.
+    object_properties =
+        NameDictionary::New(isolate, NameDictionary::kInitialCapacity);
+  }
+  // Actually allocate the object.
+  Handle<JSObject> object = isolate->factory()->NewJSObjectFromMap(map);
+  if (is_dictionary_map) {
+    object->set_properties(*object_properties);
+  }
+
+  // Define the properties if properties was specified and is not undefined.
+  Handle<Object> properties = args.at<Object>(1);
+  if (!properties->IsUndefined(isolate)) {
+    RETURN_FAILURE_ON_EXCEPTION(
+        isolate, JSReceiver::DefineProperties(isolate, object, properties));
+  }
+
+  return *object;
+}
+
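
Runtime_ObjectCreate above selects the new object's map in three ways: a shared slow map for a null prototype, a per-prototype map cached on the PrototypeInfo for ordinary JS objects, and a map transition for other receivers. A standalone sketch of the caching behavior only, with every type a hypothetical stand-in:

    #include <cassert>
    #include <map>

    enum class ProtoKind { kNull, kJSObject, kOtherReceiver };

    struct ObjectMap { int id; };

    // Hypothetical cache keyed by prototype identity, playing the role of the
    // ObjectCreateMap slot on PrototypeInfo.
    std::map<const void*, ObjectMap> create_map_cache;
    int next_map_id = 1;

    ObjectMap MapForPrototype(ProtoKind kind, const void* proto) {
      if (kind == ProtoKind::kNull) return ObjectMap{0};  // shared slow map
      if (kind == ProtoKind::kJSObject) {
        auto it = create_map_cache.find(proto);
        if (it != create_map_cache.end()) return it->second;  // cache hit
        ObjectMap m{next_map_id++};
        create_map_cache.emplace(proto, m);  // create and cache on first use
        return m;
      }
      return ObjectMap{next_map_id++};  // transition; not cached here
    }

    int main() {
      int proto = 0;
      const ObjectMap a = MapForPrototype(ProtoKind::kJSObject, &proto);
      const ObjectMap b = MapForPrototype(ProtoKind::kJSObject, &proto);
      assert(a.id == b.id);  // the same prototype reuses its cached map
      return 0;
    }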
 MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
                                                Handle<Object> object,
                                                Handle<Object> key,
@@ -250,18 +314,6 @@
   return *obj;
 }
 
-
-RUNTIME_FUNCTION(Runtime_SetPrototype) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
-  MAYBE_RETURN(
-      JSReceiver::SetPrototype(obj, prototype, true, Object::THROW_ON_ERROR),
-      isolate->heap()->exception());
-  return *obj;
-}
-
 RUNTIME_FUNCTION(Runtime_OptimizeObjectForAddingMultipleProperties) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 2);
@@ -277,64 +329,6 @@
 }
 
 
-namespace {
-
-Object* StoreGlobalViaContext(Isolate* isolate, int slot, Handle<Object> value,
-                              LanguageMode language_mode) {
-  // Go up context chain to the script context.
-  Handle<Context> script_context(isolate->context()->script_context(), isolate);
-  DCHECK(script_context->IsScriptContext());
-  DCHECK(script_context->get(slot)->IsPropertyCell());
-
-  // Lookup the named property on the global object.
-  Handle<ScopeInfo> scope_info(script_context->scope_info(), isolate);
-  Handle<Name> name(scope_info->ContextSlotName(slot), isolate);
-  Handle<JSGlobalObject> global_object(script_context->global_object(),
-                                       isolate);
-  LookupIterator it(global_object, name, global_object, LookupIterator::OWN);
-
-  // Switch to fast mode only if there is a data property and it's not on
-  // a hidden prototype.
-  if (it.state() == LookupIterator::DATA &&
-      it.GetHolder<Object>().is_identical_to(global_object)) {
-    // Now update cell in the script context.
-    Handle<PropertyCell> cell = it.GetPropertyCell();
-    script_context->set(slot, *cell);
-  } else {
-    // This is not a fast case, so keep this access in a slow mode.
-    // Store empty_property_cell here to release the outdated property cell.
-    script_context->set(slot, isolate->heap()->empty_property_cell());
-  }
-
-  MAYBE_RETURN(Object::SetProperty(&it, value, language_mode,
-                                   Object::CERTAINLY_NOT_STORE_FROM_KEYED),
-               isolate->heap()->exception());
-  return *value;
-}
-
-}  // namespace
-
-
-RUNTIME_FUNCTION(Runtime_StoreGlobalViaContext_Sloppy) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-  CONVERT_SMI_ARG_CHECKED(slot, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
-
-  return StoreGlobalViaContext(isolate, slot, value, SLOPPY);
-}
-
-
-RUNTIME_FUNCTION(Runtime_StoreGlobalViaContext_Strict) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-  CONVERT_SMI_ARG_CHECKED(slot, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
-
-  return StoreGlobalViaContext(isolate, slot, value, STRICT);
-}
-
-
 RUNTIME_FUNCTION(Runtime_GetProperty) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 2);
@@ -530,7 +524,7 @@
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
   if (!args[0]->IsJSObject()) {
-    return Smi::FromInt(0);
+    return Smi::kZero;
   }
   CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
 
@@ -604,14 +598,14 @@
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
-  if (!object->IsJSObject()) return Smi::FromInt(0);
+  if (!object->IsJSObject()) return Smi::kZero;
   Handle<JSObject> js_object = Handle<JSObject>::cast(object);
-  if (!js_object->map()->is_deprecated()) return Smi::FromInt(0);
+  if (!js_object->map()->is_deprecated()) return Smi::kZero;
   // This call must not cause lazy deopts, because it's called from deferred
   // code where we can't handle lazy deopts for lack of a suitable bailout
   // ID. So we just try migration and signal failure if necessary,
   // which will also trigger a deopt.
-  if (!JSObject::TryMigrateInstance(js_object)) return Smi::FromInt(0);
+  if (!JSObject::TryMigrateInstance(js_object)) return Smi::kZero;
   return *object;
 }
 
@@ -928,13 +922,20 @@
   DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, done, 1);
-  Handle<JSObject> result =
-      isolate->factory()->NewJSObjectFromMap(isolate->iterator_result_map());
-  result->InObjectPropertyAtPut(JSIteratorResult::kValueIndex, *value);
-  result->InObjectPropertyAtPut(JSIteratorResult::kDoneIndex, *done);
-  return *result;
+  return *isolate->factory()->NewJSIteratorResult(value, done->BooleanValue());
 }
 
+RUNTIME_FUNCTION(Runtime_CreateKeyValueArray) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(Object, key, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+  Handle<FixedArray> elements = isolate->factory()->NewFixedArray(2);
+  elements->set(0, *key);
+  elements->set(1, *value);
+  return *isolate->factory()->NewJSArrayWithElements(elements, FAST_ELEMENTS,
+                                                     2);
+}
 
 RUNTIME_FUNCTION(Runtime_IsAccessCheckNeeded) {
   SealHandleScope shs(isolate);
@@ -960,32 +961,6 @@
   return *value;
 }
 
-RUNTIME_FUNCTION(Runtime_LoadModuleExport) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
-  Handle<Module> module(isolate->context()->module());
-  return *Module::LoadExport(module, name);
-}
-
-RUNTIME_FUNCTION(Runtime_LoadModuleImport) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Smi, module_request, 1);
-  Handle<Module> module(isolate->context()->module());
-  return *Module::LoadImport(module, name, module_request->value());
-}
-
-RUNTIME_FUNCTION(Runtime_StoreModuleExport) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
-  Handle<Module> module(isolate->context()->module());
-  Module::StoreExport(module, name, value);
-  return isolate->heap()->undefined_value();
-}
 
 }  // namespace internal
 }  // namespace v8
diff --git a/src/runtime/runtime-promise.cc b/src/runtime/runtime-promise.cc
new file mode 100644
index 0000000..226993a
--- /dev/null
+++ b/src/runtime/runtime-promise.cc
@@ -0,0 +1,193 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/runtime/runtime-utils.h"
+
+#include "src/debug/debug.h"
+#include "src/elements.h"
+#include "src/promise-utils.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+void PromiseRejectEvent(Isolate* isolate, Handle<JSReceiver> promise,
+                        Handle<Object> rejected_promise, Handle<Object> value,
+                        bool debug_event) {
+  if (isolate->debug()->is_active() && debug_event) {
+    isolate->debug()->OnPromiseReject(rejected_promise, value);
+  }
+  Handle<Symbol> key = isolate->factory()->promise_has_handler_symbol();
+  // Do not report if we actually have a handler.
+  if (JSReceiver::GetDataProperty(promise, key)->IsUndefined(isolate)) {
+    isolate->ReportPromiseReject(Handle<JSObject>::cast(promise), value,
+                                 v8::kPromiseRejectWithNoHandler);
+  }
+}
+
+}  // namespace
+
+RUNTIME_FUNCTION(Runtime_PromiseRejectEventFromStack) {
+  DCHECK(args.length() == 2);
+  HandleScope scope(isolate);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+
+  Handle<Object> rejected_promise = promise;
+  if (isolate->debug()->is_active()) {
+    // If the Promise.reject call is caught, then this will return
+    // undefined, which will be interpreted by PromiseRejectEvent
+    // as being a caught exception event.
+    rejected_promise = isolate->GetPromiseOnStackOnThrow();
+  }
+  PromiseRejectEvent(isolate, promise, rejected_promise, value, true);
+  return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_PromiseRevokeReject) {
+  DCHECK(args.length() == 1);
+  HandleScope scope(isolate);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
+  Handle<Symbol> key = isolate->factory()->promise_has_handler_symbol();
+  // At this point, no revocation has been issued yet.
+  CHECK(JSReceiver::GetDataProperty(promise, key)->IsUndefined(isolate));
+  isolate->ReportPromiseReject(promise, Handle<Object>(),
+                               v8::kPromiseHandlerAddedAfterReject);
+  return isolate->heap()->undefined_value();
+}
+
+namespace {
+void EnqueuePromiseReactionJob(Isolate* isolate, Handle<Object> value,
+                               Handle<Object> tasks, Handle<Object> deferred,
+                               Handle<Object> status) {
+  Handle<Object> debug_id = isolate->factory()->undefined_value();
+  Handle<Object> debug_name = isolate->factory()->undefined_value();
+  if (isolate->debug()->is_active()) {
+    MaybeHandle<Object> maybe_result;
+    Handle<Object> argv[] = {deferred, status};
+    maybe_result = Execution::TryCall(
+        isolate, isolate->promise_debug_get_info(),
+        isolate->factory()->undefined_value(), arraysize(argv), argv);
+    Handle<Object> result;
+    if (maybe_result.ToHandle(&result)) {
+      CHECK(result->IsJSArray());
+      Handle<JSArray> array = Handle<JSArray>::cast(result);
+      ElementsAccessor* accessor = array->GetElementsAccessor();
+      DCHECK(accessor->HasElement(array, 0));
+      DCHECK(accessor->HasElement(array, 1));
+      debug_id = accessor->Get(array, 0);
+      debug_name = accessor->Get(array, 1);
+    }
+  }
+  Handle<PromiseReactionJobInfo> info =
+      isolate->factory()->NewPromiseReactionJobInfo(value, tasks, deferred,
+                                                    debug_id, debug_name,
+                                                    isolate->native_context());
+  isolate->EnqueueMicrotask(info);
+}
+
+void PromiseFulfill(Isolate* isolate, Handle<JSReceiver> promise,
+                    Handle<Smi> status, Handle<Object> value,
+                    Handle<Symbol> reaction) {
+  Handle<Object> tasks = JSReceiver::GetDataProperty(promise, reaction);
+  if (!tasks->IsUndefined(isolate)) {
+    Handle<Object> deferred = JSReceiver::GetDataProperty(
+        promise, isolate->factory()->promise_deferred_reaction_symbol());
+    EnqueuePromiseReactionJob(isolate, value, tasks, deferred, status);
+  }
+}
+}  // namespace
+
+RUNTIME_FUNCTION(Runtime_PromiseReject) {
+  DCHECK(args.length() == 3);
+  HandleScope scope(isolate);
+  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, promise, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, reason, 1);
+  CONVERT_BOOLEAN_ARG_CHECKED(debug_event, 2);
+
+  PromiseRejectEvent(isolate, promise, promise, reason, debug_event);
+
+  Handle<Smi> status = handle(Smi::FromInt(kPromiseRejected), isolate);
+  Handle<Symbol> reaction =
+      isolate->factory()->promise_reject_reactions_symbol();
+  PromiseFulfill(isolate, promise, status, reason, reaction);
+  return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_PromiseFulfill) {
+  DCHECK(args.length() == 4);
+  HandleScope scope(isolate);
+  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, promise, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Smi, status, 1);
+  CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+  CONVERT_ARG_HANDLE_CHECKED(Symbol, reaction, 3);
+  PromiseFulfill(isolate, promise, status, value, reaction);
+  return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_EnqueuePromiseReactionJob) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 4);
+  CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, tasks, 1);
+  CONVERT_ARG_HANDLE_CHECKED(Object, deferred, 2);
+  CONVERT_ARG_HANDLE_CHECKED(Object, status, 3);
+  EnqueuePromiseReactionJob(isolate, value, tasks, deferred, status);
+  return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_EnqueuePromiseResolveThenableJob) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 3);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, resolution, 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, then, 2);
+
+  // TODO(gsathya): Add fast path for native promises with unmodified
+  // PromiseThen (which don't need these resolving functions, but
+  // instead can just call resolve/reject directly).
+  Handle<JSFunction> resolve, reject;
+  PromiseUtils::CreateResolvingFunctions(
+      isolate, promise, isolate->factory()->false_value(), &resolve, &reject);
+
+  Handle<Object> debug_id, debug_name;
+  if (isolate->debug()->is_active()) {
+    debug_id =
+        handle(Smi::FromInt(isolate->GetNextDebugMicrotaskId()), isolate);
+    debug_name = isolate->factory()->PromiseResolveThenableJob_string();
+    isolate->debug()->OnAsyncTaskEvent(isolate->factory()->enqueue_string(),
+                                       debug_id,
+                                       Handle<String>::cast(debug_name));
+  } else {
+    debug_id = isolate->factory()->undefined_value();
+    debug_name = isolate->factory()->undefined_value();
+  }
+
+  Handle<PromiseResolveThenableJobInfo> info =
+      isolate->factory()->NewPromiseResolveThenableJobInfo(
+          resolution, then, resolve, reject, debug_id, debug_name,
+          isolate->native_context());
+  isolate->EnqueueMicrotask(info);
+
+  return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_EnqueueMicrotask) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, microtask, 0);
+  isolate->EnqueueMicrotask(microtask);
+  return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_RunMicrotasks) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 0);
+  isolate->RunMicrotasks();
+  return isolate->heap()->undefined_value();
+}
+
+}  // namespace internal
+}  // namespace v8
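
The Runtime_EnqueueMicrotask and Runtime_RunMicrotasks entries above expose the isolate's microtask queue. A standalone sketch of the FIFO drain-to-completion discipline they imply, with a plain deque standing in for the queue:

    #include <cassert>
    #include <deque>
    #include <functional>

    std::deque<std::function<void()>> microtasks;

    void EnqueueMicrotask(std::function<void()> task) {
      microtasks.push_back(std::move(task));
    }

    void RunMicrotasks() {
      while (!microtasks.empty()) {
        std::function<void()> task = std::move(microtasks.front());
        microtasks.pop_front();
        task();  // may enqueue more microtasks; they run in this same drain
      }
    }

    int main() {
      int order = 0, first = 0, second = 0;
      EnqueueMicrotask([&] {
        first = ++order;
        EnqueueMicrotask([&] { second = ++order; });
      });
      RunMicrotasks();
      assert(first == 1 && second == 2);
      return 0;
    }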
diff --git a/src/runtime/runtime-regexp.cc b/src/runtime/runtime-regexp.cc
index 977e6bc..d572eed 100644
--- a/src/runtime/runtime-regexp.cc
+++ b/src/runtime/runtime-regexp.cc
@@ -10,6 +10,7 @@
 #include "src/messages.h"
 #include "src/regexp/jsregexp-inl.h"
 #include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp-utils.h"
 #include "src/string-builder.h"
 #include "src/string-search.h"
 
@@ -279,10 +280,8 @@
   }
 }
 
-
 void FindOneByteStringIndices(Vector<const uint8_t> subject, uint8_t pattern,
-                              ZoneList<int>* indices, unsigned int limit,
-                              Zone* zone) {
+                              List<int>* indices, unsigned int limit) {
   DCHECK(limit > 0);
   // Collect indices of pattern in subject using memchr.
   // Stop after finding at most limit values.
@@ -293,32 +292,29 @@
     pos = reinterpret_cast<const uint8_t*>(
         memchr(pos, pattern, subject_end - pos));
     if (pos == NULL) return;
-    indices->Add(static_cast<int>(pos - subject_start), zone);
+    indices->Add(static_cast<int>(pos - subject_start));
     pos++;
     limit--;
   }
 }
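
FindOneByteStringIndices above scans with memchr and now appends into a plain List rather than a zone-allocated one. A standalone sketch of the same bounded scan, with std::vector standing in for List<int>:

    #include <cassert>
    #include <cstring>
    #include <vector>

    void FindByteIndices(const unsigned char* data, size_t size,
                         unsigned char pattern, std::vector<int>* indices,
                         unsigned int limit) {
      const unsigned char* begin = data;
      const unsigned char* end = data + size;
      const unsigned char* pos = begin;
      // Collect indices of pattern using memchr; stop after limit matches.
      while (limit > 0) {
        pos = static_cast<const unsigned char*>(memchr(pos, pattern, end - pos));
        if (pos == nullptr) return;
        indices->push_back(static_cast<int>(pos - begin));
        pos++;
        limit--;
      }
    }

    int main() {
      const char* text = "abcabca";
      std::vector<int> indices;
      FindByteIndices(reinterpret_cast<const unsigned char*>(text), 7, 'a',
                      &indices, 2);  // stop after two matches
      assert(indices.size() == 2 && indices[0] == 0 && indices[1] == 3);
      return 0;
    }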
 
-
 void FindTwoByteStringIndices(const Vector<const uc16> subject, uc16 pattern,
-                              ZoneList<int>* indices, unsigned int limit,
-                              Zone* zone) {
+                              List<int>* indices, unsigned int limit) {
   DCHECK(limit > 0);
   const uc16* subject_start = subject.start();
   const uc16* subject_end = subject_start + subject.length();
   for (const uc16* pos = subject_start; pos < subject_end && limit > 0; pos++) {
     if (*pos == pattern) {
-      indices->Add(static_cast<int>(pos - subject_start), zone);
+      indices->Add(static_cast<int>(pos - subject_start));
       limit--;
     }
   }
 }
 
-
 template <typename SubjectChar, typename PatternChar>
 void FindStringIndices(Isolate* isolate, Vector<const SubjectChar> subject,
-                       Vector<const PatternChar> pattern,
-                       ZoneList<int>* indices, unsigned int limit, Zone* zone) {
+                       Vector<const PatternChar> pattern, List<int>* indices,
+                       unsigned int limit) {
   DCHECK(limit > 0);
   // Collect indices of pattern in subject.
   // Stop after finding at most limit values.
@@ -328,16 +324,15 @@
   while (limit > 0) {
     index = search.Search(subject, index);
     if (index < 0) return;
-    indices->Add(index, zone);
+    indices->Add(index);
     index += pattern_length;
     limit--;
   }
 }
 
-
 void FindStringIndicesDispatch(Isolate* isolate, String* subject,
-                               String* pattern, ZoneList<int>* indices,
-                               unsigned int limit, Zone* zone) {
+                               String* pattern, List<int>* indices,
+                               unsigned int limit) {
   {
     DisallowHeapAllocation no_gc;
     String::FlatContent subject_content = subject->GetFlatContent();
@@ -351,14 +346,14 @@
             pattern_content.ToOneByteVector();
         if (pattern_vector.length() == 1) {
           FindOneByteStringIndices(subject_vector, pattern_vector[0], indices,
-                                   limit, zone);
+                                   limit);
         } else {
           FindStringIndices(isolate, subject_vector, pattern_vector, indices,
-                            limit, zone);
+                            limit);
         }
       } else {
         FindStringIndices(isolate, subject_vector,
-                          pattern_content.ToUC16Vector(), indices, limit, zone);
+                          pattern_content.ToUC16Vector(), indices, limit);
       }
     } else {
       Vector<const uc16> subject_vector = subject_content.ToUC16Vector();
@@ -367,34 +362,51 @@
             pattern_content.ToOneByteVector();
         if (pattern_vector.length() == 1) {
           FindTwoByteStringIndices(subject_vector, pattern_vector[0], indices,
-                                   limit, zone);
+                                   limit);
         } else {
           FindStringIndices(isolate, subject_vector, pattern_vector, indices,
-                            limit, zone);
+                            limit);
         }
       } else {
         Vector<const uc16> pattern_vector = pattern_content.ToUC16Vector();
         if (pattern_vector.length() == 1) {
           FindTwoByteStringIndices(subject_vector, pattern_vector[0], indices,
-                                   limit, zone);
+                                   limit);
         } else {
           FindStringIndices(isolate, subject_vector, pattern_vector, indices,
-                            limit, zone);
+                            limit);
         }
       }
     }
   }
 }
 
+namespace {
+List<int>* GetRewoundRegexpIndicesList(Isolate* isolate) {
+  List<int>* list = isolate->regexp_indices();
+  list->Rewind(0);
+  return list;
+}
+
+void TruncateRegexpIndicesList(Isolate* isolate) {
+  // Same size as smallest zone segment, preserving behavior from the
+  // runtime zone.
+  static const int kMaxRegexpIndicesListCapacity = 8 * KB;
+  if (isolate->regexp_indices()->capacity() > kMaxRegexpIndicesListCapacity) {
+    isolate->regexp_indices()->Clear();  // Throw away the backing storage.
+  }
+}
+}  // namespace
+
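
GetRewoundRegexpIndicesList and TruncateRegexpIndicesList above implement a reuse policy for the isolate's match-index scratch list: rewind it before each use, and drop the backing store only once it has grown past a fixed cap. A standalone sketch with std::vector standing in for List<int>; the cap mirrors kMaxRegexpIndicesListCapacity:

    #include <cassert>
    #include <vector>

    static const size_t kMaxIndicesCapacity = 8 * 1024;

    std::vector<int>* GetRewoundIndices(std::vector<int>* storage) {
      storage->clear();  // rewind: contents go, capacity stays for reuse
      return storage;
    }

    void TruncateIndices(std::vector<int>* storage) {
      if (storage->capacity() > kMaxIndicesCapacity) {
        std::vector<int>().swap(*storage);  // throw away the backing store
      }
    }

    int main() {
      std::vector<int> scratch;
      std::vector<int>* indices = GetRewoundIndices(&scratch);
      for (int i = 0; i < 100000; i++) indices->push_back(i);
      TruncateIndices(&scratch);
      assert(scratch.capacity() <= kMaxIndicesCapacity);
      return 0;
    }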
 template <typename ResultSeqString>
 MUST_USE_RESULT static Object* StringReplaceGlobalAtomRegExpWithString(
     Isolate* isolate, Handle<String> subject, Handle<JSRegExp> pattern_regexp,
-    Handle<String> replacement, Handle<JSObject> last_match_info) {
+    Handle<String> replacement, Handle<RegExpMatchInfo> last_match_info) {
   DCHECK(subject->IsFlat());
   DCHECK(replacement->IsFlat());
 
-  ZoneScope zone_scope(isolate->runtime_zone());
-  ZoneList<int> indices(8, zone_scope.zone());
+  List<int>* indices = GetRewoundRegexpIndicesList(isolate);
+
   DCHECK_EQ(JSRegExp::ATOM, pattern_regexp->TypeTag());
   String* pattern =
       String::cast(pattern_regexp->DataAt(JSRegExp::kAtomPatternIndex));
@@ -402,10 +414,9 @@
   int pattern_len = pattern->length();
   int replacement_len = replacement->length();
 
-  FindStringIndicesDispatch(isolate, *subject, pattern, &indices, 0xffffffff,
-                            zone_scope.zone());
+  FindStringIndicesDispatch(isolate, *subject, pattern, indices, 0xffffffff);
 
-  int matches = indices.length();
+  int matches = indices->length();
   if (matches == 0) return *subject;
 
   // Detect integer overflow.
@@ -436,10 +447,10 @@
 
   for (int i = 0; i < matches; i++) {
     // Copy non-matched subject content.
-    if (subject_pos < indices.at(i)) {
+    if (subject_pos < indices->at(i)) {
       String::WriteToFlat(*subject, result->GetChars() + result_pos,
-                          subject_pos, indices.at(i));
-      result_pos += indices.at(i) - subject_pos;
+                          subject_pos, indices->at(i));
+      result_pos += indices->at(i) - subject_pos;
     }
 
     // Replace match.
@@ -449,7 +460,7 @@
       result_pos += replacement_len;
     }
 
-    subject_pos = indices.at(i) + pattern_len;
+    subject_pos = indices->at(i) + pattern_len;
   }
   // Add remaining subject content at the end.
   if (subject_pos < subject_len) {
@@ -457,16 +468,18 @@
                         subject_len);
   }
 
-  int32_t match_indices[] = {indices.at(matches - 1),
-                             indices.at(matches - 1) + pattern_len};
+  int32_t match_indices[] = {indices->at(matches - 1),
+                             indices->at(matches - 1) + pattern_len};
   RegExpImpl::SetLastMatchInfo(last_match_info, subject, 0, match_indices);
 
+  TruncateRegexpIndicesList(isolate);
+
   return *result;
 }
 
 MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithString(
     Isolate* isolate, Handle<String> subject, Handle<JSRegExp> regexp,
-    Handle<String> replacement, Handle<JSObject> last_match_info) {
+    Handle<String> replacement, Handle<RegExpMatchInfo> last_match_info) {
   DCHECK(subject->IsFlat());
   DCHECK(replacement->IsFlat());
 
@@ -474,8 +487,8 @@
   int subject_length = subject->length();
 
   // CompiledReplacement uses zone allocation.
-  ZoneScope zone_scope(isolate->runtime_zone());
-  CompiledReplacement compiled_replacement(zone_scope.zone());
+  Zone zone(isolate->allocator(), ZONE_NAME);
+  CompiledReplacement compiled_replacement(&zone);
   bool simple_replace =
       compiled_replacement.Compile(replacement, capture_count, subject_length);
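CompiledReplacement now draws from a function-local Zone built directly on the isolate's allocator rather than a ZoneScope over the shared runtime zone, so the arena's lifetime is exactly the call. A rough standalone analogue of that ownership change (stub arena, not the V8 Zone API):

#include <cstddef>
#include <memory>
#include <vector>

class Arena {
 public:
  // Individual allocations are never freed; everything is released
  // together when the Arena goes out of scope.
  void* Allocate(std::size_t bytes) {
    blocks_.emplace_back(new char[bytes]);
    return blocks_.back().get();
  }

 private:
  std::vector<std::unique_ptr<char[]>> blocks_;
};

void ReplaceOnce() {
  Arena zone;                         // lives for exactly this call
  void* scratch = zone.Allocate(64);  // e.g. compiled replacement parts
  (void)scratch;
}                                     // zone and its memory die here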
 
@@ -548,7 +561,7 @@
 template <typename ResultSeqString>
 MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithEmptyString(
     Isolate* isolate, Handle<String> subject, Handle<JSRegExp> regexp,
-    Handle<JSObject> last_match_info) {
+    Handle<RegExpMatchInfo> last_match_info) {
   DCHECK(subject->IsFlat());
 
   // Shortcut for simple non-regexp global replacements
@@ -643,18 +656,12 @@
   return *answer;
 }
 
+namespace {
 
-RUNTIME_FUNCTION(Runtime_StringReplaceGlobalRegExpWithString) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 4);
-
-  CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
-  CONVERT_ARG_HANDLE_CHECKED(String, replacement, 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, last_match_info, 3);
-
+Object* StringReplaceGlobalRegExpWithStringHelper(
+    Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
+    Handle<String> replacement, Handle<RegExpMatchInfo> last_match_info) {
   CHECK(regexp->GetFlags() & JSRegExp::kGlobal);
-  CHECK(last_match_info->HasFastObjectElements());
 
   subject = String::Flatten(subject);
 
@@ -674,6 +681,20 @@
                                              replacement, last_match_info);
 }
 
+}  // namespace
+
+RUNTIME_FUNCTION(Runtime_StringReplaceGlobalRegExpWithString) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 4);
+
+  CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, replacement, 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
+  CONVERT_ARG_HANDLE_CHECKED(RegExpMatchInfo, last_match_info, 3);
+
+  return StringReplaceGlobalRegExpWithStringHelper(
+      isolate, regexp, subject, replacement, last_match_info);
+}
 
 RUNTIME_FUNCTION(Runtime_StringSplit) {
   HandleScope handle_scope(isolate);
@@ -694,7 +715,7 @@
                                    &last_match_cache_unused,
                                    RegExpResultsCache::STRING_SPLIT_SUBSTRINGS),
         isolate);
-    if (*cached_answer != Smi::FromInt(0)) {
+    if (*cached_answer != Smi::kZero) {
       // The cache FixedArray is a COW-array and can therefore be reused.
       Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(
           Handle<FixedArray>::cast(cached_answer));
@@ -709,25 +730,18 @@
   subject = String::Flatten(subject);
   pattern = String::Flatten(pattern);
 
-  static const int kMaxInitialListCapacity = 16;
+  List<int>* indices = GetRewoundRegexpIndicesList(isolate);
 
-  ZoneScope zone_scope(isolate->runtime_zone());
+  FindStringIndicesDispatch(isolate, *subject, *pattern, indices, limit);
 
-  // Find (up to limit) indices of separator and end-of-string in subject
-  int initial_capacity = Min<uint32_t>(kMaxInitialListCapacity, limit);
-  ZoneList<int> indices(initial_capacity, zone_scope.zone());
-
-  FindStringIndicesDispatch(isolate, *subject, *pattern, &indices, limit,
-                            zone_scope.zone());
-
-  if (static_cast<uint32_t>(indices.length()) < limit) {
-    indices.Add(subject_length, zone_scope.zone());
+  if (static_cast<uint32_t>(indices->length()) < limit) {
+    indices->Add(subject_length);
   }
 
   // The list indices now contains the end of each part to create.
 
   // Create JSArray of substrings separated by separator.
-  int part_count = indices.length();
+  int part_count = indices->length();
 
   Handle<JSArray> result =
       isolate->factory()->NewJSArray(FAST_ELEMENTS, part_count, part_count,
@@ -737,12 +751,12 @@
 
   Handle<FixedArray> elements(FixedArray::cast(result->elements()));
 
-  if (part_count == 1 && indices.at(0) == subject_length) {
+  if (part_count == 1 && indices->at(0) == subject_length) {
     elements->set(0, *subject);
   } else {
     int part_start = 0;
     FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < part_count, i++, {
-      int part_end = indices.at(i);
+      int part_end = indices->at(i);
       Handle<String> substring =
           isolate->factory()->NewProperSubString(subject, part_start, part_end);
       elements->set(i, *substring);
@@ -758,9 +772,37 @@
     }
   }
 
+  TruncateRegexpIndicesList(isolate);
+
   return *result;
 }
 
+// ES##sec-regexpcreate
+// RegExpCreate ( P, F )
+RUNTIME_FUNCTION(Runtime_RegExpCreate) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(Object, source_object, 0);
+
+  Handle<String> source;
+  if (source_object->IsUndefined(isolate)) {
+    source = isolate->factory()->empty_string();
+  } else {
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, source, Object::ToString(isolate, source_object));
+  }
+
+  Handle<Map> map(isolate->regexp_function()->initial_map());
+  Handle<JSRegExp> regexp =
+      Handle<JSRegExp>::cast(isolate->factory()->NewJSObjectFromMap(map));
+
+  JSRegExp::Flags flags = JSRegExp::kNone;
+
+  RETURN_FAILURE_ON_EXCEPTION(isolate,
+                              JSRegExp::Initialize(regexp, source, flags));
+
+  return *regexp;
+}
 
 RUNTIME_FUNCTION(Runtime_RegExpExec) {
   HandleScope scope(isolate);
@@ -768,7 +810,7 @@
   CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
   CONVERT_INT32_ARG_CHECKED(index, 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, last_match_info, 3);
+  CONVERT_ARG_HANDLE_CHECKED(RegExpMatchInfo, last_match_info, 3);
   // Due to the way the JS calls are constructed this must be less than the
   // length of a string, i.e. it is always a Smi.  We check anyway for security.
   CHECK(index >= 0);
@@ -778,64 +820,116 @@
       isolate, RegExpImpl::Exec(regexp, subject, index, last_match_info));
 }
 
-
-RUNTIME_FUNCTION(Runtime_RegExpFlags) {
-  SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
-  return regexp->flags();
-}
-
-
-RUNTIME_FUNCTION(Runtime_RegExpSource) {
-  SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
-  return regexp->source();
-}
-
-// TODO(jgruber): Remove this once all uses in regexp.js have been removed.
-RUNTIME_FUNCTION(Runtime_RegExpConstructResult) {
-  HandleScope handle_scope(isolate);
-  DCHECK(args.length() == 3);
-  CONVERT_SMI_ARG_CHECKED(size, 0);
-  CHECK(size >= 0 && size <= FixedArray::kMaxLength);
-  CONVERT_ARG_HANDLE_CHECKED(Object, index, 1);
-  CONVERT_ARG_HANDLE_CHECKED(Object, input, 2);
-  Handle<FixedArray> elements = isolate->factory()->NewFixedArray(size);
-  Handle<Map> regexp_map(isolate->native_context()->regexp_result_map());
-  Handle<JSObject> object =
-      isolate->factory()->NewJSObjectFromMap(regexp_map, NOT_TENURED);
-  Handle<JSArray> array = Handle<JSArray>::cast(object);
-  array->set_elements(*elements);
-  array->set_length(Smi::FromInt(size));
-  // Write in-object properties after the length of the array.
-  array->InObjectPropertyAtPut(JSRegExpResult::kIndexIndex, *index);
-  array->InObjectPropertyAtPut(JSRegExpResult::kInputIndex, *input);
-  return *array;
-}
-
-
-RUNTIME_FUNCTION(Runtime_RegExpInitializeAndCompile) {
+RUNTIME_FUNCTION(Runtime_RegExpInternalReplace) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 3);
   CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
-  CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
-  CONVERT_ARG_HANDLE_CHECKED(String, flags, 2);
+  CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, replacement, 2);
 
-  RETURN_FAILURE_ON_EXCEPTION(isolate,
-                              JSRegExp::Initialize(regexp, source, flags));
+  Handle<RegExpMatchInfo> internal_match_info =
+      isolate->regexp_internal_match_info();
 
-  return *regexp;
+  return StringReplaceGlobalRegExpWithStringHelper(
+      isolate, regexp, subject, replacement, internal_match_info);
 }
 
+namespace {
+
+class MatchInfoBackedMatch : public String::Match {
+ public:
+  MatchInfoBackedMatch(Isolate* isolate, Handle<String> subject,
+                       Handle<RegExpMatchInfo> match_info)
+      : isolate_(isolate), match_info_(match_info) {
+    subject_ = String::Flatten(subject);
+  }
+
+  Handle<String> GetMatch() override {
+    return RegExpUtils::GenericCaptureGetter(isolate_, match_info_, 0, nullptr);
+  }
+
+  MaybeHandle<String> GetCapture(int i, bool* capture_exists) override {
+    Handle<Object> capture_obj = RegExpUtils::GenericCaptureGetter(
+        isolate_, match_info_, i, capture_exists);
+    return (*capture_exists) ? Object::ToString(isolate_, capture_obj)
+                             : isolate_->factory()->empty_string();
+  }
+
+  Handle<String> GetPrefix() override {
+    const int match_start = match_info_->Capture(0);
+    return isolate_->factory()->NewSubString(subject_, 0, match_start);
+  }
+
+  Handle<String> GetSuffix() override {
+    const int match_end = match_info_->Capture(1);
+    return isolate_->factory()->NewSubString(subject_, match_end,
+                                             subject_->length());
+  }
+
+  int CaptureCount() override {
+    return match_info_->NumberOfCaptureRegisters() / 2;
+  }
+
+  virtual ~MatchInfoBackedMatch() {}
+
+ private:
+  Isolate* isolate_;
+  Handle<String> subject_;
+  Handle<RegExpMatchInfo> match_info_;
+};
+
+class VectorBackedMatch : public String::Match {
+ public:
+  VectorBackedMatch(Isolate* isolate, Handle<String> subject,
+                    Handle<String> match, int match_position,
+                    ZoneVector<Handle<Object>>* captures)
+      : isolate_(isolate),
+        match_(match),
+        match_position_(match_position),
+        captures_(captures) {
+    subject_ = String::Flatten(subject);
+  }
+
+  Handle<String> GetMatch() override { return match_; }
+
+  MaybeHandle<String> GetCapture(int i, bool* capture_exists) override {
+    Handle<Object> capture_obj = captures_->at(i);
+    if (capture_obj->IsUndefined(isolate_)) {
+      *capture_exists = false;
+      return isolate_->factory()->empty_string();
+    }
+    *capture_exists = true;
+    return Object::ToString(isolate_, capture_obj);
+  }
+
+  Handle<String> GetPrefix() override {
+    return isolate_->factory()->NewSubString(subject_, 0, match_position_);
+  }
+
+  Handle<String> GetSuffix() override {
+    const int match_end_position = match_position_ + match_->length();
+    return isolate_->factory()->NewSubString(subject_, match_end_position,
+                                             subject_->length());
+  }
+
+  int CaptureCount() override { return static_cast<int>(captures_->size()); }
+
+  virtual ~VectorBackedMatch() {}
+
+ private:
+  Isolate* isolate_;
+  Handle<String> subject_;
+  Handle<String> match_;
+  const int match_position_;
+  ZoneVector<Handle<Object>>* captures_;
+};
 
 // Only called from Runtime_RegExpExecMultiple so it doesn't need to maintain
 // separate last match info.  See comment on that function.
 template <bool has_capture>
 static Object* SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
                                     Handle<JSRegExp> regexp,
-                                    Handle<JSObject> last_match_array,
+                                    Handle<RegExpMatchInfo> last_match_array,
                                     Handle<JSArray> result_array) {
   DCHECK(subject->IsFlat());
   DCHECK_NE(has_capture, regexp->CaptureCount() == 0);
@@ -858,8 +952,11 @@
       }
       Handle<FixedArray> cached_fixed_array =
           Handle<FixedArray>(FixedArray::cast(cached_answer));
-      // The cache FixedArray is a COW-array and can therefore be reused.
-      JSArray::SetContent(result_array, cached_fixed_array);
+      // The cache FixedArray is a COW-array and we need to return a copy.
+      Handle<FixedArray> copied_fixed_array =
+          isolate->factory()->CopyFixedArrayWithMap(
+              cached_fixed_array, isolate->factory()->fixed_array_map());
+      JSArray::SetContent(result_array, copied_fixed_array);
       RegExpImpl::SetLastMatchInfo(last_match_array, subject, capture_count,
                                    last_match);
       DeleteArray(last_match);
@@ -964,9 +1061,12 @@
       }
       Handle<FixedArray> result_fixed_array = builder.array();
       result_fixed_array->Shrink(builder.length());
-      // Cache the result and turn the FixedArray into a COW array.
+      // Cache the result and copy the FixedArray into a COW array.
+      Handle<FixedArray> copied_fixed_array =
+          isolate->factory()->CopyFixedArrayWithMap(
+              result_fixed_array, isolate->factory()->fixed_array_map());
       RegExpResultsCache::Enter(
-          isolate, subject, handle(regexp->data(), isolate), result_fixed_array,
+          isolate, subject, handle(regexp->data(), isolate), copied_fixed_array,
           last_match_cache, RegExpResultsCache::REGEXP_MULTIPLE_INDICES);
     }
     return *builder.ToJSArray(result_array);
@@ -975,19 +1075,174 @@
   }
 }
 
+MUST_USE_RESULT MaybeHandle<String> StringReplaceNonGlobalRegExpWithFunction(
+    Isolate* isolate, Handle<String> subject, Handle<JSRegExp> regexp,
+    Handle<Object> replace_obj) {
+  Factory* factory = isolate->factory();
+  Handle<RegExpMatchInfo> last_match_info = isolate->regexp_last_match_info();
 
-// This is only called for StringReplaceGlobalRegExpWithFunction.  This sets
-// lastMatchInfoOverride to maintain the last match info, so we don't need to
-// set any other last match array info.
+  // TODO(jgruber): This is a pattern we could refactor.
+  Handle<Object> match_indices_obj;
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, match_indices_obj,
+      RegExpImpl::Exec(regexp, subject, 0, last_match_info), String);
+
+  if (match_indices_obj->IsNull(isolate)) {
+    RETURN_ON_EXCEPTION(isolate, RegExpUtils::SetLastIndex(isolate, regexp, 0),
+                        String);
+    return subject;
+  }
+
+  Handle<RegExpMatchInfo> match_indices =
+      Handle<RegExpMatchInfo>::cast(match_indices_obj);
+
+  const int index = match_indices->Capture(0);
+  const int end_of_match = match_indices->Capture(1);
+
+  IncrementalStringBuilder builder(isolate);
+  builder.AppendString(factory->NewSubString(subject, 0, index));
+
+  // Compute the parameter list consisting of the match, captures, index,
+  // and subject for the replace function invocation.
+  // The number of captures plus one for the match.
+  const int m = match_indices->NumberOfCaptureRegisters() / 2;
+
+  const int argc = m + 2;
+  ScopedVector<Handle<Object>> argv(argc);
+
+  for (int j = 0; j < m; j++) {
+    bool ok;
+    Handle<String> capture =
+        RegExpUtils::GenericCaptureGetter(isolate, match_indices, j, &ok);
+    if (ok) {
+      argv[j] = capture;
+    } else {
+      argv[j] = factory->undefined_value();
+    }
+  }
+
+  argv[argc - 2] = handle(Smi::FromInt(index), isolate);
+  argv[argc - 1] = subject;
+
+  Handle<Object> replacement_obj;
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, replacement_obj,
+      Execution::Call(isolate, replace_obj, factory->undefined_value(), argc,
+                      argv.start()),
+      String);
+
+  Handle<String> replacement;
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, replacement, Object::ToString(isolate, replacement_obj), String);
+
+  builder.AppendString(replacement);
+  builder.AppendString(
+      factory->NewSubString(subject, end_of_match, subject->length()));
+
+  return builder.Finish();
+}
+
+// Legacy implementation of RegExp.prototype[Symbol.replace] which
+// doesn't properly call the underlying exec method.
+MUST_USE_RESULT MaybeHandle<String> RegExpReplace(Isolate* isolate,
+                                                  Handle<JSRegExp> regexp,
+                                                  Handle<String> string,
+                                                  Handle<Object> replace_obj) {
+  Factory* factory = isolate->factory();
+
+  // TODO(jgruber): We need the even stricter guarantee of an unmodified
+  // JSRegExp map here for access to GetFlags to be legal.
+  const int flags = regexp->GetFlags();
+  const bool global = (flags & JSRegExp::kGlobal) != 0;
+
+  // Functional fast-paths are dispatched directly by replace builtin.
+  DCHECK(!replace_obj->IsCallable());
+
+  Handle<String> replace;
+  ASSIGN_RETURN_ON_EXCEPTION(isolate, replace,
+                             Object::ToString(isolate, replace_obj), String);
+  replace = String::Flatten(replace);
+
+  Handle<RegExpMatchInfo> last_match_info = isolate->regexp_last_match_info();
+
+  if (!global) {
+    // Non-global regexp search, string replace.
+
+    Handle<Object> match_indices_obj;
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate, match_indices_obj,
+        RegExpImpl::Exec(regexp, string, 0, last_match_info), String);
+
+    if (match_indices_obj->IsNull(isolate)) {
+      RETURN_ON_EXCEPTION(
+          isolate, RegExpUtils::SetLastIndex(isolate, regexp, 0), String);
+      return string;
+    }
+
+    auto match_indices = Handle<RegExpMatchInfo>::cast(match_indices_obj);
+
+    const int start_index = match_indices->Capture(0);
+    const int end_index = match_indices->Capture(1);
+
+    IncrementalStringBuilder builder(isolate);
+    builder.AppendString(factory->NewSubString(string, 0, start_index));
+
+    if (replace->length() > 0) {
+      MatchInfoBackedMatch m(isolate, string, match_indices);
+      Handle<String> replacement;
+      ASSIGN_RETURN_ON_EXCEPTION(isolate, replacement,
+                                 String::GetSubstitution(isolate, &m, replace),
+                                 String);
+      builder.AppendString(replacement);
+    }
+
+    builder.AppendString(
+        factory->NewSubString(string, end_index, string->length()));
+    return builder.Finish();
+  } else {
+    // Global regexp search, string replace.
+    DCHECK(global);
+    RETURN_ON_EXCEPTION(isolate, RegExpUtils::SetLastIndex(isolate, regexp, 0),
+                        String);
+
+    if (replace->length() == 0) {
+      if (string->HasOnlyOneByteChars()) {
+        Object* result =
+            StringReplaceGlobalRegExpWithEmptyString<SeqOneByteString>(
+                isolate, string, regexp, last_match_info);
+        return handle(String::cast(result), isolate);
+      } else {
+        Object* result =
+            StringReplaceGlobalRegExpWithEmptyString<SeqTwoByteString>(
+                isolate, string, regexp, last_match_info);
+        return handle(String::cast(result), isolate);
+      }
+    }
+
+    Object* result = StringReplaceGlobalRegExpWithString(
+        isolate, string, regexp, replace, last_match_info);
+    if (result->IsString()) {
+      return handle(String::cast(result), isolate);
+    } else {
+      return MaybeHandle<String>();
+    }
+  }
+
+  UNREACHABLE();
+  return MaybeHandle<String>();
+}
+
+}  // namespace
+
+// This is only called for StringReplaceGlobalRegExpWithFunction.
 RUNTIME_FUNCTION(Runtime_RegExpExecMultiple) {
   HandleScope handles(isolate);
   DCHECK(args.length() == 4);
 
   CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, last_match_info, 2);
+  CONVERT_ARG_HANDLE_CHECKED(RegExpMatchInfo, last_match_info, 2);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, result_array, 3);
-  CHECK(last_match_info->HasFastObjectElements());
   CHECK(result_array->HasFastObjectElements());
 
   subject = String::Flatten(subject);
@@ -1002,6 +1257,188 @@
   }
 }
 
+RUNTIME_FUNCTION(Runtime_StringReplaceNonGlobalRegExpWithFunction) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 3);
+
+  CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, replace, 2);
+
+  RETURN_RESULT_OR_FAILURE(isolate, StringReplaceNonGlobalRegExpWithFunction(
+                                        isolate, subject, regexp, replace));
+}
+
+// Slow path for:
+// ES#sec-regexp.prototype-@@replace
+// RegExp.prototype [ @@replace ] ( string, replaceValue )
+RUNTIME_FUNCTION(Runtime_RegExpReplace) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 3);
+
+  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, recv, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, string, 1);
+  Handle<Object> replace_obj = args.at<Object>(2);
+
+  Factory* factory = isolate->factory();
+
+  string = String::Flatten(string);
+
+  // Fast-path for unmodified JSRegExps.
+  if (RegExpUtils::IsUnmodifiedRegExp(isolate, recv)) {
+    RETURN_RESULT_OR_FAILURE(
+        isolate, RegExpReplace(isolate, Handle<JSRegExp>::cast(recv), string,
+                               replace_obj));
+  }
+
+  const int length = string->length();
+  const bool functional_replace = replace_obj->IsCallable();
+
+  Handle<String> replace;
+  if (!functional_replace) {
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, replace,
+                                       Object::ToString(isolate, replace_obj));
+  }
+
+  Handle<Object> global_obj;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, global_obj,
+      JSReceiver::GetProperty(recv, factory->global_string()));
+  const bool global = global_obj->BooleanValue();
+
+  bool unicode = false;
+  if (global) {
+    Handle<Object> unicode_obj;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, unicode_obj,
+        JSReceiver::GetProperty(recv, factory->unicode_string()));
+    unicode = unicode_obj->BooleanValue();
+
+    RETURN_FAILURE_ON_EXCEPTION(isolate,
+                                RegExpUtils::SetLastIndex(isolate, recv, 0));
+  }
+
+  Zone zone(isolate->allocator(), ZONE_NAME);
+  ZoneVector<Handle<Object>> results(&zone);
+
+  while (true) {
+    Handle<Object> result;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, result, RegExpUtils::RegExpExec(isolate, recv, string,
+                                                 factory->undefined_value()));
+
+    if (result->IsNull(isolate)) break;
+
+    results.push_back(result);
+    if (!global) break;
+
+    Handle<Object> match_obj;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, match_obj,
+                                       Object::GetElement(isolate, result, 0));
+
+    Handle<String> match;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, match,
+                                       Object::ToString(isolate, match_obj));
+
+    if (match->length() == 0) {
+      RETURN_FAILURE_ON_EXCEPTION(isolate, RegExpUtils::SetAdvancedStringIndex(
+                                               isolate, recv, string, unicode));
+    }
+  }
+
+  // TODO(jgruber): Look into ReplacementStringBuilder instead.
+  IncrementalStringBuilder builder(isolate);
+  int next_source_position = 0;
+
+  for (const auto& result : results) {
+    Handle<Object> captures_length_obj;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, captures_length_obj,
+        Object::GetProperty(result, factory->length_string()));
+
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, captures_length_obj,
+        Object::ToLength(isolate, captures_length_obj));
+    const int captures_length =
+        std::max(Handle<Smi>::cast(captures_length_obj)->value(), 0);
+
+    Handle<Object> match_obj;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, match_obj,
+                                       Object::GetElement(isolate, result, 0));
+
+    Handle<String> match;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, match,
+                                       Object::ToString(isolate, match_obj));
+
+    const int match_length = match->length();
+
+    Handle<Object> position_obj;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, position_obj,
+        Object::GetProperty(result, factory->index_string()));
+
+    // TODO(jgruber): Extract and correct error handling. Since we can go up to
+    // 2^53 - 1 (at least for ToLength), we might actually need uint64_t here?
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, position_obj, Object::ToInteger(isolate, position_obj));
+    const int position =
+        std::max(std::min(Handle<Smi>::cast(position_obj)->value(), length), 0);
+
+    ZoneVector<Handle<Object>> captures(&zone);
+    for (int n = 0; n < captures_length; n++) {
+      Handle<Object> capture;
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+          isolate, capture, Object::GetElement(isolate, result, n));
+
+      if (!capture->IsUndefined(isolate)) {
+        ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, capture,
+                                           Object::ToString(isolate, capture));
+      }
+      captures.push_back(capture);
+    }
+
+    Handle<String> replacement;
+    if (functional_replace) {
+      const int argc = captures_length + 2;
+      ScopedVector<Handle<Object>> argv(argc);
+
+      for (int j = 0; j < captures_length; j++) {
+        argv[j] = captures[j];
+      }
+
+      argv[captures_length] = handle(Smi::FromInt(position), isolate);
+      argv[captures_length + 1] = string;
+
+      Handle<Object> replacement_obj;
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+          isolate, replacement_obj,
+          Execution::Call(isolate, replace_obj, factory->undefined_value(),
+                          argc, argv.start()));
+
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+          isolate, replacement, Object::ToString(isolate, replacement_obj));
+    } else {
+      VectorBackedMatch m(isolate, string, match, position, &captures);
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+          isolate, replacement, String::GetSubstitution(isolate, &m, replace));
+    }
+
+    if (position >= next_source_position) {
+      builder.AppendString(
+          factory->NewSubString(string, next_source_position, position));
+      builder.AppendString(replacement);
+
+      next_source_position = position + match_length;
+    }
+  }
+
+  if (next_source_position < length) {
+    builder.AppendString(
+        factory->NewSubString(string, next_source_position, length));
+  }
+
+  RETURN_RESULT_OR_FAILURE(isolate, builder.Finish());
+}
 
 RUNTIME_FUNCTION(Runtime_RegExpExecReThrow) {
   SealHandleScope shs(isolate);
@@ -1018,5 +1455,6 @@
   CONVERT_ARG_CHECKED(Object, obj, 0);
   return isolate->heap()->ToBoolean(obj->IsJSRegExp());
 }
+
 }  // namespace internal
 }  // namespace v8
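MatchInfoBackedMatch and VectorBackedMatch above both implement String::Match, so String::GetSubstitution can expand '$1'-style replacement templates without caring whether captures come from the engine's internal match info or from the elements of a user-visible exec() result. A stripped-down sketch of that adapter shape (std::string stand-ins, not the V8 interfaces):

#include <string>
#include <utility>
#include <vector>

// The narrow interface a substitution expander would consume.
struct Match {
  virtual ~Match() = default;
  virtual std::string GetMatch() = 0;
  virtual std::string GetCapture(int i, bool* exists) = 0;
  virtual int CaptureCount() = 0;
};

// Adapter over an already-materialized capture vector, analogous to
// VectorBackedMatch; a second adapter over engine match indices would
// mirror MatchInfoBackedMatch.
class VectorMatch : public Match {
 public:
  VectorMatch(std::string match, std::vector<std::string> captures)
      : match_(std::move(match)), captures_(std::move(captures)) {}
  std::string GetMatch() override { return match_; }
  std::string GetCapture(int i, bool* exists) override {
    *exists = i >= 0 && i < static_cast<int>(captures_.size());
    return *exists ? captures_[i] : std::string();
  }
  int CaptureCount() override { return static_cast<int>(captures_.size()); }

 private:
  std::string match_;
  std::vector<std::string> captures_;
};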
diff --git a/src/runtime/runtime-scopes.cc b/src/runtime/runtime-scopes.cc
index 0c037db..377799f 100644
--- a/src/runtime/runtime-scopes.cc
+++ b/src/runtime/runtime-scopes.cc
@@ -903,7 +903,7 @@
   // The property was found in a context slot.
   if (index != Context::kNotFound) {
     if (flag == kNeedsInitialization &&
-        Handle<Context>::cast(holder)->is_the_hole(index)) {
+        Handle<Context>::cast(holder)->is_the_hole(isolate, index)) {
       THROW_NEW_ERROR(isolate,
                       NewReferenceError(MessageTemplate::kNotDefined, name),
                       Object);
diff --git a/src/runtime/runtime-strings.cc b/src/runtime/runtime-strings.cc
index f5bda59..328bdce 100644
--- a/src/runtime/runtime-strings.cc
+++ b/src/runtime/runtime-strings.cc
@@ -90,17 +90,8 @@
 RUNTIME_FUNCTION(Runtime_StringIndexOf) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 3);
-
-  CONVERT_ARG_HANDLE_CHECKED(String, sub, 0);
-  CONVERT_ARG_HANDLE_CHECKED(String, pat, 1);
-  CONVERT_ARG_HANDLE_CHECKED(Object, index, 2);
-
-  uint32_t start_index = 0;
-  if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
-
-  CHECK(start_index <= static_cast<uint32_t>(sub->length()));
-  int position = String::IndexOf(isolate, sub, pat, start_index);
-  return Smi::FromInt(position);
+  return String::IndexOf(isolate, args.at<Object>(0), args.at<Object>(1),
+                         args.at<Object>(2));
 }
 
 RUNTIME_FUNCTION(Runtime_StringLastIndexOf) {
@@ -166,59 +157,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_StringMatch) {
-  HandleScope handles(isolate);
-  DCHECK(args.length() == 3);
-
-  CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
-  CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSArray, regexp_info, 2);
-
-  CHECK(regexp_info->HasFastObjectElements());
-
-  RegExpImpl::GlobalCache global_cache(regexp, subject, isolate);
-  if (global_cache.HasException()) return isolate->heap()->exception();
-
-  int capture_count = regexp->CaptureCount();
-
-  ZoneScope zone_scope(isolate->runtime_zone());
-  ZoneList<int> offsets(8, zone_scope.zone());
-
-  while (true) {
-    int32_t* match = global_cache.FetchNext();
-    if (match == NULL) break;
-    offsets.Add(match[0], zone_scope.zone());  // start
-    offsets.Add(match[1], zone_scope.zone());  // end
-  }
-
-  if (global_cache.HasException()) return isolate->heap()->exception();
-
-  if (offsets.length() == 0) {
-    // Not a single match.
-    return isolate->heap()->null_value();
-  }
-
-  RegExpImpl::SetLastMatchInfo(regexp_info, subject, capture_count,
-                               global_cache.LastSuccessfulMatch());
-
-  int matches = offsets.length() / 2;
-  Handle<FixedArray> elements = isolate->factory()->NewFixedArray(matches);
-  Handle<String> substring =
-      isolate->factory()->NewSubString(subject, offsets.at(0), offsets.at(1));
-  elements->set(0, *substring);
-  FOR_WITH_HANDLE_SCOPE(isolate, int, i = 1, i, i < matches, i++, {
-    int from = offsets.at(i * 2);
-    int to = offsets.at(i * 2 + 1);
-    Handle<String> substring =
-        isolate->factory()->NewProperSubString(subject, from, to);
-    elements->set(i, *substring);
-  });
-  Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(elements);
-  result->set_length(Smi::FromInt(matches));
-  return *result;
-}
-
-
 RUNTIME_FUNCTION(Runtime_StringCharCodeAtRT) {
   HandleScope handle_scope(isolate);
   DCHECK(args.length() == 2);
@@ -256,7 +194,7 @@
       break;
   }
   UNREACHABLE();
-  return Smi::FromInt(0);
+  return Smi::kZero;
 }
 
 
@@ -573,13 +511,13 @@
     elements->set(i, value, mode);
   }
   if (i < length) {
-    DCHECK(Smi::FromInt(0) == 0);
+    DCHECK(Smi::kZero == 0);
     memset(elements->data_start() + i, 0, kPointerSize * (length - i));
   }
 #ifdef DEBUG
   for (int j = 0; j < length; ++j) {
     Object* element = elements->get(j);
-    DCHECK(element == Smi::FromInt(0) ||
+    DCHECK(element == Smi::kZero ||
            (element->IsString() && String::cast(element)->LooksValid()));
   }
 #endif
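The DCHECK(Smi::kZero == 0) above is what makes the following memset sound: with a zero smi tag, the integer 0 encodes to the machine word 0, so zero-filled memory reads back as an array of Smi zeros. A standalone illustration, assuming the 1-bit smi tagging scheme (the 64-bit encoding shifts further, but 0 still encodes to 0):

#include <cassert>
#include <cstdint>

static std::intptr_t SmiEncode(std::intptr_t value) { return value << 1; }
static std::intptr_t SmiDecode(std::intptr_t word) { return word >> 1; }

int main() {
  assert(SmiEncode(0) == 0);  // zero-filled memory == Smi zero
  assert(SmiDecode(SmiEncode(42)) == 42);
  return 0;
}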
@@ -942,7 +880,7 @@
       break;
   }
   UNREACHABLE();
-  return Smi::FromInt(0);
+  return Smi::kZero;
 }
 
 RUNTIME_FUNCTION(Runtime_StringLessThanOrEqual) {
@@ -960,7 +898,7 @@
       break;
   }
   UNREACHABLE();
-  return Smi::FromInt(0);
+  return Smi::kZero;
 }
 
 RUNTIME_FUNCTION(Runtime_StringGreaterThan) {
@@ -978,7 +916,7 @@
       break;
   }
   UNREACHABLE();
-  return Smi::FromInt(0);
+  return Smi::kZero;
 }
 
 RUNTIME_FUNCTION(Runtime_StringGreaterThanOrEqual) {
@@ -996,7 +934,7 @@
       break;
   }
   UNREACHABLE();
-  return Smi::FromInt(0);
+  return Smi::kZero;
 }
 
 RUNTIME_FUNCTION(Runtime_StringEqual) {
diff --git a/src/runtime/runtime-test.cc b/src/runtime/runtime-test.cc
index 8100d2c..7054192 100644
--- a/src/runtime/runtime-test.cc
+++ b/src/runtime/runtime-test.cc
@@ -17,6 +17,7 @@
 #include "src/snapshot/code-serializer.h"
 #include "src/snapshot/natives.h"
 #include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
 
 namespace v8 {
 namespace internal {
@@ -267,6 +268,9 @@
   if (function->IsOptimized() && function->code()->is_turbofanned()) {
     return Smi::FromInt(7);  // 7 == "TurboFan compiler".
   }
+  if (function->IsInterpreted()) {
+    return Smi::FromInt(8);  // 8 == "Interpreted".
+  }
   return function->IsOptimized() ? Smi::FromInt(1)   // 1 == "yes".
                                  : Smi::FromInt(2);  // 2 == "no".
 }
@@ -444,7 +448,7 @@
 
   OFStream os(stdout);
 #ifdef DEBUG
-  if (args[0]->IsString()) {
+  if (args[0]->IsString() && isolate->context() != nullptr) {
     // If we have a string, assume it's a code "marker"
     // and print some interesting cpu debugging info.
     JavaScriptFrameIterator it(isolate);
@@ -546,8 +550,7 @@
   return Smi::FromInt(Natives::GetBuiltinsCount());
 }
 
-
-// Returns V8 version as a string.
+// TODO(5510): remove this.
 RUNTIME_FUNCTION(Runtime_GetV8Version) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 0);
@@ -755,21 +758,37 @@
 // Return undefined if unsuccessful.
 RUNTIME_FUNCTION(Runtime_DeserializeWasmModule) {
   HandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, buffer, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, wire_bytes, 1);
 
   Address mem_start = static_cast<Address>(buffer->backing_store());
   int mem_size = static_cast<int>(buffer->byte_length()->Number());
 
+  // DeserializeWasmModule will allocate. We assume JSArrayBuffer doesn't
+  // get relocated.
   ScriptData sc(mem_start, mem_size);
+  bool already_external = wire_bytes->is_external();
+  if (!already_external) {
+    wire_bytes->set_is_external(true);
+    isolate->heap()->UnregisterArrayBuffer(*wire_bytes);
+  }
   MaybeHandle<FixedArray> maybe_compiled_module =
-      WasmCompiledModuleSerializer::DeserializeWasmModule(isolate, &sc);
+      WasmCompiledModuleSerializer::DeserializeWasmModule(
+          isolate, &sc,
+          Vector<const uint8_t>(
+              reinterpret_cast<uint8_t*>(wire_bytes->backing_store()),
+              static_cast<int>(wire_bytes->byte_length()->Number())));
+  if (!already_external) {
+    wire_bytes->set_is_external(false);
+    isolate->heap()->RegisterNewArrayBuffer(*wire_bytes);
+  }
   Handle<FixedArray> compiled_module;
   if (!maybe_compiled_module.ToHandle(&compiled_module)) {
     return isolate->heap()->undefined_value();
   }
-  return *wasm::CreateCompiledModuleObject(isolate, compiled_module,
-                                           wasm::ModuleOrigin::kWasmOrigin);
+  return *WasmModuleObject::New(
+      isolate, Handle<WasmCompiledModule>::cast(compiled_module));
 }
 
 RUNTIME_FUNCTION(Runtime_ValidateWasmInstancesChain) {
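The deserialization hunk above temporarily flags the wire-bytes buffer as external and unregisters it from the heap, so the allocating deserializer never treats it as a managed buffer, then restores the previous state. A hedged sketch of that save/restore shape as an RAII guard, with stub types (hypothetical helper, not a V8 API):

struct Buffer {
  bool is_external = false;
};

struct Heap {
  void UnregisterArrayBuffer(Buffer*) {}   // stop tracking the buffer
  void RegisterNewArrayBuffer(Buffer*) {}  // resume tracking
};

class ScopedExternalize {
 public:
  ScopedExternalize(Heap* heap, Buffer* buffer)
      : heap_(heap), buffer_(buffer), was_external_(buffer->is_external) {
    if (!was_external_) {
      buffer_->is_external = true;
      heap_->UnregisterArrayBuffer(buffer_);
    }
  }
  // Restores the old state on every exit path, including early returns.
  ~ScopedExternalize() {
    if (!was_external_) {
      buffer_->is_external = false;
      heap_->RegisterNewArrayBuffer(buffer_);
    }
  }

 private:
  Heap* heap_;
  Buffer* buffer_;
  bool was_external_;
};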
diff --git a/src/runtime/runtime-typedarray.cc b/src/runtime/runtime-typedarray.cc
index ba422bf..cb0e062 100644
--- a/src/runtime/runtime-typedarray.cc
+++ b/src/runtime/runtime-typedarray.cc
@@ -59,7 +59,7 @@
   DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, array_buffer, 0);
   if (array_buffer->backing_store() == NULL) {
-    CHECK(Smi::FromInt(0) == array_buffer->byte_length());
+    CHECK(Smi::kZero == array_buffer->byte_length());
     return isolate->heap()->undefined_value();
   }
   // Shared array buffers should never be neutered.
@@ -142,7 +142,7 @@
   DCHECK_EQ(v8::ArrayBufferView::kInternalFieldCount,
             holder->GetInternalFieldCount());
   for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
-    holder->SetInternalField(i, Smi::FromInt(0));
+    holder->SetInternalField(i, Smi::kZero);
   }
   Handle<Object> length_obj = isolate->factory()->NewNumberFromSize(length);
   holder->set_length(*length_obj);
@@ -215,7 +215,7 @@
   DCHECK_EQ(v8::ArrayBufferView::kInternalFieldCount,
             holder->GetInternalFieldCount());
   for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
-    holder->SetInternalField(i, Smi::FromInt(0));
+    holder->SetInternalField(i, Smi::kZero);
   }
 
   // NOTE: not initializing backing store.
@@ -241,7 +241,7 @@
   }
 
   holder->set_buffer(*buffer);
-  holder->set_byte_offset(Smi::FromInt(0));
+  holder->set_byte_offset(Smi::kZero);
   Handle<Object> byte_length_obj(
       isolate->factory()->NewNumberFromSize(byte_length));
   holder->set_byte_length(*byte_length_obj);
diff --git a/src/runtime/runtime.h b/src/runtime/runtime.h
index cbdaf0f..8e2e83c 100644
--- a/src/runtime/runtime.h
+++ b/src/runtime/runtime.h
@@ -9,6 +9,7 @@
 
 #include "src/allocation.h"
 #include "src/base/platform/time.h"
+#include "src/globals.h"
 #include "src/objects.h"
 #include "src/unicode.h"
 #include "src/zone/zone.h"
@@ -51,13 +52,12 @@
   F(HasComplexElements, 1, 1)        \
   F(IsArray, 1, 1)                   \
   F(ArrayIsArray, 1, 1)              \
-  F(HasCachedArrayIndex, 1, 1)       \
-  F(GetCachedArrayIndex, 1, 1)       \
   F(FixedArrayGet, 2, 1)             \
   F(FixedArraySet, 3, 1)             \
   F(ArraySpeciesConstructor, 1, 1)   \
   F(ArrayIncludes_Slow, 3, 1)        \
-  F(ArrayIndexOf, 3, 1)
+  F(ArrayIndexOf, 3, 1)              \
+  F(SpreadIterablePrepare, 1, 1)
 
 #define FOR_EACH_INTRINSIC_ATOMICS(F)           \
   F(ThrowNotIntegerSharedTypedArrayError, 1, 1) \
@@ -188,6 +188,7 @@
   F(ScriptLineStartPosition, 2, 1)              \
   F(ScriptLineEndPosition, 2, 1)                \
   F(ScriptLocationFromLine, 4, 1)               \
+  F(ScriptLocationFromLine2, 4, 1)              \
   F(ScriptPositionInfo, 3, 1)                   \
   F(ScriptSourceLine, 2, 1)                     \
   F(DebugPrepareStepInIfStepping, 1, 1)         \
@@ -195,11 +196,10 @@
   F(DebugRecordAsyncFunction, 1, 1)             \
   F(DebugPushPromise, 1, 1)                     \
   F(DebugPopPromise, 0, 1)                      \
-  F(DebugAsyncTaskEvent, 1, 1)                  \
+  F(DebugNextMicrotaskId, 0, 1)                 \
+  F(DebugAsyncTaskEvent, 3, 1)                  \
   F(DebugIsActive, 0, 1)                        \
-  F(DebugBreakInOptimizedCode, 0, 1)            \
-  F(GetWasmFunctionOffsetTable, 1, 1)           \
-  F(DisassembleWasmFunction, 1, 1)
+  F(DebugBreakInOptimizedCode, 0, 1)
 
 #define FOR_EACH_INTRINSIC_ERROR(F) F(ErrorToString, 1, 1)
 
@@ -214,13 +214,15 @@
   F(InterpreterTraceBytecodeEntry, 3, 1)  \
   F(InterpreterTraceBytecodeExit, 3, 1)   \
   F(InterpreterClearPendingMessage, 0, 1) \
-  F(InterpreterSetPendingMessage, 1, 1)
+  F(InterpreterSetPendingMessage, 1, 1)   \
+  F(InterpreterAdvanceBytecodeOffset, 2, 1)
 
 #define FOR_EACH_INTRINSIC_FUNCTION(F)     \
   F(FunctionGetName, 1, 1)                 \
   F(FunctionSetName, 2, 1)                 \
   F(FunctionRemovePrototype, 1, 1)         \
   F(FunctionGetScript, 1, 1)               \
+  F(FunctionGetScriptId, 1, 1)             \
   F(FunctionGetSourceCode, 1, 1)           \
   F(FunctionGetScriptSourcePosition, 1, 1) \
   F(FunctionGetContextData, 1, 1)          \
@@ -290,7 +292,8 @@
   F(CheckIsBootstrapping, 0, 1)                     \
   F(CreateListFromArrayLike, 1, 1)                  \
   F(EnqueueMicrotask, 1, 1)                         \
-  F(EnqueuePromiseResolveThenableJob, 6, 1)         \
+  F(EnqueuePromiseReactionJob, 4, 1)                \
+  F(EnqueuePromiseResolveThenableJob, 3, 1)         \
   F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1)  \
   F(ExportExperimentalFromRuntime, 1, 1)            \
   F(ExportFromRuntime, 1, 1)                        \
@@ -298,12 +301,13 @@
   F(InstallToContext, 1, 1)                         \
   F(Interrupt, 0, 1)                                \
   F(IS_VAR, 1, 1)                                   \
-  F(IsWasmObject, 1, 1)                             \
+  F(IsWasmInstance, 1, 1)                           \
   F(NewReferenceError, 2, 1)                        \
   F(NewSyntaxError, 2, 1)                           \
   F(NewTypeError, 2, 1)                             \
   F(OrdinaryHasInstance, 2, 1)                      \
-  F(PromiseRejectEvent, 3, 1)                       \
+  F(PromiseReject, 3, 1)                            \
+  F(PromiseFulfill, 4, 1)                           \
   F(PromiseRejectEventFromStack, 2, 1)              \
   F(PromiseRevokeReject, 1, 1)                      \
   F(PromoteScheduledException, 0, 1)                \
@@ -325,6 +329,7 @@
   F(ThrowNotGeneric, 1, 1)                          \
   F(ThrowReferenceError, 1, 1)                      \
   F(ThrowStackOverflow, 0, 1)                       \
+  F(ThrowTypeError, -1 /* >= 1 */, 1)               \
   F(ThrowWasmError, 2, 1)                           \
   F(ThrowUndefinedOrNullToObject, 1, 1)             \
   F(Typeof, 1, 1)                                   \
@@ -350,7 +355,12 @@
   F(LiveEditCompareStrings, 2, 1)                   \
   F(LiveEditRestartFrame, 2, 1)
 
-#define FOR_EACH_INTRINSIC_MATHS(F) F(GenerateRandomNumbers, 1, 1)
+#define FOR_EACH_INTRINSIC_MATHS(F) F(GenerateRandomNumbers, 0, 1)
+
+#define FOR_EACH_INTRINSIC_MODULE(F) \
+  F(GetModuleNamespace, 1, 1)        \
+  F(LoadModuleVariable, 1, 1)        \
+  F(StoreModuleVariable, 2, 1)
 
 #define FOR_EACH_INTRINSIC_NUMBERS(F)  \
   F(IsValidSmi, 1, 1)                  \
@@ -370,13 +380,11 @@
 #define FOR_EACH_INTRINSIC_OBJECT(F)                 \
   F(GetPrototype, 1, 1)                              \
   F(ObjectHasOwnProperty, 2, 1)                      \
+  F(ObjectCreate, 2, 1)                              \
   F(InternalSetPrototype, 2, 1)                      \
-  F(SetPrototype, 2, 1)                              \
   F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
   F(GetProperty, 2, 1)                               \
   F(KeyedGetProperty, 2, 1)                          \
-  F(StoreGlobalViaContext_Sloppy, 2, 1)              \
-  F(StoreGlobalViaContext_Strict, 2, 1)              \
   F(AddNamedProperty, 4, 1)                          \
   F(SetProperty, 4, 1)                               \
   F(AddElement, 3, 1)                                \
@@ -417,11 +425,9 @@
   F(Compare, 3, 1)                                   \
   F(HasInPrototypeChain, 2, 1)                       \
   F(CreateIterResultObject, 2, 1)                    \
+  F(CreateKeyValueArray, 2, 1)                       \
   F(IsAccessCheckNeeded, 1, 1)                       \
-  F(CreateDataProperty, 3, 1)                        \
-  F(LoadModuleExport, 1, 1)                          \
-  F(LoadModuleImport, 2, 1)                          \
-  F(StoreModuleExport, 2, 1)
+  F(CreateDataProperty, 3, 1)
 
 #define FOR_EACH_INTRINSIC_OPERATORS(F) \
   F(Multiply, 2, 1)                     \
@@ -453,17 +459,17 @@
   F(JSProxyGetHandler, 1, 1)            \
   F(JSProxyRevoke, 1, 1)
 
-#define FOR_EACH_INTRINSIC_REGEXP(F)           \
-  F(StringReplaceGlobalRegExpWithString, 4, 1) \
-  F(StringSplit, 3, 1)                         \
-  F(RegExpExec, 4, 1)                          \
-  F(RegExpFlags, 1, 1)                         \
-  F(RegExpSource, 1, 1)                        \
-  F(RegExpConstructResult, 3, 1)               \
-  F(RegExpInitializeAndCompile, 3, 1)          \
-  F(RegExpExecMultiple, 4, 1)                  \
-  F(RegExpExecReThrow, 4, 1)                   \
-  F(IsRegExp, 1, 1)
+#define FOR_EACH_INTRINSIC_REGEXP(F)                \
+  F(IsRegExp, 1, 1)                                 \
+  F(RegExpCreate, 1, 1)                             \
+  F(RegExpExec, 4, 1)                               \
+  F(RegExpExecMultiple, 4, 1)                       \
+  F(RegExpExecReThrow, 4, 1)                        \
+  F(RegExpInternalReplace, 3, 1)                    \
+  F(RegExpReplace, 3, 1)                            \
+  F(StringReplaceGlobalRegExpWithString, 4, 1)      \
+  F(StringReplaceNonGlobalRegExpWithFunction, 3, 1) \
+  F(StringSplit, 3, 1)
 
 #define FOR_EACH_INTRINSIC_SCOPES(F)    \
   F(ThrowConstAssignError, 0, 1)        \
@@ -807,7 +813,6 @@
   F(SubString, 3, 1)                      \
   F(StringAdd, 2, 1)                      \
   F(InternalizeString, 1, 1)              \
-  F(StringMatch, 3, 1)                    \
   F(StringCharCodeAtRT, 2, 1)             \
   F(StringCompare, 2, 1)                  \
   F(StringBuilderConcat, 3, 1)            \
@@ -891,7 +896,7 @@
   F(HasFixedUint8ClampedElements, 1, 1)       \
   F(SpeciesProtector, 0, 1)                   \
   F(SerializeWasmModule, 1, 1)                \
-  F(DeserializeWasmModule, 1, 1)              \
+  F(DeserializeWasmModule, 2, 1)              \
   F(IsAsmWasmCode, 1, 1)                      \
   F(IsNotAsmWasmCode, 1, 1)                   \
   F(ValidateWasmInstancesChain, 2, 1)         \
@@ -971,6 +976,7 @@
   FOR_EACH_INTRINSIC_LITERALS(F)            \
   FOR_EACH_INTRINSIC_LIVEEDIT(F)            \
   FOR_EACH_INTRINSIC_MATHS(F)               \
+  FOR_EACH_INTRINSIC_MODULE(F)              \
   FOR_EACH_INTRINSIC_NUMBERS(F)             \
   FOR_EACH_INTRINSIC_OBJECT(F)              \
   FOR_EACH_INTRINSIC_OPERATORS(F)           \
@@ -1039,7 +1045,7 @@
   static const Function* FunctionForName(const unsigned char* name, int length);
 
   // Get the intrinsic function with the given FunctionId.
-  static const Function* FunctionForId(FunctionId id);
+  V8_EXPORT_PRIVATE static const Function* FunctionForId(FunctionId id);
 
   // Get the intrinsic function with the given function entry address.
   static const Function* FunctionForEntry(Address ref);
@@ -1114,8 +1120,7 @@
   DISALLOW_COPY_AND_ASSIGN(RuntimeState);
 };
 
-
-std::ostream& operator<<(std::ostream&, Runtime::FunctionId);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, Runtime::FunctionId);
 
 //---------------------------------------------------------------------------
 // Constants used by interface to runtime functions.
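The FOR_EACH_INTRINSIC_* lists in this header are X-macros: every F(Name, nargs, ressize) entry is expanded once per use site, so the enum of function ids, the intrinsic table, and the declarations are all generated from the same list. A minimal standalone demonstration of the technique (demo names only, not the real V8 expansion sites):

#include <cstdio>

#define FOR_EACH_DEMO_INTRINSIC(F) \
  F(IsRegExp, 1, 1)                \
  F(StringSplit, 3, 1)

// Expansion 1: an enum of function ids.
#define DECLARE_ID(Name, nargs, ressize) k##Name,
enum DemoFunctionId { FOR_EACH_DEMO_INTRINSIC(DECLARE_ID) kDemoCount };
#undef DECLARE_ID

// Expansion 2: a table of names and arities.
#define DECLARE_ENTRY(Name, nargs, ressize) {#Name, nargs},
struct DemoEntry { const char* name; int nargs; };
static const DemoEntry kDemoTable[] = {FOR_EACH_DEMO_INTRINSIC(DECLARE_ENTRY)};
#undef DECLARE_ENTRY

int main() {
  for (const DemoEntry& e : kDemoTable) std::printf("%s/%d\n", e.name, e.nargs);
  return 0;
}

Adding an intrinsic is then a one-line change to the list, which is exactly what the hunks above do when they add entries such as F(RegExpCreate, 1, 1).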
diff --git a/src/s390/assembler-s390.h b/src/s390/assembler-s390.h
index ffe0ac4..65f0126 100644
--- a/src/s390/assembler-s390.h
+++ b/src/s390/assembler-s390.h
@@ -1254,7 +1254,8 @@
 
   // Record a deoptimization reason that can be used by a log or cpu profiler.
   // Use --trace-deopt to enable.
-  void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
+  void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
+                         int id);
 
   // Writes a single byte or word of data in the code stream.  Used
   // for inline tables, e.g., jump-tables.
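RecordDeoptReason now takes a SourcePosition value rather than a raw int, which lets a deopt record carry inlining information alongside the script offset. A sketch of what such a value type might look like (field layout is illustrative, not the actual V8 definition):

class SourcePosition {
 public:
  explicit SourcePosition(int script_offset, int inlining_id = kNotInlined)
      : script_offset_(script_offset), inlining_id_(inlining_id) {}
  int ScriptOffset() const { return script_offset_; }
  int InliningId() const { return inlining_id_; }
  bool IsInlined() const { return inlining_id_ != kNotInlined; }

  static const int kNotInlined = -1;

 private:
  int script_offset_;  // character offset into the script source
  int inlining_id_;    // index into the code's inlining table, or -1
};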
diff --git a/src/s390/code-stubs-s390.cc b/src/s390/code-stubs-s390.cc
index b1bf02d..553d6d8 100644
--- a/src/s390/code-stubs-s390.cc
+++ b/src/s390/code-stubs-s390.cc
@@ -553,7 +553,7 @@
   // If either is a Smi (we know that not both are), then they can only
   // be strictly equal if the other is a HeapNumber.
   STATIC_ASSERT(kSmiTag == 0);
-  DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+  DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
   __ AndP(r4, lhs, rhs);
   __ JumpIfNotSmi(r4, &not_smis);
   // One operand is a smi.  EmitSmiNonsmiComparison generates code that can:
@@ -1586,13 +1586,10 @@
   __ SmiToShortArrayOffset(r3, r3);
   __ AddP(r3, Operand(2));
 
-  __ LoadP(r2, MemOperand(sp, kLastMatchInfoOffset));
-  __ JumpIfSmi(r2, &runtime);
-  __ CompareObjectType(r2, r4, r4, JS_OBJECT_TYPE);
-  __ bne(&runtime);
+  // Check that the last match info is a FixedArray.
+  __ LoadP(last_match_info_elements, MemOperand(sp, kLastMatchInfoOffset));
+  __ JumpIfSmi(last_match_info_elements, &runtime);
   // Check that the object has fast elements.
-  __ LoadP(last_match_info_elements,
-           FieldMemOperand(r2, JSArray::kElementsOffset));
   __ LoadP(r2,
            FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
   __ CompareRoot(r2, Heap::kFixedArrayMapRootIndex);
@@ -1601,7 +1598,7 @@
   // additional information.
   __ LoadP(
       r2, FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
-  __ AddP(r4, r3, Operand(RegExpImpl::kLastMatchOverhead));
+  __ AddP(r4, r3, Operand(RegExpMatchInfo::kLastMatchOverhead));
   __ SmiUntag(r0, r2);
   __ CmpP(r4, r0);
   __ bgt(&runtime);
@@ -1611,18 +1608,20 @@
   // Store the capture count.
   __ SmiTag(r4, r3);
   __ StoreP(r4, FieldMemOperand(last_match_info_elements,
-                                RegExpImpl::kLastCaptureCountOffset));
+                                RegExpMatchInfo::kNumberOfCapturesOffset));
   // Store last subject and last input.
   __ StoreP(subject, FieldMemOperand(last_match_info_elements,
-                                     RegExpImpl::kLastSubjectOffset));
+                                     RegExpMatchInfo::kLastSubjectOffset));
   __ LoadRR(r4, subject);
-  __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastSubjectOffset,
-                      subject, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ RecordWriteField(last_match_info_elements,
+                      RegExpMatchInfo::kLastSubjectOffset, subject, r9,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs);
   __ LoadRR(subject, r4);
   __ StoreP(subject, FieldMemOperand(last_match_info_elements,
-                                     RegExpImpl::kLastInputOffset));
-  __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastInputOffset,
-                      subject, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
+                                     RegExpMatchInfo::kLastInputOffset));
+  __ RecordWriteField(last_match_info_elements,
+                      RegExpMatchInfo::kLastInputOffset, subject, r9,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs);
 
   // Get the static offsets vector filled by the native regexp code.
   ExternalReference address_of_static_offsets_vector =
@@ -1633,10 +1632,10 @@
   // r4: offsets vector
   Label next_capture;
   // Capture register counter starts from number of capture registers and
-  // counts down until wraping after zero.
-  __ AddP(
-      r2, last_match_info_elements,
-      Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag - kPointerSize));
+  // counts down until wrapping after zero.
+  __ AddP(r2, last_match_info_elements,
+          Operand(RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag -
+                  kPointerSize));
   __ AddP(r4, Operand(-kIntSize));  // bias down for lwzu
   __ bind(&next_capture);
   // Read the value from the static offsets vector buffer.
@@ -1649,7 +1648,7 @@
   __ BranchOnCount(r3, &next_capture);
 
   // Return last match info.
-  __ LoadP(r2, MemOperand(sp, kLastMatchInfoOffset));
+  __ LoadRR(r2, last_match_info_elements);
   __ la(sp, MemOperand(sp, (4 * kPointerSize)));
   __ Ret();
 
@@ -1874,6 +1873,7 @@
 }
 
 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
+  // r2 - number of arguments
   // r3 - function
   // r5 - slot id
   // r4 - vector
@@ -1882,24 +1882,21 @@
   __ CmpP(r3, r7);
   __ bne(miss);
 
-  __ mov(r2, Operand(arg_count()));
-
   // Increment the call count for monomorphic function calls.
   IncrementCallCount(masm, r4, r5, r1);
 
   __ LoadRR(r4, r6);
   __ LoadRR(r5, r3);
-  ArrayConstructorStub stub(masm->isolate(), arg_count());
+  ArrayConstructorStub stub(masm->isolate());
   __ TailCallStub(&stub);
 }
 
 void CallICStub::Generate(MacroAssembler* masm) {
+  // r2 - number of arguments
   // r3 - function
   // r5 - slot id (Smi)
   // r4 - vector
   Label extra_checks_or_miss, call, call_function, call_count_incremented;
-  int argc = arg_count();
-  ParameterCount actual(argc);
 
   // The checks. First, does r3 match the recorded monomorphic target?
   __ SmiToPtrArrayOffset(r8, r5);
@@ -1933,7 +1930,6 @@
   // Increment the call count for monomorphic function calls.
   IncrementCallCount(masm, r4, r5, r1);
 
-  __ mov(r2, Operand(argc));
   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
                                                     tail_call_mode()),
           RelocInfo::CODE_TARGET);
@@ -1977,7 +1973,6 @@
   IncrementCallCount(masm, r4, r5, r1);
 
   __ bind(&call_count_incremented);
-  __ mov(r2, Operand(argc));
   __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
           RelocInfo::CODE_TARGET);
 
@@ -2010,13 +2005,12 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     CreateWeakCellStub create_stub(masm->isolate());
-    __ Push(r4);
-    __ Push(r5);
-    __ Push(cp, r3);
+    __ SmiTag(r2);
+    __ Push(r2, r4, r5, cp, r3);
     __ CallStub(&create_stub);
-    __ Pop(cp, r3);
-    __ Pop(r5);
-    __ Pop(r4);
+    __ Pop(r4, r5, cp, r3);
+    __ Pop(r2);
+    __ SmiUntag(r2);
   }
 
   __ b(&call_function);
@@ -2032,14 +2026,21 @@
 void CallICStub::GenerateMiss(MacroAssembler* masm) {
   FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Push the function and feedback info.
-  __ Push(r3, r4, r5);
+  // Preserve the number of arguments as Smi.
+  __ SmiTag(r2);
+
+  // Push the receiver and the function and feedback info.
+  __ Push(r2, r3, r4, r5);
 
   // Call the entry.
   __ CallRuntime(Runtime::kCallIC_Miss);
 
   // Move result to r3 and exit the internal frame.
   __ LoadRR(r3, r2);
+
+  // Restore number of arguments.
+  __ Pop(r2);
+  __ SmiUntag(r2);
 }
 
 // StringCharCodeAtGenerator
@@ -3138,30 +3139,12 @@
   __ Ret();
 }
 
-void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  LoadICStub stub(isolate());
-  stub.GenerateForTrampoline(masm);
-}
-
-void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  KeyedLoadICStub stub(isolate());
-  stub.GenerateForTrampoline(masm);
-}
-
 void CallICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(r4);
   CallICStub stub(isolate(), state());
   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
 }
 
-void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
 static void HandleArrayCases(MacroAssembler* masm, Register feedback,
                              Register receiver_map, Register scratch1,
                              Register scratch2, bool is_polymorphic,
@@ -3252,177 +3235,12 @@
   __ Jump(ip);
 }
 
-void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // r3
-  Register name = LoadWithVectorDescriptor::NameRegister();          // r4
-  Register vector = LoadWithVectorDescriptor::VectorRegister();      // r5
-  Register slot = LoadWithVectorDescriptor::SlotRegister();          // r2
-  Register feedback = r6;
-  Register receiver_map = r7;
-  Register scratch1 = r8;
-
-  __ SmiToPtrArrayOffset(r1, slot);
-  __ LoadP(feedback, FieldMemOperand(r1, vector, FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
-                        scratch1, &compare_map, &load_smi_map, &try_array);
-
-  // Is it a fixed array?
-  __ bind(&try_array);
-  __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
-  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
-  __ bne(&not_array, Label::kNear);
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, true, &miss);
-
-  __ bind(&not_array);
-  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
-  __ bne(&miss);
-  masm->isolate()->load_stub_cache()->GenerateProbe(
-      masm, receiver, name, feedback, receiver_map, scratch1, r9);
-
-  __ bind(&miss);
-  LoadIC::GenerateMiss(masm);
-
-  __ bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ b(&compare_map);
-}
-
-void KeyedLoadICStub::Generate(MacroAssembler* masm) {
-  GenerateImpl(masm, false);
-}
-
-void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // r3
-  Register key = LoadWithVectorDescriptor::NameRegister();           // r4
-  Register vector = LoadWithVectorDescriptor::VectorRegister();      // r5
-  Register slot = LoadWithVectorDescriptor::SlotRegister();          // r2
-  Register feedback = r6;
-  Register receiver_map = r7;
-  Register scratch1 = r8;
-
-  __ SmiToPtrArrayOffset(r1, slot);
-  __ LoadP(feedback, FieldMemOperand(r1, vector, FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
-                        scratch1, &compare_map, &load_smi_map, &try_array);
-
-  __ bind(&try_array);
-  // Is it a fixed array?
-  __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
-  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
-  __ bne(&not_array);
-
-  // We have a polymorphic element handler.
-  Label polymorphic, try_poly_name;
-  __ bind(&polymorphic);
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, true, &miss);
-
-  __ bind(&not_array);
-  // Is it generic?
-  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
-  __ bne(&try_poly_name);
-  Handle<Code> megamorphic_stub =
-      KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
-  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
-  __ bind(&try_poly_name);
-  // We might have a name in feedback, and a fixed array in the next slot.
-  __ CmpP(key, feedback);
-  __ bne(&miss);
-  // If the name comparison succeeded, we know we have a fixed array with
-  // at least one map/handler pair.
-  __ SmiToPtrArrayOffset(r1, slot);
-  __ LoadP(feedback,
-           FieldMemOperand(r1, vector, FixedArray::kHeaderSize + kPointerSize));
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, false, &miss);
-
-  __ bind(&miss);
-  KeyedLoadIC::GenerateMiss(masm);
-
-  __ bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ b(&compare_map);
-}
-
-void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
-  StoreICStub stub(isolate(), state());
-  stub.GenerateForTrampoline(masm);
-}
-
 void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
   KeyedStoreICStub stub(isolate(), state());
   stub.GenerateForTrampoline(masm);
 }
 
-void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // r3
-  Register key = StoreWithVectorDescriptor::NameRegister();           // r4
-  Register vector = StoreWithVectorDescriptor::VectorRegister();      // r5
-  Register slot = StoreWithVectorDescriptor::SlotRegister();          // r6
-  DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r2));          // r2
-  Register feedback = r7;
-  Register receiver_map = r8;
-  Register scratch1 = r9;
-
-  __ SmiToPtrArrayOffset(r0, slot);
-  __ AddP(feedback, vector, r0);
-  __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
-                        scratch1, &compare_map, &load_smi_map, &try_array);
-
-  // Is it a fixed array?
-  __ bind(&try_array);
-  __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
-  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
-  __ bne(&not_array);
-
-  Register scratch2 = ip;
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
-                   &miss);
-
-  __ bind(&not_array);
-  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
-  __ bne(&miss);
-  masm->isolate()->store_stub_cache()->GenerateProbe(
-      masm, receiver, key, feedback, receiver_map, scratch1, scratch2);
-
-  __ bind(&miss);
-  StoreIC::GenerateMiss(masm);
-
-  __ bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ b(&compare_map);
-}
-
 void KeyedStoreICStub::Generate(MacroAssembler* masm) {
   GenerateImpl(masm, false);
 }
@@ -3795,30 +3613,19 @@
 
 void ArrayConstructorStub::GenerateDispatchToArrayStub(
     MacroAssembler* masm, AllocationSiteOverrideMode mode) {
-  if (argument_count() == ANY) {
-    Label not_zero_case, not_one_case;
-    __ CmpP(r2, Operand::Zero());
-    __ bne(&not_zero_case);
-    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+  Label not_zero_case, not_one_case;
+  __ CmpP(r2, Operand::Zero());
+  __ bne(&not_zero_case);
+  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
 
-    __ bind(&not_zero_case);
-    __ CmpP(r2, Operand(1));
-    __ bgt(&not_one_case);
-    CreateArrayDispatchOneArgument(masm, mode);
+  __ bind(&not_zero_case);
+  __ CmpP(r2, Operand(1));
+  __ bgt(&not_one_case);
+  CreateArrayDispatchOneArgument(masm, mode);
 
-    __ bind(&not_one_case);
-    ArrayNArgumentsConstructorStub stub(masm->isolate());
-    __ TailCallStub(&stub);
-  } else if (argument_count() == NONE) {
-    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-  } else if (argument_count() == ONE) {
-    CreateArrayDispatchOneArgument(masm, mode);
-  } else if (argument_count() == MORE_THAN_ONE) {
-    ArrayNArgumentsConstructorStub stub(masm->isolate());
-    __ TailCallStub(&stub);
-  } else {
-    UNREACHABLE();
-  }
+  __ bind(&not_one_case);
+  ArrayNArgumentsConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
 }
 
 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
@@ -3869,23 +3676,9 @@
   GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
 
   __ bind(&subclassing);
-  switch (argument_count()) {
-    case ANY:
-    case MORE_THAN_ONE:
-      __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
-      __ StoreP(r3, MemOperand(sp, r1));
-      __ AddP(r2, r2, Operand(3));
-      break;
-    case NONE:
-      __ StoreP(r3, MemOperand(sp, 0 * kPointerSize));
-      __ LoadImmP(r2, Operand(3));
-      break;
-    case ONE:
-      __ StoreP(r3, MemOperand(sp, 1 * kPointerSize));
-      __ LoadImmP(r2, Operand(4));
-      break;
-  }
-
+  __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
+  __ StoreP(r3, MemOperand(sp, r1));
+  __ AddP(r2, r2, Operand(3));
   __ Push(r5, r4);
   __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
 }
@@ -4315,7 +4108,7 @@
   const int kParameterMapHeaderSize =
       FixedArray::kHeaderSize + 2 * kPointerSize;
   // If there are no mapped parameters, we do not need the parameter_map.
-  __ CmpSmiLiteral(r8, Smi::FromInt(0), r0);
+  __ CmpSmiLiteral(r8, Smi::kZero, r0);
   Label skip2, skip3;
   __ bne(&skip2);
   __ LoadImmP(r1, Operand::Zero());
@@ -4385,7 +4178,7 @@
   // r8 = mapped parameter count (tagged)
   // Initialize parameter map. If there are no mapped arguments, we're done.
   Label skip_parameter_map;
-  __ CmpSmiLiteral(r8, Smi::FromInt(0), r0);
+  __ CmpSmiLiteral(r8, Smi::kZero, r0);
   Label skip6;
   __ bne(&skip6);
   // Move backing store address to r3, because it is
@@ -4609,133 +4402,6 @@
   __ TailCallRuntime(Runtime::kNewStrictArguments);
 }
 
-void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
-  Register value = r2;
-  Register slot = r4;
-
-  Register cell = r3;
-  Register cell_details = r5;
-  Register cell_value = r6;
-  Register cell_value_map = r7;
-  Register scratch = r8;
-
-  Register context = cp;
-  Register context_temp = cell;
-
-  Label fast_heapobject_case, fast_smi_case, slow_case;
-
-  if (FLAG_debug_code) {
-    __ CompareRoot(value, Heap::kTheHoleValueRootIndex);
-    __ Check(ne, kUnexpectedValue);
-  }
-
-  // Go up the context chain to the script context.
-  for (int i = 0; i < depth(); i++) {
-    __ LoadP(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
-    context = context_temp;
-  }
-
-  // Load the PropertyCell at the specified slot.
-  __ ShiftLeftP(r0, slot, Operand(kPointerSizeLog2));
-  __ AddP(cell, context, r0);
-  __ LoadP(cell, ContextMemOperand(cell));
-
-  // Load PropertyDetails for the cell (actually only the cell_type and kind).
-  __ LoadP(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset));
-  __ SmiUntag(cell_details);
-  __ AndP(cell_details, cell_details,
-          Operand(PropertyDetails::PropertyCellTypeField::kMask |
-                  PropertyDetails::KindField::kMask |
-                  PropertyDetails::kAttributesReadOnlyMask));
-
-  // Check if PropertyCell holds mutable data.
-  Label not_mutable_data;
-  __ CmpP(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
-                                    PropertyCellType::kMutable) |
-                                PropertyDetails::KindField::encode(kData)));
-  __ bne(&not_mutable_data);
-  __ JumpIfSmi(value, &fast_smi_case);
-
-  __ bind(&fast_heapobject_case);
-  __ StoreP(value, FieldMemOperand(cell, PropertyCell::kValueOffset), r0);
-  // RecordWriteField clobbers the value register, so we copy it before the
-  // call.
-  __ LoadRR(r5, value);
-  __ RecordWriteField(cell, PropertyCell::kValueOffset, r5, scratch,
-                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ Ret();
-
-  __ bind(&not_mutable_data);
-  // Check if PropertyCell value matches the new value (relevant for Constant,
-  // ConstantType and Undefined cells).
-  Label not_same_value;
-  __ LoadP(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
-  __ CmpP(cell_value, value);
-  __ bne(&not_same_value);
-
-  // Make sure the PropertyCell is not marked READ_ONLY.
-  __ AndP(r0, cell_details, Operand(PropertyDetails::kAttributesReadOnlyMask));
-  __ bne(&slow_case);
-
-  if (FLAG_debug_code) {
-    Label done;
-    // This can only be true for Constant, ConstantType and Undefined cells,
-    // because we never store the_hole via this stub.
-    __ CmpP(cell_details,
-            Operand(PropertyDetails::PropertyCellTypeField::encode(
-                        PropertyCellType::kConstant) |
-                    PropertyDetails::KindField::encode(kData)));
-    __ beq(&done);
-    __ CmpP(cell_details,
-            Operand(PropertyDetails::PropertyCellTypeField::encode(
-                        PropertyCellType::kConstantType) |
-                    PropertyDetails::KindField::encode(kData)));
-    __ beq(&done);
-    __ CmpP(cell_details,
-            Operand(PropertyDetails::PropertyCellTypeField::encode(
-                        PropertyCellType::kUndefined) |
-                    PropertyDetails::KindField::encode(kData)));
-    __ Check(eq, kUnexpectedValue);
-    __ bind(&done);
-  }
-  __ Ret();
-  __ bind(&not_same_value);
-
-  // Check if PropertyCell contains data with constant type (and is not
-  // READ_ONLY).
-  __ CmpP(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
-                                    PropertyCellType::kConstantType) |
-                                PropertyDetails::KindField::encode(kData)));
-  __ bne(&slow_case);
-
-  // Now either both old and new values must be smis or both must be heap
-  // objects with same map.
-  Label value_is_heap_object;
-  __ JumpIfNotSmi(value, &value_is_heap_object);
-  __ JumpIfNotSmi(cell_value, &slow_case);
-  // Old and new values are smis, no need for a write barrier here.
-  __ bind(&fast_smi_case);
-  __ StoreP(value, FieldMemOperand(cell, PropertyCell::kValueOffset), r0);
-  __ Ret();
-
-  __ bind(&value_is_heap_object);
-  __ JumpIfSmi(cell_value, &slow_case);
-
-  __ LoadP(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
-  __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
-  __ CmpP(cell_value_map, scratch);
-  __ beq(&fast_heapobject_case);
-
-  // Fallback to runtime.
-  __ bind(&slow_case);
-  __ SmiTag(slot);
-  __ Push(slot, value);
-  __ TailCallRuntime(is_strict(language_mode())
-                         ? Runtime::kStoreGlobalViaContext_Strict
-                         : Runtime::kStoreGlobalViaContext_Sloppy);
-}
-
 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
   return ref0.address() - ref1.address();
 }
@@ -5022,7 +4688,7 @@
   __ Push(scratch, scratch);
   __ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
   __ Push(scratch, holder);
-  __ Push(Smi::FromInt(0));  // should_throw_on_error -> false
+  __ Push(Smi::kZero);  // should_throw_on_error -> false
   __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
   __ push(scratch);
 
diff --git a/src/s390/interface-descriptors-s390.cc b/src/s390/interface-descriptors-s390.cc
index ca40a0c..7fdf993 100644
--- a/src/s390/interface-descriptors-s390.cc
+++ b/src/s390/interface-descriptors-s390.cc
@@ -31,6 +31,8 @@
 
 const Register LoadWithVectorDescriptor::VectorRegister() { return r5; }
 
+const Register LoadICProtoArrayDescriptor::HandlerRegister() { return r6; }
+
 const Register StoreDescriptor::ReceiverRegister() { return r3; }
 const Register StoreDescriptor::NameRegister() { return r4; }
 const Register StoreDescriptor::ValueRegister() { return r2; }
@@ -42,9 +44,6 @@
 const Register StoreTransitionDescriptor::VectorRegister() { return r5; }
 const Register StoreTransitionDescriptor::MapRegister() { return r7; }
 
-const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r4; }
-const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r2; }
-
 const Register StringCompareDescriptor::LeftRegister() { return r3; }
 const Register StringCompareDescriptor::RightRegister() { return r2; }
 
@@ -143,7 +142,7 @@
 
 void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {r3, r5, r4};
+  Register registers[] = {r3, r2, r5, r4};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
@@ -187,12 +186,6 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-void RegExpConstructResultDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {r4, r3, r2};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
 void TransitionElementsKindDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {r2, r3};
diff --git a/src/s390/macro-assembler-s390.cc b/src/s390/macro-assembler-s390.cc
index 769d3dc..fbf82cc 100644
--- a/src/s390/macro-assembler-s390.cc
+++ b/src/s390/macro-assembler-s390.cc
@@ -1502,87 +1502,6 @@
   StoreP(r3, MemOperand(ip));
 }
 
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
-                                            Register scratch, Label* miss) {
-  Label same_contexts;
-
-  DCHECK(!holder_reg.is(scratch));
-  DCHECK(!holder_reg.is(ip));
-  DCHECK(!scratch.is(ip));
-
-  // Load current lexical context from the active StandardFrame, which
-  // may require crawling past STUB frames.
-  Label load_context;
-  Label has_context;
-  DCHECK(!ip.is(scratch));
-  LoadRR(ip, fp);
-  bind(&load_context);
-  LoadP(scratch,
-        MemOperand(ip, CommonFrameConstants::kContextOrFrameTypeOffset));
-  JumpIfNotSmi(scratch, &has_context);
-  LoadP(ip, MemOperand(ip, CommonFrameConstants::kCallerFPOffset));
-  b(&load_context);
-  bind(&has_context);
-
-// In debug mode, make sure the lexical context is set.
-#ifdef DEBUG
-  CmpP(scratch, Operand::Zero());
-  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
-#endif
-
-  // Load the native context of the current context.
-  LoadP(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
-
-  // Check the context is a native context.
-  if (emit_debug_code()) {
-    // Cannot use ip as a temporary in this verification code. Due to the fact
-    // that ip is clobbered as part of cmp with an object Operand.
-    push(holder_reg);  // Temporarily save holder on the stack.
-    // Read the first word and compare to the native_context_map.
-    LoadP(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
-    CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
-    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
-    pop(holder_reg);  // Restore holder.
-  }
-
-  // Check if both contexts are the same.
-  LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
-  CmpP(scratch, ip);
-  beq(&same_contexts, Label::kNear);
-
-  // Check the context is a native context.
-  if (emit_debug_code()) {
-    // TODO(119): avoid push(holder_reg)/pop(holder_reg)
-    // Cannot use ip as a temporary in this verification code. Due to the fact
-    // that ip is clobbered as part of cmp with an object Operand.
-    push(holder_reg);        // Temporarily save holder on the stack.
-    LoadRR(holder_reg, ip);  // Move ip to its holding place.
-    CompareRoot(holder_reg, Heap::kNullValueRootIndex);
-    Check(ne, kJSGlobalProxyContextShouldNotBeNull);
-
-    LoadP(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
-    CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
-    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
-    // Restore ip is not needed. ip is reloaded below.
-    pop(holder_reg);  // Restore holder.
-    // Restore ip to holder's context.
-    LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
-  }
-
-  // Check that the security token in the calling global object is
-  // compatible with the security token in the receiving global
-  // object.
-  int token_offset =
-      Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
-
-  LoadP(scratch, FieldMemOperand(scratch, token_offset));
-  LoadP(ip, FieldMemOperand(ip, token_offset));
-  CmpP(scratch, ip);
-  bne(miss);
-
-  bind(&same_contexts);
-}
-
 // Compute the hash code from the untagged key.  This must be kept in sync with
 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
 // code-stubs-hydrogen.cc
@@ -1624,85 +1543,6 @@
   ExtractBitRange(t0, t0, 29, 0);
 }
 
-void MacroAssembler::LoadFromNumberDictionary(Label* miss, Register elements,
-                                              Register key, Register result,
-                                              Register t0, Register t1,
-                                              Register t2) {
-  // Register use:
-  //
-  // elements - holds the slow-case elements of the receiver on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // key      - holds the smi key on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // result   - holds the result on exit if the load succeeded.
-  //            Allowed to be the same as 'key' or 'result'.
-  //            Unchanged on bailout so 'key' or 'result' can be used
-  //            in further computation.
-  //
-  // Scratch registers:
-  //
-  // t0 - holds the untagged key on entry and holds the hash once computed.
-  //
-  // t1 - used to hold the capacity mask of the dictionary
-  //
-  // t2 - used for the index into the dictionary.
-  Label done;
-
-  GetNumberHash(t0, t1);
-
-  // Compute the capacity mask.
-  LoadP(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
-  SmiUntag(t1);
-  SubP(t1, Operand(1));
-
-  // Generate an unrolled loop that performs a few probes before giving up.
-  for (int i = 0; i < kNumberDictionaryProbes; i++) {
-    // Use t2 for index calculations and keep the hash intact in t0.
-    LoadRR(t2, t0);
-    // Compute the masked index: (hash + i + i * i) & mask.
-    if (i > 0) {
-      AddP(t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
-    }
-    AndP(t2, t1);
-
-    // Scale the index by multiplying by the element size.
-    DCHECK(SeededNumberDictionary::kEntrySize == 3);
-    LoadRR(ip, t2);
-    sll(ip, Operand(1));
-    AddP(t2, ip);  // t2 = t2 * 3
-
-    // Check if the key is identical to the name.
-    sll(t2, Operand(kPointerSizeLog2));
-    AddP(t2, elements);
-    LoadP(ip,
-          FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
-    CmpP(key, ip);
-    if (i != kNumberDictionaryProbes - 1) {
-      beq(&done, Label::kNear);
-    } else {
-      bne(miss);
-    }
-  }
-
-  bind(&done);
-  // Check that the value is a field property.
-  // t2: elements + (index * kPointerSize)
-  const int kDetailsOffset =
-      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
-  LoadP(t1, FieldMemOperand(t2, kDetailsOffset));
-  LoadSmiLiteral(ip, Smi::FromInt(PropertyDetails::TypeField::kMask));
-  DCHECK_EQ(DATA, 0);
-  AndP(r0, ip, t1);
-  bne(miss);
-
-  // Get the value at the masked, scaled index and return.
-  const int kValueOffset =
-      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
-  LoadP(result, FieldMemOperand(t2, kValueOffset));
-}
-
 void MacroAssembler::Allocate(int object_size, Register result,
                               Register scratch1, Register scratch2,
                               Label* gc_required, AllocationFlags flags) {
@@ -2116,18 +1956,6 @@
   CmpP(obj, MemOperand(kRootRegister, index << kPointerSizeLog2));
 }
 
-void MacroAssembler::CheckFastElements(Register map, Register scratch,
-                                       Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  STATIC_ASSERT(FAST_ELEMENTS == 2);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
-  STATIC_ASSERT(Map::kMaximumBitField2FastHoleyElementValue < 0x8000);
-  CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
-                 Operand(Map::kMaximumBitField2FastHoleyElementValue));
-  bgt(fail);
-}
-
 void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
                                              Label* fail) {
   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
@@ -2302,16 +2130,6 @@
   return has_frame_ || !stub->SometimesSetsUpAFrame();
 }
 
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
-  // If the hash field contains an array index pick it out. The assert checks
-  // that the constants for the maximum number of digits for an array index
-  // cached in the hash field and the number of bits reserved for it does not
-  // conflict.
-  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
-         (1 << String::kArrayIndexValueBits));
-  DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
-}
-
 void MacroAssembler::TestDoubleIsInt32(DoubleRegister double_input,
                                        Register scratch1, Register scratch2,
                                        DoubleRegister double_scratch) {
@@ -3022,51 +2840,6 @@
   STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
 }
 
-void MacroAssembler::CopyBytes(Register src, Register dst, Register length,
-                               Register scratch) {
-  Label big_loop, left_bytes, done, fake_call;
-
-  DCHECK(!scratch.is(r0));
-
-  // big loop moves 256 bytes at a time
-  bind(&big_loop);
-  CmpP(length, Operand(static_cast<intptr_t>(0x100)));
-  blt(&left_bytes);
-
-  mvc(MemOperand(dst), MemOperand(src), 0x100);
-
-  AddP(src, Operand(static_cast<intptr_t>(0x100)));
-  AddP(dst, Operand(static_cast<intptr_t>(0x100)));
-  SubP(length, Operand(static_cast<intptr_t>(0x100)));
-  b(&big_loop);
-
-  bind(&left_bytes);
-  CmpP(length, Operand::Zero());
-  beq(&done);
-
-  // TODO(john.yan): More optimal version is to use MVC
-  // Sequence below has some undiagnosed issue.
-  /*
-  b(scratch, &fake_call);  // use brasl to Save mvc addr to scratch
-  mvc(MemOperand(dst), MemOperand(src), 1);
-  bind(&fake_call);
-  SubP(length, Operand(static_cast<intptr_t>(-1)));
-  ex(length, MemOperand(scratch));  // execute mvc instr above
-  AddP(src, length);
-  AddP(dst, length);
-  AddP(src, Operand(static_cast<intptr_t>(0x1)));
-  AddP(dst, Operand(static_cast<intptr_t>(0x1)));
-  */
-
-  mvc(MemOperand(dst), MemOperand(src), 1);
-  AddP(src, Operand(static_cast<intptr_t>(0x1)));
-  AddP(dst, Operand(static_cast<intptr_t>(0x1)));
-  SubP(length, Operand(static_cast<intptr_t>(0x1)));
-
-  b(&left_bytes);
-  bind(&done);
-}
-
 void MacroAssembler::InitializeNFieldsWithFiller(Register current_address,
                                                  Register count,
                                                  Register filler) {
@@ -3171,7 +2944,7 @@
   CmpP(index, ip);
   Check(lt, kIndexIsTooLarge);
 
-  DCHECK(Smi::FromInt(0) == 0);
+  DCHECK(Smi::kZero == 0);
   CmpP(index, Operand::Zero());
   Check(ge, kIndexIsNegative);
 
@@ -3496,7 +3269,7 @@
 
   // For all objects but the receiver, check that the cache is empty.
   EnumLength(r5, r3);
-  CmpSmiLiteral(r5, Smi::FromInt(0), r0);
+  CmpSmiLiteral(r5, Smi::kZero, r0);
   bne(call_runtime);
 
   bind(&start);
@@ -3581,7 +3354,8 @@
   ExternalReference new_space_allocation_top_adr =
       ExternalReference::new_space_allocation_top_address(isolate());
   const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
-  const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+  const int kMementoLastWordOffset =
+      kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
 
   DCHECK(!AreAliased(receiver_reg, scratch_reg));
 
@@ -3592,7 +3366,7 @@
 
   // If the object is in new space, we need to check whether it is on the same
   // page as the current top.
-  AddP(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+  AddP(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
   mov(ip, Operand(new_space_allocation_top_adr));
   LoadP(ip, MemOperand(ip));
   XorP(r0, scratch_reg, ip);
@@ -3610,7 +3384,7 @@
   // we are below top.
   bind(&top_check);
   CmpP(scratch_reg, ip);
-  bgt(no_memento_found);
+  bge(no_memento_found);
   // Memento map check.
   bind(&map_check);
   LoadP(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
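
The rename from kMementoEndOffset to kMementoLastWordOffset is a behavior fix, not cosmetics: the same-page shortcut above XORs the probe address with the allocation top and masks to the page, and probing one word past the memento can land on the next page when the memento ends flush with a page boundary. A sketch of the boundary case (assuming pointer-size words and the page-masked comparison above):

    // If the memento's last byte is the page's last byte:
    //   receiver + kMementoEndOffset      == page_end  (on the next page!)
    //   receiver + kMementoLastWordOffset == page_end - kPointerSize
    // Probing the last word keeps the check on the memento's own page; the
    // top comparison correspondingly weakens from bgt to bge, since a valid
    // memento's last word must lie strictly below top.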
diff --git a/src/s390/macro-assembler-s390.h b/src/s390/macro-assembler-s390.h
index 7f2d042..06fcaf0 100644
--- a/src/s390/macro-assembler-s390.h
+++ b/src/s390/macro-assembler-s390.h
@@ -932,18 +932,8 @@
   // ---------------------------------------------------------------------------
   // Inline caching support
 
-  // Generate code for checking access rights - used for security checks
-  // on access to global objects across environments. The holder register
-  // is left untouched, whereas both scratch registers are clobbered.
-  void CheckAccessGlobalProxy(Register holder_reg, Register scratch,
-                              Label* miss);
-
   void GetNumberHash(Register t0, Register scratch);
 
-  void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
-                                Register result, Register t0, Register t1,
-                                Register t2);
-
   inline void MarkCode(NopMarkerTypes type) { nop(type); }
 
   // Check if the given instruction is a 'type' marker.
@@ -1036,11 +1026,6 @@
                        Register scratch1, Register scratch2,
                        Label* gc_required);
 
-  // Copies a number of bytes from src to dst. All registers are clobbered. On
-  // exit src and dst will point to the place just after where the last byte was
-  // read or written and length will be zero.
-  void CopyBytes(Register src, Register dst, Register length, Register scratch);
-
   // Initialize fields with filler values.  |count| fields starting at
   // |current_address| are overwritten with the value in |filler|.  At the end
   // of the loop, |current_address| points at the next uninitialized field.
@@ -1086,10 +1071,6 @@
   // sets the flags and leaves the object type in the type_reg register.
   void CompareInstanceType(Register map, Register type_reg, InstanceType type);
 
-  // Check if a map for a JSObject indicates that the object has fast elements.
-  // Jump to the specified label if it does not.
-  void CheckFastElements(Register map, Register scratch, Label* fail);
-
   // Check if a map for a JSObject indicates that the object can have both smi
   // and HeapObject elements.  Jump to the specified label if it does not.
   void CheckFastObjectElements(Register map, Register scratch, Label* fail);
@@ -1177,12 +1158,6 @@
     return eq;
   }
 
-  // Picks out an array index from the hash field.
-  // Register use:
-  //   hash - holds the index's hash. Clobbered.
-  //   index - holds the overwritten index on exit.
-  void IndexFromHash(Register hash, Register index);
-
   // Get the number of least significant bits from a register
   void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
   void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
diff --git a/src/s390/simulator-s390.cc b/src/s390/simulator-s390.cc
index 78bc939..74d37bc 100644
--- a/src/s390/simulator-s390.cc
+++ b/src/s390/simulator-s390.cc
@@ -5682,7 +5682,7 @@
 
   // Set up the non-volatile registers with a known value, to be able to
   // check that they are preserved properly across JS execution.
-  intptr_t callee_saved_value = icount_;
+  uintptr_t callee_saved_value = icount_;
   if (reg_arg_count < 5) {
     set_register(r6, callee_saved_value + 6);
   }
@@ -5700,15 +5700,15 @@
 // Check that the non-volatile registers have been preserved.
 #ifndef V8_TARGET_ARCH_S390X
   if (reg_arg_count < 5) {
-    DCHECK_EQ(callee_saved_value + 6, get_low_register<int32_t>(r6));
+    DCHECK_EQ(callee_saved_value + 6, get_low_register<uint32_t>(r6));
   }
-  DCHECK_EQ(callee_saved_value + 7, get_low_register<int32_t>(r7));
-  DCHECK_EQ(callee_saved_value + 8, get_low_register<int32_t>(r8));
-  DCHECK_EQ(callee_saved_value + 9, get_low_register<int32_t>(r9));
-  DCHECK_EQ(callee_saved_value + 10, get_low_register<int32_t>(r10));
-  DCHECK_EQ(callee_saved_value + 11, get_low_register<int32_t>(r11));
-  DCHECK_EQ(callee_saved_value + 12, get_low_register<int32_t>(r12));
-  DCHECK_EQ(callee_saved_value + 13, get_low_register<int32_t>(r13));
+  DCHECK_EQ(callee_saved_value + 7, get_low_register<uint32_t>(r7));
+  DCHECK_EQ(callee_saved_value + 8, get_low_register<uint32_t>(r8));
+  DCHECK_EQ(callee_saved_value + 9, get_low_register<uint32_t>(r9));
+  DCHECK_EQ(callee_saved_value + 10, get_low_register<uint32_t>(r10));
+  DCHECK_EQ(callee_saved_value + 11, get_low_register<uint32_t>(r11));
+  DCHECK_EQ(callee_saved_value + 12, get_low_register<uint32_t>(r12));
+  DCHECK_EQ(callee_saved_value + 13, get_low_register<uint32_t>(r13));
 #else
   if (reg_arg_count < 5) {
     DCHECK_EQ(callee_saved_value + 6, get_register(r6));
@@ -5762,7 +5762,7 @@
   // Remaining arguments passed on stack.
   int64_t original_stack = get_register(sp);
   // Compute position of stack on entry to generated code.
-  intptr_t entry_stack =
+  uintptr_t entry_stack =
       (original_stack -
        (kCalleeRegisterSaveAreaSize + stack_arg_count * sizeof(intptr_t)));
   if (base::OS::ActivationFrameAlignment() != 0) {
@@ -5798,7 +5798,7 @@
 
   // Set up the non-volatile registers with a known value, to be able to
   // check that they are preserved properly across JS execution.
-  intptr_t callee_saved_value = icount_;
+  uintptr_t callee_saved_value = icount_;
   if (reg_arg_count < 5) {
     set_register(r6, callee_saved_value + 6);
   }
@@ -5816,15 +5816,15 @@
 // Check that the non-volatile registers have been preserved.
 #ifndef V8_TARGET_ARCH_S390X
   if (reg_arg_count < 5) {
-    DCHECK_EQ(callee_saved_value + 6, get_low_register<int32_t>(r6));
+    DCHECK_EQ(callee_saved_value + 6, get_low_register<uint32_t>(r6));
   }
-  DCHECK_EQ(callee_saved_value + 7, get_low_register<int32_t>(r7));
-  DCHECK_EQ(callee_saved_value + 8, get_low_register<int32_t>(r8));
-  DCHECK_EQ(callee_saved_value + 9, get_low_register<int32_t>(r9));
-  DCHECK_EQ(callee_saved_value + 10, get_low_register<int32_t>(r10));
-  DCHECK_EQ(callee_saved_value + 11, get_low_register<int32_t>(r11));
-  DCHECK_EQ(callee_saved_value + 12, get_low_register<int32_t>(r12));
-  DCHECK_EQ(callee_saved_value + 13, get_low_register<int32_t>(r13));
+  DCHECK_EQ(callee_saved_value + 7, get_low_register<uint32_t>(r7));
+  DCHECK_EQ(callee_saved_value + 8, get_low_register<uint32_t>(r8));
+  DCHECK_EQ(callee_saved_value + 9, get_low_register<uint32_t>(r9));
+  DCHECK_EQ(callee_saved_value + 10, get_low_register<uint32_t>(r10));
+  DCHECK_EQ(callee_saved_value + 11, get_low_register<uint32_t>(r11));
+  DCHECK_EQ(callee_saved_value + 12, get_low_register<uint32_t>(r12));
+  DCHECK_EQ(callee_saved_value + 13, get_low_register<uint32_t>(r13));
 #else
   if (reg_arg_count < 5) {
     DCHECK_EQ(callee_saved_value + 6, get_register(r6));
@@ -5850,7 +5850,7 @@
 // Pop stack passed arguments.
 
 #ifndef V8_TARGET_ARCH_S390X
-  DCHECK_EQ(entry_stack, get_low_register<int32_t>(sp));
+  DCHECK_EQ(entry_stack, get_low_register<uint32_t>(sp));
 #else
   DCHECK_EQ(entry_stack, get_register(sp));
 #endif
@@ -6504,7 +6504,6 @@
   DCHECK_OPCODE(LCR);
   DECODE_RR_INSTRUCTION(r1, r2);
   int32_t r2_val = get_low_register<int32_t>(r2);
-  int32_t original_r2_val = r2_val;
   r2_val = ~r2_val;
   r2_val = r2_val + 1;
   set_low_register(r1, r2_val);
@@ -6513,7 +6512,7 @@
   // Cannot do int comparison due to GCC 4.8 bug on x86.
   // Detect INT_MIN alternatively, as it is the only value where both
   // original and result are negative due to overflow.
-  if (r2_val < 0 && original_r2_val < 0) {
+  if (r2_val == (static_cast<int32_t>(1) << 31)) {
     SetS390OverflowCode(true);
   }
   return length;
@@ -9837,7 +9836,7 @@
   set_register(r1, r2_val);
   SetS390ConditionCode<int64_t>(r2_val, 0);
   // If the input is INT64_MIN, loading its complement would overflow.
-  if (r2_val < 0 && (r2_val + 1) > 0) {
+  if (r2_val == (static_cast<int64_t>(1) << 63)) {
     SetS390OverflowCode(true);
   }
   return length;
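
Both overflow checks (LCR above for 32-bit, LCGR here for 64-bit) switch from a sign heuristic to an exact test: two's-complement negation overflows for exactly one input, the minimum value, which negates back to itself. A standalone sketch of that fact (hedged: it routes the negation through unsigned arithmetic purely to keep the example free of signed-overflow UB):

    #include <cstdint>
    #include <limits>

    // ~v + 1 is two's-complement negation; done on uint32_t it wraps
    // deterministically instead of overflowing.
    constexpr int32_t NegateWrap32(int32_t v) {
      return static_cast<int32_t>(~static_cast<uint32_t>(v) + 1u);
    }

    static_assert(NegateWrap32(7) == -7, "ordinary values negate normally");
    static_assert(NegateWrap32(std::numeric_limits<int32_t>::min()) ==
                      std::numeric_limits<int32_t>::min(),
                  "INT32_MIN is the single self-negating (overflow) case");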
diff --git a/src/signature.h b/src/signature.h
index 97238b6..32050fe 100644
--- a/src/signature.h
+++ b/src/signature.h
@@ -32,7 +32,16 @@
     return reps_[index];
   }
 
-  const T* raw_data() const { return reps_; }
+  bool Equals(Signature* that) {
+    if (this == that) return true;
+    if (this->parameter_count() != that->parameter_count()) return false;
+    if (this->return_count() != that->return_count()) return false;
+    size_t size = this->return_count() + this->parameter_count();
+    for (size_t i = 0; i < size; i++) {
+      if (this->reps_[i] != that->reps_[i]) return false;
+    }
+    return true;
+  }
 
   // For incrementally building signatures.
   class Builder {
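
The Equals added above replaces the removed raw_data() accessor in this hunk: instead of handing out the raw representation array, the signature compares itself. A minimal standalone replica of the contract (hypothetical MachineRep enum; the real class is templated over the representation type, with reps_ storing returns first, then parameters):

    #include <cstddef>

    enum class MachineRep { kWord32, kWord64, kFloat64 };

    struct MiniSignature {
      size_t return_count;
      size_t param_count;
      const MachineRep* reps;  // flattened: [returns..., params...]

      bool Equals(const MiniSignature& that) const {
        // Cheap count checks first, then an element-wise walk.
        if (return_count != that.return_count) return false;
        if (param_count != that.param_count) return false;
        for (size_t i = 0; i < return_count + param_count; i++) {
          if (reps[i] != that.reps[i]) return false;
        }
        return true;
      }
    };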
diff --git a/src/snapshot/code-serializer.cc b/src/snapshot/code-serializer.cc
index 16044a5..86a9164 100644
--- a/src/snapshot/code-serializer.cc
+++ b/src/snapshot/code-serializer.cc
@@ -12,6 +12,8 @@
 #include "src/snapshot/deserializer.h"
 #include "src/snapshot/snapshot.h"
 #include "src/version.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
 
 namespace v8 {
 namespace internal {
@@ -99,8 +101,8 @@
   }
 
   if (ElideObject(obj)) {
-    return SerializeObject(*isolate()->factory()->undefined_value(),
-                           how_to_code, where_to_point, skip);
+    return SerializeObject(isolate()->heap()->undefined_value(), how_to_code,
+                           where_to_point, skip);
   }
   // Past this point we should not see any (context-specific) maps anymore.
   CHECK(!obj->IsMap());
@@ -217,15 +219,19 @@
 }
 
 std::unique_ptr<ScriptData> WasmCompiledModuleSerializer::SerializeWasmModule(
-    Isolate* isolate, Handle<FixedArray> compiled_module) {
+    Isolate* isolate, Handle<FixedArray> input) {
+  Handle<WasmCompiledModule> compiled_module =
+      Handle<WasmCompiledModule>::cast(input);
   WasmCompiledModuleSerializer wasm_cs(isolate, 0);
   wasm_cs.reference_map()->AddAttachedReference(*isolate->native_context());
+  wasm_cs.reference_map()->AddAttachedReference(
+      *compiled_module->module_bytes());
   ScriptData* data = wasm_cs.Serialize(compiled_module);
   return std::unique_ptr<ScriptData>(data);
 }
 
 MaybeHandle<FixedArray> WasmCompiledModuleSerializer::DeserializeWasmModule(
-    Isolate* isolate, ScriptData* data) {
+    Isolate* isolate, ScriptData* data, Vector<const byte> wire_bytes) {
   SerializedCodeData::SanityCheckResult sanity_check_result =
       SerializedCodeData::CHECK_SUCCESS;
   MaybeHandle<FixedArray> nothing;
@@ -239,6 +245,15 @@
   Deserializer deserializer(&scd, true);
   deserializer.AddAttachedObject(isolate->native_context());
 
+  MaybeHandle<String> maybe_wire_bytes_as_string =
+      isolate->factory()->NewStringFromOneByte(wire_bytes, TENURED);
+  Handle<String> wire_bytes_as_string;
+  if (!maybe_wire_bytes_as_string.ToHandle(&wire_bytes_as_string)) {
+    return nothing;
+  }
+  deserializer.AddAttachedObject(
+      handle(SeqOneByteString::cast(*wire_bytes_as_string)));
+
   Vector<const uint32_t> stub_keys = scd.CodeStubKeys();
   for (int i = 0; i < stub_keys.length(); ++i) {
     deserializer.AddAttachedObject(
@@ -247,7 +262,11 @@
 
   MaybeHandle<HeapObject> obj = deserializer.DeserializeObject(isolate);
   if (obj.is_null() || !obj.ToHandleChecked()->IsFixedArray()) return nothing;
-  return Handle<FixedArray>::cast(obj.ToHandleChecked());
+  Handle<WasmCompiledModule> compiled_module =
+      Handle<WasmCompiledModule>::cast(obj.ToHandleChecked());
+
+  WasmCompiledModule::RecreateModuleWrapper(isolate, compiled_module);
+  return compiled_module;
 }
 
 class Checksum {
@@ -340,6 +359,7 @@
 
 SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
     Isolate* isolate, uint32_t expected_source_hash) const {
+  if (this->size_ < kHeaderSize) return INVALID_HEADER;
   uint32_t magic_number = GetMagicNumber();
   if (magic_number != ComputeMagicNumber(isolate)) return MAGIC_NUMBER_MISMATCH;
   uint32_t version_hash = GetHeaderValue(kVersionHashOffset);
diff --git a/src/snapshot/code-serializer.h b/src/snapshot/code-serializer.h
index b3c54d1..1575737 100644
--- a/src/snapshot/code-serializer.h
+++ b/src/snapshot/code-serializer.h
@@ -59,8 +59,8 @@
  public:
   static std::unique_ptr<ScriptData> SerializeWasmModule(
       Isolate* isolate, Handle<FixedArray> compiled_module);
-  static MaybeHandle<FixedArray> DeserializeWasmModule(Isolate* isolate,
-                                                       ScriptData* data);
+  static MaybeHandle<FixedArray> DeserializeWasmModule(
+      Isolate* isolate, ScriptData* data, Vector<const byte> wire_bytes);
 
  protected:
   void SerializeCodeObject(Code* code_object, HowToCode how_to_code,
@@ -74,7 +74,9 @@
     }
   }
 
-  bool ElideObject(Object* obj) override { return obj->IsWeakCell(); };
+  bool ElideObject(Object* obj) override {
+    return obj->IsWeakCell() || obj->IsForeign();
+  };
 
  private:
   WasmCompiledModuleSerializer(Isolate* isolate, uint32_t source_hash)
@@ -92,9 +94,35 @@
     SOURCE_MISMATCH = 3,
     CPU_FEATURES_MISMATCH = 4,
     FLAGS_MISMATCH = 5,
-    CHECKSUM_MISMATCH = 6
+    CHECKSUM_MISMATCH = 6,
+    INVALID_HEADER = 7
   };
 
+  // The data header consists of uint32_t-sized entries:
+  // [0] magic number and external reference count
+  // [1] version hash
+  // [2] source hash
+  // [3] cpu features
+  // [4] flag hash
+  // [5] number of code stub keys
+  // [6] number of reservation size entries
+  // [7] payload length
+  // [8] payload checksum part 1
+  // [9] payload checksum part 2
+  // ...  reservations
+  // ...  code stub keys
+  // ...  serialized payload
+  static const int kVersionHashOffset = kMagicNumberOffset + kInt32Size;
+  static const int kSourceHashOffset = kVersionHashOffset + kInt32Size;
+  static const int kCpuFeaturesOffset = kSourceHashOffset + kInt32Size;
+  static const int kFlagHashOffset = kCpuFeaturesOffset + kInt32Size;
+  static const int kNumReservationsOffset = kFlagHashOffset + kInt32Size;
+  static const int kNumCodeStubKeysOffset = kNumReservationsOffset + kInt32Size;
+  static const int kPayloadLengthOffset = kNumCodeStubKeysOffset + kInt32Size;
+  static const int kChecksum1Offset = kPayloadLengthOffset + kInt32Size;
+  static const int kChecksum2Offset = kChecksum1Offset + kInt32Size;
+  static const int kHeaderSize = kChecksum2Offset + kInt32Size;
+
   // Used when consuming.
   static const SerializedCodeData FromCachedData(
       Isolate* isolate, ScriptData* cached_data, uint32_t expected_source_hash,
@@ -124,30 +152,6 @@
 
   SanityCheckResult SanityCheck(Isolate* isolate,
                                 uint32_t expected_source_hash) const;
-  // The data header consists of uint32_t-sized entries:
-  // [0] magic number and external reference count
-  // [1] version hash
-  // [2] source hash
-  // [3] cpu features
-  // [4] flag hash
-  // [5] number of code stub keys
-  // [6] number of reservation size entries
-  // [7] payload length
-  // [8] payload checksum part 1
-  // [9] payload checksum part 2
-  // ...  reservations
-  // ...  code stub keys
-  // ...  serialized payload
-  static const int kVersionHashOffset = kMagicNumberOffset + kInt32Size;
-  static const int kSourceHashOffset = kVersionHashOffset + kInt32Size;
-  static const int kCpuFeaturesOffset = kSourceHashOffset + kInt32Size;
-  static const int kFlagHashOffset = kCpuFeaturesOffset + kInt32Size;
-  static const int kNumReservationsOffset = kFlagHashOffset + kInt32Size;
-  static const int kNumCodeStubKeysOffset = kNumReservationsOffset + kInt32Size;
-  static const int kPayloadLengthOffset = kNumCodeStubKeysOffset + kInt32Size;
-  static const int kChecksum1Offset = kPayloadLengthOffset + kInt32Size;
-  static const int kChecksum2Offset = kChecksum1Offset + kInt32Size;
-  static const int kHeaderSize = kChecksum2Offset + kInt32Size;
 };
 
 }  // namespace internal
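
The header layout table moved up beside the new INVALID_HEADER result it backs. As a quick sanity sketch of its arithmetic (assumptions: kInt32Size == 4 as in src/globals.h, and kMagicNumberOffset == 0, i.e. the header starts the blob): ten consecutive uint32_t fields give the 40-byte minimum that the new size_ < kHeaderSize guard in SanityCheck() enforces before reading any header field.

    constexpr int kInt32Size = 4;          // assumption: see src/globals.h
    constexpr int kMagicNumberOffset = 0;  // assumption: header starts the blob
    constexpr int kHeaderSize = kMagicNumberOffset + 10 * kInt32Size;
    static_assert(kHeaderSize == 40, "ten uint32_t header fields");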
diff --git a/src/snapshot/deserializer.cc b/src/snapshot/deserializer.cc
index b90a2c5..aabd806 100644
--- a/src/snapshot/deserializer.cc
+++ b/src/snapshot/deserializer.cc
@@ -99,7 +99,7 @@
       isolate_->heap()->undefined_value());
   // The allocation site list is built during root iteration, but if no sites
   // were encountered then it needs to be initialized to undefined.
-  if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
+  if (isolate_->heap()->allocation_sites_list() == Smi::kZero) {
     isolate_->heap()->set_allocation_sites_list(
         isolate_->heap()->undefined_value());
   }
@@ -128,6 +128,7 @@
   Object* root;
   VisitPointer(&root);
   DeserializeDeferredObjects();
+  DeserializeInternalFields();
 
   isolate->heap()->RegisterReservationsForBlackAllocation(reservations_);
 
@@ -212,6 +213,31 @@
   }
 }
 
+void Deserializer::DeserializeInternalFields() {
+  if (!source_.HasMore() || source_.Get() != kInternalFieldsData) return;
+  DisallowHeapAllocation no_gc;
+  DisallowJavascriptExecution no_js(isolate_);
+  DisallowCompilation no_compile(isolate_);
+  v8::DeserializeInternalFieldsCallback callback =
+      isolate_->deserialize_internal_fields_callback();
+  DCHECK_NOT_NULL(callback);
+  for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
+    HandleScope scope(isolate_);
+    int space = code & kSpaceMask;
+    DCHECK(space <= kNumberOfSpaces);
+    DCHECK(code - space == kNewObject);
+    Handle<JSObject> obj(JSObject::cast(GetBackReferencedObject(space)),
+                         isolate_);
+    int index = source_.GetInt();
+    int size = source_.GetInt();
+    byte* data = new byte[size];
+    source_.CopyRaw(data, size);
+    callback(v8::Utils::ToLocal(obj), index,
+             {reinterpret_cast<char*>(data), size});
+    delete[] data;
+  }
+}
+
 // Used to insert a deserialized internalized string into the string table.
 class StringTableInsertionKey : public HashTableKey {
  public:
@@ -277,7 +303,7 @@
     // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
     // as a (weak) root. If this root is relocated correctly, this becomes
     // unnecessary.
-    if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
+    if (isolate_->heap()->allocation_sites_list() == Smi::kZero) {
       site->set_weak_next(isolate_->heap()->undefined_value());
     } else {
       site->set_weak_next(isolate_->heap()->allocation_sites_list());
@@ -502,7 +528,7 @@
         int skip = source_.GetInt();                                           \
         current = reinterpret_cast<Object**>(                                  \
             reinterpret_cast<Address>(current) + skip);                        \
-        int reference_id = source_.GetInt();                                   \
+        uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());       \
         Address address = external_reference_table_->address(reference_id);    \
         new_object = reinterpret_cast<Object*>(address);                       \
       } else if (where == kAttachedReference) {                                \
diff --git a/src/snapshot/deserializer.h b/src/snapshot/deserializer.h
index 634d80e..db79962 100644
--- a/src/snapshot/deserializer.h
+++ b/src/snapshot/deserializer.h
@@ -88,6 +88,7 @@
   }
 
   void DeserializeDeferredObjects();
+  void DeserializeInternalFields();
 
   void FlushICacheForNewIsolate();
   void FlushICacheForNewCodeObjectsAndRecordEmbeddedObjects();
diff --git a/src/snapshot/partial-serializer.cc b/src/snapshot/partial-serializer.cc
index b46f675..e89f44f 100644
--- a/src/snapshot/partial-serializer.cc
+++ b/src/snapshot/partial-serializer.cc
@@ -10,9 +10,12 @@
 namespace v8 {
 namespace internal {
 
-PartialSerializer::PartialSerializer(Isolate* isolate,
-                                     StartupSerializer* startup_serializer)
-    : Serializer(isolate), startup_serializer_(startup_serializer) {
+PartialSerializer::PartialSerializer(
+    Isolate* isolate, StartupSerializer* startup_serializer,
+    v8::SerializeInternalFieldsCallback callback)
+    : Serializer(isolate),
+      startup_serializer_(startup_serializer),
+      serialize_internal_fields_(callback) {
   InitializeCodeAddressMap();
 }
 
@@ -33,10 +36,14 @@
       context->set(Context::NEXT_CONTEXT_LINK,
                    isolate_->heap()->undefined_value());
       DCHECK(!context->global_object()->IsUndefined(context->GetIsolate()));
+      // Reset math random cache to get fresh random numbers.
+      context->set_math_random_index(Smi::kZero);
+      context->set_math_random_cache(isolate_->heap()->undefined_value());
     }
   }
   VisitPointer(o);
   SerializeDeferredObjects();
+  SerializeInternalFields();
   Pad();
 }
 
@@ -93,6 +100,11 @@
     function->ClearTypeFeedbackInfo();
   }
 
+  if (obj->IsJSObject()) {
+    JSObject* jsobj = JSObject::cast(obj);
+    if (jsobj->GetInternalFieldCount() > 0) internal_field_holders_.Add(jsobj);
+  }
+
   // Object has not yet been serialized.  Serialize it here.
   ObjectSerializer serializer(this, obj, &sink_, how_to_code, where_to_point);
   serializer.Serialize();
@@ -106,9 +118,39 @@
   DCHECK(!o->IsScript());
   return o->IsName() || o->IsSharedFunctionInfo() || o->IsHeapNumber() ||
          o->IsCode() || o->IsScopeInfo() || o->IsAccessorInfo() ||
+         o->IsTemplateInfo() ||
          o->map() ==
              startup_serializer_->isolate()->heap()->fixed_cow_array_map();
 }
 
+void PartialSerializer::SerializeInternalFields() {
+  int count = internal_field_holders_.length();
+  if (count == 0) return;
+  DisallowHeapAllocation no_gc;
+  DisallowJavascriptExecution no_js(isolate());
+  DisallowCompilation no_compile(isolate());
+  DCHECK_NOT_NULL(serialize_internal_fields_);
+  sink_.Put(kInternalFieldsData, "internal fields data");
+  while (internal_field_holders_.length() > 0) {
+    HandleScope scope(isolate());
+    Handle<JSObject> obj(internal_field_holders_.RemoveLast(), isolate());
+    SerializerReference reference = reference_map_.Lookup(*obj);
+    DCHECK(reference.is_back_reference());
+    int internal_fields_count = obj->GetInternalFieldCount();
+    for (int i = 0; i < internal_fields_count; i++) {
+      if (obj->GetInternalField(i)->IsHeapObject()) continue;
+      StartupData data = serialize_internal_fields_(v8::Utils::ToLocal(obj), i);
+      sink_.Put(kNewObject + reference.space(), "internal field holder");
+      PutBackReference(*obj, reference);
+      sink_.PutInt(i, "internal field index");
+      sink_.PutInt(data.raw_size, "internal fields data size");
+      sink_.PutRaw(reinterpret_cast<const byte*>(data.data), data.raw_size,
+                   "internal fields data");
+      delete[] data.data;
+    }
+  }
+  sink_.Put(kSynchronize, "Finished with internal fields data");
+}
+
 }  // namespace internal
 }  // namespace v8
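
Read together with Deserializer::DeserializeInternalFields in deserializer.cc above, SerializeInternalFields pins down a small trailing stream. Its layout, reconstructed from the two functions:

    kInternalFieldsData                        // section tag
    // then one record per non-HeapObject internal field:
    //   kNewObject + space                    // back-reference opcode
    //   <back reference to the holder JSObject>
    //   <field index>                         // varint (PutInt/GetInt)
    //   <data size>                           // varint
    //   <raw embedder bytes>                  // data size bytes
    kSynchronize                               // terminator

Fields holding heap objects are skipped here (they serialize through the normal path), and the buffers are transient on both ends: the serializer deletes the StartupData the embedder callback returned once it is copied into the sink, and the deserializer frees its temporary copy right after invoking the callback.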
diff --git a/src/snapshot/partial-serializer.h b/src/snapshot/partial-serializer.h
index 282f76e..45d64e4 100644
--- a/src/snapshot/partial-serializer.h
+++ b/src/snapshot/partial-serializer.h
@@ -15,7 +15,8 @@
 
 class PartialSerializer : public Serializer {
  public:
-  PartialSerializer(Isolate* isolate, StartupSerializer* startup_serializer);
+  PartialSerializer(Isolate* isolate, StartupSerializer* startup_serializer,
+                    v8::SerializeInternalFieldsCallback callback);
 
   ~PartialSerializer() override;
 
@@ -28,7 +29,11 @@
 
   bool ShouldBeInThePartialSnapshotCache(HeapObject* o);
 
+  void SerializeInternalFields();
+
   StartupSerializer* startup_serializer_;
+  List<JSObject*> internal_field_holders_;
+  v8::SerializeInternalFieldsCallback serialize_internal_fields_;
   DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
 };
 
diff --git a/src/snapshot/serializer-common.cc b/src/snapshot/serializer-common.cc
index adfd6e4..f188793 100644
--- a/src/snapshot/serializer-common.cc
+++ b/src/snapshot/serializer-common.cc
@@ -13,37 +13,42 @@
 
 ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate) {
   map_ = isolate->external_reference_map();
-  if (map_ != NULL) return;
-  map_ = new base::HashMap();
+#ifdef DEBUG
+  table_ = ExternalReferenceTable::instance(isolate);
+#endif  // DEBUG
+  if (map_ != nullptr) return;
+  map_ = new AddressToIndexHashMap();
   ExternalReferenceTable* table = ExternalReferenceTable::instance(isolate);
-  for (int i = 0; i < table->size(); ++i) {
+  for (uint32_t i = 0; i < table->size(); ++i) {
     Address addr = table->address(i);
-    if (addr == ExternalReferenceTable::NotAvailable()) continue;
-    // We expect no duplicate external references entries in the table.
-    // AccessorRefTable getter may have duplicates, indicated by an empty string
-    // as name.
-    DCHECK(table->name(i)[0] == '\0' ||
-           map_->Lookup(addr, Hash(addr)) == nullptr);
-    map_->LookupOrInsert(addr, Hash(addr))->value = reinterpret_cast<void*>(i);
+    DCHECK(map_->Get(addr).IsNothing() ||
+           strncmp(table->name(i), "Redirect to ", 12) == 0);
+    map_->Set(addr, i);
+    DCHECK(map_->Get(addr).IsJust());
   }
   isolate->set_external_reference_map(map_);
 }
 
 uint32_t ExternalReferenceEncoder::Encode(Address address) const {
-  DCHECK_NOT_NULL(address);
-  base::HashMap::Entry* entry =
-      const_cast<base::HashMap*>(map_)->Lookup(address, Hash(address));
-  DCHECK_NOT_NULL(entry);
-  return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
+  Maybe<uint32_t> maybe_index = map_->Get(address);
+  if (maybe_index.IsNothing()) {
+    void* addr = address;
+    v8::base::OS::PrintError("Unknown external reference %p.\n", addr);
+    v8::base::OS::PrintError("%s", ExternalReferenceTable::ResolveSymbol(addr));
+    v8::base::OS::Abort();
+  }
+#ifdef DEBUG
+  table_->increment_count(maybe_index.FromJust());
+#endif  // DEBUG
+  return maybe_index.FromJust();
 }
 
 const char* ExternalReferenceEncoder::NameOfAddress(Isolate* isolate,
                                                     Address address) const {
-  base::HashMap::Entry* entry =
-      const_cast<base::HashMap*>(map_)->Lookup(address, Hash(address));
-  if (entry == NULL) return "<unknown>";
-  uint32_t i = static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
-  return ExternalReferenceTable::instance(isolate)->name(i);
+  Maybe<uint32_t> maybe_index = map_->Get(address);
+  if (maybe_index.IsNothing()) return "<unknown>";
+  return ExternalReferenceTable::instance(isolate)->name(
+      maybe_index.FromJust());
 }
 
 void SerializedData::AllocateData(int size) {
@@ -64,7 +69,7 @@
   List<Object*>* cache = isolate->partial_snapshot_cache();
   for (int i = 0;; ++i) {
     // Extend the array ready to get a value when deserializing.
-    if (cache->length() <= i) cache->Add(Smi::FromInt(0));
+    if (cache->length() <= i) cache->Add(Smi::kZero);
     // During deserialization, the visitor populates the partial snapshot cache
     // and eventually terminates the cache with undefined.
     visitor->VisitPointer(&cache->at(i));
diff --git a/src/snapshot/serializer-common.h b/src/snapshot/serializer-common.h
index 74b0218..201ac4e 100644
--- a/src/snapshot/serializer-common.h
+++ b/src/snapshot/serializer-common.h
@@ -23,12 +23,10 @@
   const char* NameOfAddress(Isolate* isolate, Address address) const;
 
  private:
-  static uint32_t Hash(Address key) {
-    return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key) >>
-                                 kPointerSizeLog2);
-  }
-
-  base::HashMap* map_;
+  AddressToIndexHashMap* map_;
+#ifdef DEBUG
+  ExternalReferenceTable* table_;
+#endif  // DEBUG
 
   DISALLOW_COPY_AND_ASSIGN(ExternalReferenceEncoder);
 };
@@ -172,6 +170,8 @@
   // Used for the source code for compiled stubs, which is in the executable,
   // but is referred to from external strings in the snapshot.
   static const int kExtraNativesStringResource = 0x1e;
+  // Used for embedder-provided serialization data for internal fields.
+  static const int kInternalFieldsData = 0x1f;
 
   // 8 hot (recently seen or back-referenced) objects with optional skip.
   static const int kNumberOfHotObjects = 8;
@@ -182,7 +182,7 @@
   static const int kHotObjectWithSkip = 0x58;
   static const int kHotObjectMask = 0x07;
 
-  // 0x1f, 0x35..0x37, 0x55..0x57, 0x75..0x7f unused.
+  // 0x35..0x37, 0x55..0x57, 0x75..0x7f unused.
 
   // ---------- byte code range 0x80..0xff ----------
   // First 32 root array items.
diff --git a/src/snapshot/serializer.cc b/src/snapshot/serializer.cc
index f622a5b..2e971e3 100644
--- a/src/snapshot/serializer.cc
+++ b/src/snapshot/serializer.cc
@@ -212,6 +212,11 @@
     PrintF("\n");
   }
 
+  // Assert that the first 32 root array items are a conscious choice. They are
+  // chosen so that the most common ones can be encoded more efficiently.
+  STATIC_ASSERT(Heap::kEmptyDescriptorArrayRootIndex ==
+                kNumberOfRootArrayConstants - 1);
+
   if (how_to_code == kPlain && where_to_point == kStartOfObject &&
       root_index < kNumberOfRootArrayConstants &&
       !isolate()->heap()->InNewSpace(object)) {
@@ -618,6 +623,7 @@
   sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
   sink_->PutInt(skip, "SkipB4ExternalRef");
   Address target = rinfo->target_external_reference();
+  DCHECK_NOT_NULL(target);  // Code does not reference null.
   sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
   bytes_processed_so_far_ += rinfo->target_address_size();
 }
diff --git a/src/snapshot/startup-serializer.h b/src/snapshot/startup-serializer.h
index 9c1c3b9..ac75c5d 100644
--- a/src/snapshot/startup-serializer.h
+++ b/src/snapshot/startup-serializer.h
@@ -30,25 +30,26 @@
   int PartialSnapshotCacheIndex(HeapObject* o);
 
  private:
-  class PartialCacheIndexMap : public AddressMapBase {
+  class PartialCacheIndexMap {
    public:
     PartialCacheIndexMap() : map_(), next_index_(0) {}
 
     // Lookup object in the map. Return its index if found, or create
     // a new entry with new_index as value, and return kInvalidIndex.
     bool LookupOrInsert(HeapObject* obj, int* index_out) {
-      base::HashMap::Entry* entry = LookupEntry(&map_, obj, false);
-      if (entry != NULL) {
-        *index_out = GetValue(entry);
+      Maybe<uint32_t> maybe_index = map_.Get(obj);
+      if (maybe_index.IsJust()) {
+        *index_out = maybe_index.FromJust();
         return true;
       }
       *index_out = next_index_;
-      SetValue(LookupEntry(&map_, obj, true), next_index_++);
+      map_.Set(obj, next_index_++);
       return false;
     }
 
    private:
-    base::HashMap map_;
+    DisallowHeapAllocation no_allocation_;
+    HeapObjectToIndexHashMap map_;
     int next_index_;
 
     DISALLOW_COPY_AND_ASSIGN(PartialCacheIndexMap);
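
The LookupOrInsert above returns the existing index when the object has been seen before and otherwise hands out the next sequential index. A minimal standalone sketch of the same idiom, using std::unordered_map in place of V8's HeapObjectToIndexHashMap (class and names here are illustrative, not V8 API):

#include <cstdio>
#include <unordered_map>

// Sketch: assign dense indices to pointers on first sight, reuse thereafter.
class IndexMap {
 public:
  // Returns true and sets *index_out if |key| was seen before; otherwise
  // records |key| under the next free index, reports it, and returns false.
  bool LookupOrInsert(const void* key, int* index_out) {
    auto it = map_.find(key);
    if (it != map_.end()) {
      *index_out = it->second;
      return true;
    }
    *index_out = next_index_;
    map_.emplace(key, next_index_++);
    return false;
  }

 private:
  std::unordered_map<const void*, int> map_;
  int next_index_ = 0;
};

int main() {
  IndexMap map;
  int a = 0, b = 0;
  int index;
  map.LookupOrInsert(&a, &index);              // first sight: index 0, false
  map.LookupOrInsert(&b, &index);              // first sight: index 1, false
  bool seen = map.LookupOrInsert(&a, &index);  // reuse: index 0, true
  std::printf("%d %d\n", seen, index);         // prints "1 0"
  return 0;
}
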
diff --git a/src/source-position-table.cc b/src/source-position-table.cc
index ef6d077..35d8e7c 100644
--- a/src/source-position-table.cc
+++ b/src/source-position-table.cc
@@ -47,17 +47,19 @@
 }
 
 // Helper: Encode an integer.
-void EncodeInt(ZoneVector<byte>& bytes, int value) {
+template <typename T>
+void EncodeInt(ZoneVector<byte>& bytes, T value) {
   // Zig-zag encoding.
-  static const int kShift = kIntSize * kBitsPerByte - 1;
+  static const int kShift = sizeof(T) * kBitsPerByte - 1;
   value = ((value << 1) ^ (value >> kShift));
   DCHECK_GE(value, 0);
-  unsigned int encoded = static_cast<unsigned int>(value);
+  auto encoded = static_cast<typename std::make_unsigned<T>::type>(value);
   bool more;
   do {
     more = encoded > ValueBits::kMax;
-    bytes.push_back(MoreBit::encode(more) |
-                    ValueBits::encode(encoded & ValueBits::kMask));
+    byte current =
+        MoreBit::encode(more) | ValueBits::encode(encoded & ValueBits::kMask);
+    bytes.push_back(current);
     encoded >>= ValueBits::kSize;
   } while (more);
 }
@@ -73,25 +75,27 @@
 }
 
 // Helper: Decode an integer.
-void DecodeInt(ByteArray* bytes, int* index, int* v) {
+template <typename T>
+T DecodeInt(ByteArray* bytes, int* index) {
   byte current;
   int shift = 0;
-  int decoded = 0;
+  T decoded = 0;
   bool more;
   do {
     current = bytes->get((*index)++);
-    decoded |= ValueBits::decode(current) << shift;
+    decoded |= static_cast<typename std::make_unsigned<T>::type>(
+                   ValueBits::decode(current))
+               << shift;
     more = MoreBit::decode(current);
     shift += ValueBits::kSize;
   } while (more);
   DCHECK_GE(decoded, 0);
   decoded = (decoded >> 1) ^ (-(decoded & 1));
-  *v = decoded;
+  return decoded;
 }
 
 void DecodeEntry(ByteArray* bytes, int* index, PositionTableEntry* entry) {
-  int tmp;
-  DecodeInt(bytes, index, &tmp);
+  int tmp = DecodeInt<int>(bytes, index);
   if (tmp >= 0) {
     entry->is_statement = true;
     entry->code_offset = tmp;
@@ -99,7 +103,7 @@
     entry->is_statement = false;
     entry->code_offset = -(tmp + 1);
   }
-  DecodeInt(bytes, index, &entry->source_position);
+  entry->source_position = DecodeInt<int64_t>(bytes, index);
 }
 
 }  // namespace
@@ -115,11 +119,12 @@
 }
 
 void SourcePositionTableBuilder::AddPosition(size_t code_offset,
-                                             int source_position,
+                                             SourcePosition source_position,
                                              bool is_statement) {
   if (Omit()) return;
+  DCHECK(source_position.IsKnown());
   int offset = static_cast<int>(code_offset);
-  AddEntry({offset, source_position, is_statement});
+  AddEntry({offset, source_position.raw(), is_statement});
 }
 
 void SourcePositionTableBuilder::AddEntry(const PositionTableEntry& entry) {
@@ -152,7 +157,7 @@
        encoded.Advance(), raw++) {
     DCHECK(raw != raw_entries_.end());
     DCHECK_EQ(encoded.code_offset(), raw->code_offset);
-    DCHECK_EQ(encoded.source_position(), raw->source_position);
+    DCHECK_EQ(encoded.source_position().raw(), raw->source_position);
     DCHECK_EQ(encoded.is_statement(), raw->is_statement);
   }
   DCHECK(raw == raw_entries_.end());
@@ -170,7 +175,7 @@
 void SourcePositionTableIterator::Advance() {
   DCHECK(!done());
   DCHECK(index_ >= 0 && index_ <= table_->length());
-  if (index_ == table_->length()) {
+  if (index_ >= table_->length()) {
     index_ = kDone;
   } else {
     PositionTableEntry tmp;
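
The templated EncodeInt/DecodeInt above pair zig-zag encoding (so small negative deltas stay small) with a 7-bits-per-byte varint. A self-contained sketch of the same scheme, fixed to int64_t, with kMoreBit/kValueMask standing in for the MoreBit/ValueBits bit fields (constants and names are illustrative):

#include <cassert>
#include <cstdint>
#include <vector>

namespace {
const uint8_t kMoreBit = 0x80;    // set while further bytes follow
const uint8_t kValueMask = 0x7f;  // 7 payload bits per byte

void EncodeInt(std::vector<uint8_t>* bytes, int64_t value) {
  // Zig-zag: map 0, -1, 1, -2, 2, ... to 0, 1, 2, 3, 4, ...
  uint64_t encoded =
      (static_cast<uint64_t>(value) << 1) ^ static_cast<uint64_t>(value >> 63);
  do {
    uint8_t current = encoded & kValueMask;
    encoded >>= 7;
    if (encoded != 0) current |= kMoreBit;
    bytes->push_back(current);
  } while (encoded != 0);
}

int64_t DecodeInt(const std::vector<uint8_t>& bytes, size_t* index) {
  uint64_t decoded = 0;
  int shift = 0;
  uint8_t current;
  do {
    current = bytes[(*index)++];
    decoded |= static_cast<uint64_t>(current & kValueMask) << shift;
    shift += 7;
  } while (current & kMoreBit);
  // Undo the zig-zag mapping.
  return static_cast<int64_t>((decoded >> 1) ^ (~(decoded & 1) + 1));
}
}  // namespace

int main() {
  std::vector<uint8_t> bytes;
  EncodeInt(&bytes, -300);  // zig-zags to 599, encodes in two bytes
  size_t index = 0;
  assert(DecodeInt(bytes, &index) == -300);
  return 0;
}
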
diff --git a/src/source-position-table.h b/src/source-position-table.h
index 74c3b9e..f569ac9 100644
--- a/src/source-position-table.h
+++ b/src/source-position-table.h
@@ -7,7 +7,9 @@
 
 #include "src/assert-scope.h"
 #include "src/checks.h"
+#include "src/globals.h"
 #include "src/handles.h"
+#include "src/source-position.h"
 #include "src/zone/zone-containers.h"
 
 namespace v8 {
@@ -22,22 +24,23 @@
 struct PositionTableEntry {
   PositionTableEntry()
       : code_offset(0), source_position(0), is_statement(false) {}
-  PositionTableEntry(int offset, int source, bool statement)
+  PositionTableEntry(int offset, int64_t source, bool statement)
       : code_offset(offset), source_position(source), is_statement(statement) {}
 
   int code_offset;
-  int source_position;
+  int64_t source_position;
   bool is_statement;
 };
 
-class SourcePositionTableBuilder {
+class V8_EXPORT_PRIVATE SourcePositionTableBuilder {
  public:
   enum RecordingMode { OMIT_SOURCE_POSITIONS, RECORD_SOURCE_POSITIONS };
 
   SourcePositionTableBuilder(Zone* zone,
                              RecordingMode mode = RECORD_SOURCE_POSITIONS);
 
-  void AddPosition(size_t code_offset, int source_position, bool is_statement);
+  void AddPosition(size_t code_offset, SourcePosition source_position,
+                   bool is_statement);
 
   Handle<ByteArray> ToSourcePositionTable(Isolate* isolate,
                                           Handle<AbstractCode> code);
@@ -55,7 +58,7 @@
   PositionTableEntry previous_;  // Previously written entry, to compute delta.
 };
 
-class SourcePositionTableIterator {
+class V8_EXPORT_PRIVATE SourcePositionTableIterator {
  public:
   explicit SourcePositionTableIterator(ByteArray* byte_array);
 
@@ -65,9 +68,9 @@
     DCHECK(!done());
     return current_.code_offset;
   }
-  int source_position() const {
+  SourcePosition source_position() const {
     DCHECK(!done());
-    return current_.source_position;
+    return SourcePosition::FromRaw(current_.source_position);
   }
   bool is_statement() const {
     DCHECK(!done());
diff --git a/src/source-position.cc b/src/source-position.cc
new file mode 100644
index 0000000..e9f86db
--- /dev/null
+++ b/src/source-position.cc
@@ -0,0 +1,131 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/source-position.h"
+#include "src/compilation-info.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+std::ostream& operator<<(std::ostream& out, const SourcePositionInfo& pos) {
+  Handle<SharedFunctionInfo> function(pos.function);
+  Handle<Script> script(Script::cast(function->script()));
+  out << "<";
+  if (script->name()->IsString()) {
+    out << String::cast(script->name())->ToCString(DISALLOW_NULLS).get();
+  } else {
+    out << "unknown";
+  }
+  out << ":" << pos.line + 1 << ":" << pos.column + 1 << ">";
+  return out;
+}
+
+std::ostream& operator<<(std::ostream& out,
+                         const std::vector<SourcePositionInfo>& stack) {
+  bool first = true;
+  for (const SourcePositionInfo& pos : stack) {
+    if (!first) out << " inlined at ";
+    out << pos;
+    first = false;
+  }
+  return out;
+}
+
+std::ostream& operator<<(std::ostream& out, const SourcePosition& pos) {
+  if (pos.isInlined()) {
+    out << "<inlined(" << pos.InliningId() << "):";
+  } else {
+    out << "<not inlined:";
+  }
+  out << pos.ScriptOffset() << ">";
+  return out;
+}
+
+SourcePositionInfo SourcePosition::Info(
+    Handle<SharedFunctionInfo> function) const {
+  SourcePositionInfo result(*this, function);
+  Handle<Script> script(Script::cast(function->script()));
+  Script::PositionInfo pos;
+  if (Script::GetPositionInfo(script, ScriptOffset(), &pos,
+                              Script::WITH_OFFSET)) {
+    result.line = pos.line;
+    result.column = pos.column;
+  }
+  return result;
+}
+
+std::vector<SourcePositionInfo> SourcePosition::InliningStack(
+    CompilationInfo* cinfo) const {
+  SourcePosition pos = *this;
+  std::vector<SourcePositionInfo> stack;
+  while (pos.isInlined()) {
+    const auto& inl = cinfo->inlined_functions()[pos.InliningId()];
+    stack.push_back(pos.Info(inl.shared_info));
+    pos = inl.position.position;
+  }
+  stack.push_back(pos.Info(cinfo->shared_info()));
+  return stack;
+}
+
+std::vector<SourcePositionInfo> SourcePosition::InliningStack(
+    Handle<Code> code) const {
+  Handle<DeoptimizationInputData> deopt_data(
+      DeoptimizationInputData::cast(code->deoptimization_data()));
+  SourcePosition pos = *this;
+  std::vector<SourcePositionInfo> stack;
+  while (pos.isInlined()) {
+    InliningPosition inl =
+        deopt_data->InliningPositions()->get(pos.InliningId());
+    Handle<SharedFunctionInfo> function(
+        deopt_data->GetInlinedFunction(inl.inlined_function_id));
+    stack.push_back(pos.Info(function));
+    pos = inl.position;
+  }
+  Handle<SharedFunctionInfo> function(
+      SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo()));
+  stack.push_back(pos.Info(function));
+  return stack;
+}
+
+void SourcePosition::Print(std::ostream& out,
+                           SharedFunctionInfo* function) const {
+  Script* script = Script::cast(function->script());
+  Object* source_name = script->name();
+  Script::PositionInfo pos;
+  script->GetPositionInfo(ScriptOffset(), &pos, Script::WITH_OFFSET);
+  out << "<";
+  if (source_name->IsString()) {
+    out << String::cast(source_name)
+               ->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL)
+               .get();
+  } else {
+    out << "unknown";
+  }
+  out << ":" << pos.line + 1 << ":" << pos.column + 1 << ">";
+}
+
+void SourcePosition::Print(std::ostream& out, Code* code) const {
+  DeoptimizationInputData* deopt_data =
+      DeoptimizationInputData::cast(code->deoptimization_data());
+  if (!isInlined()) {
+    SharedFunctionInfo* function(
+        SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo()));
+    Print(out, function);
+  } else {
+    InliningPosition inl = deopt_data->InliningPositions()->get(InliningId());
+    if (inl.inlined_function_id == -1) {
+      out << *this;
+    } else {
+      SharedFunctionInfo* function =
+          deopt_data->GetInlinedFunction(inl.inlined_function_id);
+      Print(out, function);
+    }
+    out << " inlined at ";
+    inl.position.Print(out, code);
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/source-position.h b/src/source-position.h
index 2d36e97..aa7d31b 100644
--- a/src/source-position.h
+++ b/src/source-position.h
@@ -9,77 +9,114 @@
 
 #include "src/flags.h"
 #include "src/globals.h"
+#include "src/handles.h"
 #include "src/utils.h"
 
 namespace v8 {
 namespace internal {
 
-// This class encapsulates encoding and decoding of sources positions from
-// which hydrogen values originated.
-// When FLAG_track_hydrogen_positions is set this object encodes the
-// identifier of the inlining and absolute offset from the start of the
-// inlined function.
-// When the flag is not set we simply track absolute offset from the
-// script start.
-class SourcePosition {
+class Code;
+class CompilationInfo;
+class Script;
+class SharedFunctionInfo;
+struct SourcePositionInfo;
+
+// SourcePosition stores
+// - script_offset (31 bit non-negative int or kNoSourcePosition)
+// - inlining_id (16 bit non-negative int or kNotInlined).
+//
+// A defined inlining_id refers to positions in
+// CompilationInfo::inlined_functions or
+// DeoptimizationInputData::InliningPositions, depending on the compilation
+// stage.
+class SourcePosition final {
  public:
-  static SourcePosition Unknown() {
-    return SourcePosition::FromRaw(kNoPosition);
+  explicit SourcePosition(int script_offset, int inlining_id = kNotInlined)
+      : value_(0) {
+    SetScriptOffset(script_offset);
+    SetInliningId(inlining_id);
   }
 
-  bool IsUnknown() const { return value_ == kNoPosition; }
+  static SourcePosition Unknown() { return SourcePosition(kNoSourcePosition); }
+  bool IsKnown() const {
+    return ScriptOffset() != kNoSourcePosition || InliningId() != kNotInlined;
+  }
+  bool isInlined() const { return InliningId() != kNotInlined; }
 
-  uint32_t position() const { return PositionField::decode(value_); }
-  void set_position(uint32_t position) {
-    if (FLAG_hydrogen_track_positions) {
-      value_ = static_cast<uint32_t>(PositionField::update(value_, position));
-    } else {
-      value_ = position;
-    }
+  std::vector<SourcePositionInfo> InliningStack(Handle<Code> code) const;
+  std::vector<SourcePositionInfo> InliningStack(CompilationInfo* code) const;
+
+  void Print(std::ostream& out, Code* function) const;
+
+  int ScriptOffset() const { return ScriptOffsetField::decode(value_) - 1; }
+  int InliningId() const { return InliningIdField::decode(value_) - 1; }
+
+  void SetScriptOffset(int script_offset) {
+    DCHECK(script_offset <= ScriptOffsetField::kMax - 2);
+    DCHECK(script_offset >= kNoSourcePosition);
+    value_ = ScriptOffsetField::update(value_, script_offset + 1);
+  }
+  void SetInliningId(int inlining_id) {
+    DCHECK(inlining_id <= InliningIdField::kMax - 2);
+    DCHECK(inlining_id >= kNotInlined);
+    value_ = InliningIdField::update(value_, inlining_id + 1);
   }
 
-  uint32_t inlining_id() const { return InliningIdField::decode(value_); }
-  void set_inlining_id(uint32_t inlining_id) {
-    if (FLAG_hydrogen_track_positions) {
-      value_ =
-          static_cast<uint32_t>(InliningIdField::update(value_, inlining_id));
-    }
-  }
+  static const int kNotInlined = -1;
+  STATIC_ASSERT(kNoSourcePosition == -1);
 
-  uint32_t raw() const { return value_; }
-
- private:
-  static const uint32_t kNoPosition = static_cast<uint32_t>(kNoSourcePosition);
-  typedef BitField<uint32_t, 0, 9> InliningIdField;
-
-  // Offset from the start of the inlined function.
-  typedef BitField<uint32_t, 9, 23> PositionField;
-
-  friend class HPositionInfo;
-  friend class Deoptimizer;
-
-  static SourcePosition FromRaw(uint32_t raw_position) {
-    SourcePosition position;
-    position.value_ = raw_position;
+  int64_t raw() const { return static_cast<int64_t>(value_); }
+  static SourcePosition FromRaw(int64_t raw) {
+    SourcePosition position = Unknown();
+    DCHECK_GE(raw, 0);
+    position.value_ = static_cast<uint64_t>(raw);
     return position;
   }
 
-  // If FLAG_hydrogen_track_positions is set contains bitfields InliningIdField
-  // and PositionField.
-  // Otherwise contains absolute offset from the script start.
-  uint32_t value_;
+ private:
+  void Print(std::ostream& out, SharedFunctionInfo* function) const;
+  SourcePositionInfo Info(Handle<SharedFunctionInfo> script) const;
+
+  // InliningId is in the high bits for better compression in
+  // SourcePositionTable.
+  typedef BitField64<int, 0, 31> ScriptOffsetField;
+  typedef BitField64<int, 31, 16> InliningIdField;
+  // Leaving the highest bit untouched to allow for signed conversion.
+  uint64_t value_;
 };
 
-inline std::ostream& operator<<(std::ostream& os, const SourcePosition& p) {
-  if (p.IsUnknown()) {
-    return os << "<?>";
-  } else if (FLAG_hydrogen_track_positions) {
-    return os << "<" << p.inlining_id() << ":" << p.position() << ">";
-  } else {
-    return os << "<0:" << p.raw() << ">";
-  }
+inline bool operator==(const SourcePosition& lhs, const SourcePosition& rhs) {
+  return lhs.raw() == rhs.raw();
 }
 
+inline bool operator!=(const SourcePosition& lhs, const SourcePosition& rhs) {
+  return !(lhs == rhs);
+}
+
+struct InliningPosition {
+  // position of the inlined call
+  SourcePosition position = SourcePosition::Unknown();
+
+  // references position in DeoptimizationInputData::literals()
+  int inlined_function_id;
+};
+
+struct SourcePositionInfo {
+  explicit SourcePositionInfo(SourcePosition pos, Handle<SharedFunctionInfo> f)
+      : position(pos), function(f) {}
+
+  SourcePosition position;
+  Handle<SharedFunctionInfo> function;
+  int line = -1;
+  int column = -1;
+};
+
+std::ostream& operator<<(std::ostream& out, const SourcePosition& pos);
+
+std::ostream& operator<<(std::ostream& out, const SourcePositionInfo& pos);
+std::ostream& operator<<(std::ostream& out,
+                         const std::vector<SourcePositionInfo>& stack);
+
 }  // namespace internal
 }  // namespace v8
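
The new SourcePosition packs a 31-bit script offset into the low bits of one 64-bit word and a 16-bit inlining id above it, each biased by +1 so that the -1 sentinels (kNoSourcePosition, kNotInlined) encode as an all-zero word. A standalone sketch of that encoding without V8's BitField64 helpers (class shape and names are illustrative; only the widths and the bias follow the diff):

#include <cassert>
#include <cstdint>

class PackedPosition {
 public:
  static const int kNone = -1;  // plays the role of both -1 sentinels

  explicit PackedPosition(int script_offset, int inlining_id = kNone) {
    // Store each field biased by +1, so kNone (-1) becomes the zero pattern.
    value_ = (static_cast<uint64_t>(script_offset + 1) & kOffsetMask) |
             ((static_cast<uint64_t>(inlining_id + 1) & kIdMask) << 31);
  }

  int ScriptOffset() const {
    return static_cast<int>(value_ & kOffsetMask) - 1;
  }
  int InliningId() const {
    return static_cast<int>((value_ >> 31) & kIdMask) - 1;
  }
  bool IsInlined() const { return InliningId() != kNone; }

 private:
  static const uint64_t kOffsetMask = (uint64_t{1} << 31) - 1;  // 31 bits
  static const uint64_t kIdMask = (uint64_t{1} << 16) - 1;      // 16 bits
  uint64_t value_;  // bits 47..63 stay clear, keeping raw() non-negative
};

int main() {
  PackedPosition plain(1234);
  assert(plain.ScriptOffset() == 1234 && !plain.IsInlined());
  PackedPosition inlined(42, 3);
  assert(inlined.ScriptOffset() == 42 && inlined.InliningId() == 3);
  PackedPosition unknown(PackedPosition::kNone);
  assert(unknown.ScriptOffset() == PackedPosition::kNone);
  return 0;
}
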
 
diff --git a/src/startup-data-util.cc b/src/startup-data-util.cc
index 7c6d9eb..5f5472f 100644
--- a/src/startup-data-util.cc
+++ b/src/startup-data-util.cc
@@ -86,8 +86,9 @@
 #ifdef V8_USE_EXTERNAL_STARTUP_DATA
   char* natives;
   char* snapshot;
-  LoadFromFiles(RelativePath(&natives, directory_path, "natives_blob.bin"),
-                RelativePath(&snapshot, directory_path, "snapshot_blob.bin"));
+  LoadFromFiles(
+      base::RelativePath(&natives, directory_path, "natives_blob.bin"),
+      base::RelativePath(&snapshot, directory_path, "snapshot_blob.bin"));
   free(natives);
   free(snapshot);
 #endif  // V8_USE_EXTERNAL_STARTUP_DATA
diff --git a/src/string-builder.h b/src/string-builder.h
index 192603f..edc6476 100644
--- a/src/string-builder.h
+++ b/src/string-builder.h
@@ -180,7 +180,6 @@
     return target_array;
   }
 
-
  private:
   Handle<FixedArray> array_;
   int length_;
diff --git a/src/string-stream.cc b/src/string-stream.cc
index 3ae4580..acfb917 100644
--- a/src/string-stream.cc
+++ b/src/string-stream.cc
@@ -533,7 +533,7 @@
     print_name = true;
   } else if (isolate->context() != nullptr) {
     if (!receiver->IsJSObject()) {
-      receiver = receiver->GetRootMap(isolate)->prototype();
+      receiver = receiver->GetPrototypeChainRootMap(isolate)->prototype();
     }
 
     for (PrototypeIterator iter(isolate, JSObject::cast(receiver),
diff --git a/src/tracing/trace-event.cc b/src/tracing/trace-event.cc
index 440af19..97da1de 100644
--- a/src/tracing/trace-event.cc
+++ b/src/tracing/trace-event.cc
@@ -8,30 +8,26 @@
 
 #include "src/counters.h"
 #include "src/isolate.h"
+#include "src/tracing/traced-value.h"
 #include "src/v8.h"
 
 namespace v8 {
 namespace internal {
 namespace tracing {
 
-// A global flag used as a shortcut to check for the
-// v8.runtime-call-stats category due to its high frequency use.
-base::Atomic32 kRuntimeCallStatsTracingEnabled = false;
-
 v8::Platform* TraceEventHelper::GetCurrentPlatform() {
   return v8::internal::V8::GetCurrentPlatform();
 }
 
 void CallStatsScopedTracer::AddEndTraceEvent() {
   if (!has_parent_scope_ && p_data_->isolate) {
+    auto value = v8::tracing::TracedValue::Create();
+    p_data_->isolate->counters()->runtime_call_stats()->Dump(value.get());
     v8::internal::tracing::AddTraceEvent(
         TRACE_EVENT_PHASE_END, p_data_->category_group_enabled, p_data_->name,
         v8::internal::tracing::kGlobalScope, v8::internal::tracing::kNoId,
         v8::internal::tracing::kNoId, TRACE_EVENT_FLAG_NONE,
-        "runtime-call-stats", TRACE_STR_COPY(p_data_->isolate->counters()
-                                                 ->runtime_call_stats()
-                                                 ->Dump()
-                                                 .c_str()));
+        "runtime-call-stats", std::move(value));
   } else {
     v8::internal::tracing::AddTraceEvent(
         TRACE_EVENT_PHASE_END, p_data_->category_group_enabled, p_data_->name,
diff --git a/src/tracing/trace-event.h b/src/tracing/trace-event.h
index 35d2e15..a228608 100644
--- a/src/tracing/trace-event.h
+++ b/src/tracing/trace-event.h
@@ -49,33 +49,6 @@
 #define TRACE_ID_WITH_SCOPE(scope, id) \
   trace_event_internal::TraceID::WithScope(scope, id)
 
-// Sets the current sample state to the given category and name (both must be
-// constant strings). These states are intended for a sampling profiler.
-// Implementation note: we store category and name together because we don't
-// want the inconsistency/expense of storing two pointers.
-// |thread_bucket| is [0..2] and is used to statically isolate samples in one
-// thread from others.
-#define TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(bucket_number, category, \
-                                                  name)                    \
-  v8::internal::tracing::TraceEventSamplingStateScope<bucket_number>::Set( \
-      category "\0" name)
-
-// Returns a current sampling state of the given bucket.
-#define TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(bucket_number) \
-  v8::internal::tracing::TraceEventSamplingStateScope<bucket_number>::Current()
-
-// Creates a scope of a sampling state of the given bucket.
-//
-// {  // The sampling state is set within this scope.
-//    TRACE_EVENT_SAMPLING_STATE_SCOPE_FOR_BUCKET(0, "category", "name");
-//    ...;
-// }
-#define TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(bucket_number, category, \
-                                                     name)                    \
-  v8::internal::TraceEventSamplingStateScope<bucket_number>                   \
-      traceEventSamplingScope(category "\0" name);
-
-
 #define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \
   *INTERNAL_TRACE_EVENT_UID(category_group_enabled) &                    \
       (kEnabledForRecording_CategoryGroupEnabledFlags |                  \
@@ -138,12 +111,6 @@
 #define TRACE_EVENT_API_ATOMIC_STORE(var, value) \
   v8::base::NoBarrier_Store(&(var), (value))
 
-// The thread buckets for the sampling profiler.
-extern TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
-
-#define TRACE_EVENT_API_THREAD_BUCKET(thread_bucket) \
-  g_trace_state[thread_bucket]
-
 ////////////////////////////////////////////////////////////////////////////////
 
 // Implementation detail: trace event macros create temporary variables
@@ -282,20 +249,10 @@
   INTERNAL_TRACE_EVENT_UID(ScopedContext)                                  \
   INTERNAL_TRACE_EVENT_UID(scoped_context)(context);
 
-#define TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() \
-  base::NoBarrier_Load(&v8::internal::tracing::kRuntimeCallStatsTracingEnabled)
-
 #define TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name) \
   INTERNAL_TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name)
 
 #define INTERNAL_TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name)  \
-  {                                                                            \
-    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(                                    \
-        TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats"));                        \
-    base::NoBarrier_Store(                                                     \
-        &v8::internal::tracing::kRuntimeCallStatsTracingEnabled,               \
-        INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE());     \
-  }                                                                            \
   INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group);                      \
   v8::internal::tracing::CallStatsScopedTracer INTERNAL_TRACE_EVENT_UID(       \
       tracer);                                                                 \
@@ -613,48 +570,6 @@
   Data data_;
 };
 
-// Used by TRACE_EVENT_BINARY_EFFICIENTx macro. Do not use directly.
-class ScopedTraceBinaryEfficient {
- public:
-  ScopedTraceBinaryEfficient(const char* category_group, const char* name);
-  ~ScopedTraceBinaryEfficient();
-
- private:
-  const uint8_t* category_group_enabled_;
-  const char* name_;
-  uint64_t event_handle_;
-};
-
-// TraceEventSamplingStateScope records the current sampling state
-// and sets a new sampling state. When the scope exists, it restores
-// the sampling state having recorded.
-template <size_t BucketNumber>
-class TraceEventSamplingStateScope {
- public:
-  explicit TraceEventSamplingStateScope(const char* category_and_name) {
-    previous_state_ = TraceEventSamplingStateScope<BucketNumber>::Current();
-    TraceEventSamplingStateScope<BucketNumber>::Set(category_and_name);
-  }
-
-  ~TraceEventSamplingStateScope() {
-    TraceEventSamplingStateScope<BucketNumber>::Set(previous_state_);
-  }
-
-  static V8_INLINE const char* Current() {
-    return reinterpret_cast<const char*>(
-        TRACE_EVENT_API_ATOMIC_LOAD(g_trace_state[BucketNumber]));
-  }
-
-  static V8_INLINE void Set(const char* category_and_name) {
-    TRACE_EVENT_API_ATOMIC_STORE(g_trace_state[BucketNumber],
-                                 reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>(
-                                     const_cast<char*>(category_and_name)));
-  }
-
- private:
-  const char* previous_state_;
-};
-
 // Do not use directly.
 class CallStatsScopedTracer {
  public:
diff --git a/src/tracing/traced-value.cc b/src/tracing/traced-value.cc
new file mode 100644
index 0000000..81be623
--- /dev/null
+++ b/src/tracing/traced-value.cc
@@ -0,0 +1,203 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/tracing/traced-value.h"
+
+#include "src/base/platform/platform.h"
+#include "src/conversions.h"
+
+namespace v8 {
+namespace tracing {
+
+namespace {
+
+#define DCHECK_CURRENT_CONTAINER_IS(x) DCHECK_EQ(x, nesting_stack_.back())
+#define DCHECK_CONTAINER_STACK_DEPTH_EQ(x) DCHECK_EQ(x, nesting_stack_.size())
+#ifdef DEBUG
+const bool kStackTypeDict = false;
+const bool kStackTypeArray = true;
+#define DEBUG_PUSH_CONTAINER(x) nesting_stack_.push_back(x)
+#define DEBUG_POP_CONTAINER() nesting_stack_.pop_back()
+#else
+#define DEBUG_PUSH_CONTAINER(x) ((void)0)
+#define DEBUG_POP_CONTAINER() ((void)0)
+#endif
+
+std::string EscapeString(const std::string& value) {
+  std::string result;
+  result.reserve(value.length() + 2);
+  result += '"';
+  size_t length = value.length();
+  char number_buffer[10];
+  for (size_t src = 0; src < length; ++src) {
+    char c = value[src];
+    switch (c) {
+      case '\t':
+        result += "\\t";
+        break;
+      case '\n':
+        result += "\\n";
+        break;
+      case '\"':
+        result += "\\\"";
+        break;
+      case '\\':
+        result += "\\\\";
+        break;
+      default:
+        if (c < '\040') {
+          base::OS::SNPrintF(
+              number_buffer, arraysize(number_buffer), "\\u%04X",
+              static_cast<unsigned>(static_cast<unsigned char>(c)));
+          result += number_buffer;
+        } else {
+          result += c;
+        }
+    }
+  }
+  result += '"';
+  return result;
+}
+
+}  // namespace
+
+std::unique_ptr<TracedValue> TracedValue::Create() {
+  return std::unique_ptr<TracedValue>(new TracedValue());
+}
+
+TracedValue::TracedValue() : first_item_(true) {
+  DEBUG_PUSH_CONTAINER(kStackTypeDict);
+}
+
+TracedValue::~TracedValue() {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  DEBUG_POP_CONTAINER();
+  DCHECK_CONTAINER_STACK_DEPTH_EQ(0u);
+}
+
+void TracedValue::SetInteger(const char* name, int value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  WriteName(name);
+  data_ += std::to_string(value);
+}
+
+void TracedValue::SetDouble(const char* name, double value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  WriteName(name);
+  i::EmbeddedVector<char, 100> buffer;
+  data_ += DoubleToCString(value, buffer);
+}
+
+void TracedValue::SetBoolean(const char* name, bool value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  WriteName(name);
+  data_ += value ? "true" : "false";
+}
+
+void TracedValue::SetString(const char* name, const std::string& value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  WriteName(name);
+  data_ += EscapeString(value);
+}
+
+void TracedValue::BeginDictionary(const char* name) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  DEBUG_PUSH_CONTAINER(kStackTypeDict);
+  WriteName(name);
+  data_ += '{';
+  first_item_ = true;
+}
+
+void TracedValue::BeginArray(const char* name) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  DEBUG_PUSH_CONTAINER(kStackTypeArray);
+  WriteName(name);
+  data_ += '[';
+  first_item_ = true;
+}
+
+void TracedValue::AppendInteger(int value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+  WriteComma();
+  data_ += std::to_string(value);
+}
+
+void TracedValue::AppendLongInteger(int64_t value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+  WriteComma();
+  data_ += std::to_string(value);
+}
+
+void TracedValue::AppendDouble(double value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+  WriteComma();
+  i::EmbeddedVector<char, 100> buffer;
+  data_ += DoubleToCString(value, buffer);
+}
+
+void TracedValue::AppendBoolean(bool value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+  WriteComma();
+  data_ += value ? "true" : "false";
+}
+
+void TracedValue::AppendString(const std::string& value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+  WriteComma();
+  data_ += EscapeString(value);
+}
+
+void TracedValue::BeginDictionary() {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+  DEBUG_PUSH_CONTAINER(kStackTypeDict);
+  WriteComma();
+  data_ += '{';
+  first_item_ = true;
+}
+
+void TracedValue::BeginArray() {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+  DEBUG_PUSH_CONTAINER(kStackTypeArray);
+  WriteComma();
+  data_ += '[';
+  first_item_ = true;
+}
+
+void TracedValue::EndDictionary() {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  DEBUG_POP_CONTAINER();
+  data_ += '}';
+  first_item_ = false;
+}
+
+void TracedValue::EndArray() {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+  DEBUG_POP_CONTAINER();
+  data_ += ']';
+  first_item_ = false;
+}
+
+void TracedValue::WriteComma() {
+  if (first_item_) {
+    first_item_ = false;
+  } else {
+    data_ += ',';
+  }
+}
+
+void TracedValue::WriteName(const char* name) {
+  WriteComma();
+  data_ += '"';
+  data_ += name;
+  data_ += "\":";
+}
+
+void TracedValue::AppendAsTraceFormat(std::string* out) const {
+  *out += '{';
+  *out += data_;
+  *out += '}';
+}
+
+}  // namespace tracing
+}  // namespace v8
diff --git a/src/tracing/traced-value.h b/src/tracing/traced-value.h
new file mode 100644
index 0000000..b5c265c
--- /dev/null
+++ b/src/tracing/traced-value.h
@@ -0,0 +1,67 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TRACING_TRACED_VALUE_H_
+#define V8_TRACING_TRACED_VALUE_H_
+
+#include <stddef.h>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "include/v8-platform.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace tracing {
+
+class TracedValue : public ConvertableToTraceFormat {
+ public:
+  ~TracedValue() override;
+
+  static std::unique_ptr<TracedValue> Create();
+
+  void EndDictionary();
+  void EndArray();
+
+  // These methods assume that |name| is a long-lived "quoted" string.
+  void SetInteger(const char* name, int value);
+  void SetDouble(const char* name, double value);
+  void SetBoolean(const char* name, bool value);
+  void SetString(const char* name, const std::string& value);
+  void BeginDictionary(const char* name);
+  void BeginArray(const char* name);
+
+  void AppendInteger(int);
+  void AppendLongInteger(int64_t);
+  void AppendDouble(double);
+  void AppendBoolean(bool);
+  void AppendString(const std::string&);
+  void BeginArray();
+  void BeginDictionary();
+
+  // ConvertableToTraceFormat implementation.
+  void AppendAsTraceFormat(std::string* out) const override;
+
+ private:
+  TracedValue();
+
+  void WriteComma();
+  void WriteName(const char* name);
+
+#ifdef DEBUG
+  // In debug builds, checks the pairing of {Begin,End}{Dictionary,Array} calls
+  std::vector<bool> nesting_stack_;
+#endif
+
+  std::string data_;
+  bool first_item_;
+
+  DISALLOW_COPY_AND_ASSIGN(TracedValue);
+};
+
+}  // namespace tracing
+}  // namespace v8
+
+#endif  // V8_TRACING_TRACED_VALUE_H_
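
Since TracedValue replaces the stringified runtime-call-stats dump, a short usage sketch may help. It uses only the API declared in the header above and assumes it is built inside the V8 tree:

#include <cstdio>
#include <string>

#include "src/tracing/traced-value.h"

// Builds a small JSON payload with the API added above.
int main() {
  auto value = v8::tracing::TracedValue::Create();
  value->SetInteger("pid", 42);
  value->BeginArray("durations");
  value->AppendDouble(1.5);
  value->AppendDouble(2.25);
  value->EndArray();
  value->BeginDictionary("phase");
  value->SetString("name", "gc");
  value->EndDictionary();
  std::string out;
  value->AppendAsTraceFormat(&out);
  // Prints {"pid":42,"durations":[1.5,2.25],"phase":{"name":"gc"}}
  std::printf("%s\n", out.c_str());
  return 0;
}
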
diff --git a/src/tracing/tracing-category-observer.cc b/src/tracing/tracing-category-observer.cc
new file mode 100644
index 0000000..3fffd2f
--- /dev/null
+++ b/src/tracing/tracing-category-observer.cc
@@ -0,0 +1,58 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/tracing/tracing-category-observer.h"
+
+#include "src/flags.h"
+#include "src/tracing/trace-event.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace tracing {
+
+TracingCategoryObserver* TracingCategoryObserver::instance_ = nullptr;
+
+void TracingCategoryObserver::SetUp() {
+  TracingCategoryObserver::instance_ = new TracingCategoryObserver();
+  v8::internal::V8::GetCurrentPlatform()->AddTraceStateObserver(
+      TracingCategoryObserver::instance_);
+  TRACE_EVENT_WARMUP_CATEGORY(TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats"));
+  TRACE_EVENT_WARMUP_CATEGORY(
+      TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats_sampling"));
+  TRACE_EVENT_WARMUP_CATEGORY(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"));
+}
+
+void TracingCategoryObserver::TearDown() {
+  v8::internal::V8::GetCurrentPlatform()->RemoveTraceStateObserver(
+      TracingCategoryObserver::instance_);
+  delete TracingCategoryObserver::instance_;
+}
+
+void TracingCategoryObserver::OnTraceEnabled() {
+  bool enabled = false;
+  TRACE_EVENT_CATEGORY_GROUP_ENABLED(
+      TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats"), &enabled);
+  if (enabled) {
+    v8::internal::FLAG_runtime_stats |= ENABLED_BY_TRACING;
+  }
+  TRACE_EVENT_CATEGORY_GROUP_ENABLED(
+      TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats_sampling"), &enabled);
+  if (enabled) {
+    v8::internal::FLAG_runtime_stats |= ENABLED_BY_SAMPLING;
+  }
+  TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
+                                     &enabled);
+  if (enabled) {
+    v8::internal::FLAG_gc_stats |= ENABLED_BY_TRACING;
+  }
+}
+
+void TracingCategoryObserver::OnTraceDisabled() {
+  v8::internal::FLAG_runtime_stats &=
+      ~(ENABLED_BY_TRACING | ENABLED_BY_SAMPLING);
+  v8::internal::FLAG_gc_stats &= ~ENABLED_BY_TRACING;
+}
+
+}  // namespace tracing
+}  // namespace v8
diff --git a/src/tracing/tracing-category-observer.h b/src/tracing/tracing-category-observer.h
new file mode 100644
index 0000000..66dd2d7
--- /dev/null
+++ b/src/tracing/tracing-category-observer.h
@@ -0,0 +1,35 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TRACING_TRACING_CATEGORY_OBSERVER_H_
+#define V8_TRACING_TRACING_CATEGORY_OBSERVER_H_
+
+#include "include/v8-platform.h"
+
+namespace v8 {
+namespace tracing {
+
+class TracingCategoryObserver : public Platform::TraceStateObserver {
+ public:
+  enum Mode {
+    ENABLED_BY_NATIVE = 1 << 0,
+    ENABLED_BY_TRACING = 1 << 1,
+    ENABLED_BY_SAMPLING = 1 << 2,
+  };
+
+  static void SetUp();
+  static void TearDown();
+
+  // v8::Platform::TraceStateObserver
+  void OnTraceEnabled() final;
+  void OnTraceDisabled() final;
+
+ private:
+  static TracingCategoryObserver* instance_;
+};
+
+}  // namespace tracing
+}  // namespace v8
+
+#endif  // V8_TRACING_TRACING_CATEGORY_OBSERVER_H_
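
Note that the observer ORs ENABLED_BY_TRACING into FLAG_runtime_stats rather than assigning it, so a flag enabled natively (for example via --runtime-call-stats) survives a tracing session, and OnTraceDisabled clears only the tracing-owned bits. A minimal sketch of that bit discipline, with a plain int standing in for the V8 flag:

#include <cassert>

enum Mode {
  ENABLED_BY_NATIVE = 1 << 0,
  ENABLED_BY_TRACING = 1 << 1,
  ENABLED_BY_SAMPLING = 1 << 2,
};

int main() {
  int flag_runtime_stats = ENABLED_BY_NATIVE;  // as if set on the command line

  // OnTraceEnabled: add the tracing bit, keep whatever was set before.
  flag_runtime_stats |= ENABLED_BY_TRACING;
  assert(flag_runtime_stats & ENABLED_BY_NATIVE);

  // OnTraceDisabled: clear only the tracing-owned bits.
  flag_runtime_stats &= ~(ENABLED_BY_TRACING | ENABLED_BY_SAMPLING);
  assert(flag_runtime_stats == ENABLED_BY_NATIVE);  // native request survives
  return 0;
}
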
diff --git a/src/transitions-inl.h b/src/transitions-inl.h
index 828a673..df28c2c 100644
--- a/src/transitions-inl.h
+++ b/src/transitions-inl.h
@@ -26,7 +26,7 @@
 
 
 bool TransitionArray::HasPrototypeTransitions() {
-  return get(kPrototypeTransitionsIndex) != Smi::FromInt(0);
+  return get(kPrototypeTransitionsIndex) != Smi::kZero;
 }
 
 
diff --git a/src/transitions.cc b/src/transitions.cc
index 082ebc1..88c1549 100644
--- a/src/transitions.cc
+++ b/src/transitions.cc
@@ -395,7 +395,7 @@
                                                   int slack) {
   Handle<FixedArray> array = isolate->factory()->NewTransitionArray(
       LengthFor(number_of_transitions + slack));
-  array->set(kPrototypeTransitionsIndex, Smi::FromInt(0));
+  array->set(kPrototypeTransitionsIndex, Smi::kZero);
   array->set(kTransitionLengthIndex, Smi::FromInt(number_of_transitions));
   return Handle<TransitionArray>::cast(array);
 }
diff --git a/src/type-feedback-vector-inl.h b/src/type-feedback-vector-inl.h
index f70f018..58dfe33 100644
--- a/src/type-feedback-vector-inl.h
+++ b/src/type-feedback-vector-inl.h
@@ -128,6 +128,7 @@
     case BinaryOperationFeedback::kSignedSmall:
       return BinaryOperationHint::kSignedSmall;
     case BinaryOperationFeedback::kNumber:
+    case BinaryOperationFeedback::kNumberOrOddball:
       return BinaryOperationHint::kNumberOrOddball;
     case BinaryOperationFeedback::kString:
       return BinaryOperationHint::kString;
@@ -158,8 +159,6 @@
 void TypeFeedbackVector::ComputeCounts(int* with_type_info, int* generic,
                                        int* vector_ic_count,
                                        bool code_is_interpreted) {
-  Object* uninitialized_sentinel =
-      TypeFeedbackVector::RawUninitializedSentinel(GetIsolate());
   Object* megamorphic_sentinel =
       *TypeFeedbackVector::MegamorphicSentinel(GetIsolate());
   int with = 0;
@@ -170,47 +169,58 @@
     FeedbackVectorSlot slot = iter.Next();
     FeedbackVectorSlotKind kind = iter.kind();
 
-    Object* obj = Get(slot);
-    if (kind == FeedbackVectorSlotKind::GENERAL) {
-      continue;
-    }
-    total++;
-
-    if (obj != uninitialized_sentinel) {
-      if (kind == FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC ||
-          kind == FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC) {
-        // If we are not running interpreted code, we need to ignore
-        // the special ic slots for binaryop/compare used by the
-        // interpreter.
-        // TODO(mvstanton): Remove code_is_interpreted when full code
-        // is retired from service.
-        if (!code_is_interpreted) continue;
-
-        DCHECK(obj->IsSmi());
-        int op_feedback = static_cast<int>(Smi::cast(obj)->value());
-        if (kind == FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC) {
-          CompareOperationHint hint =
-              CompareOperationHintFromFeedback(op_feedback);
-          if (hint == CompareOperationHint::kAny) {
-            gen++;
-          } else if (hint != CompareOperationHint::kNone) {
-            with++;
-          }
-        } else {
-          DCHECK(kind == FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC);
-          BinaryOperationHint hint =
-              BinaryOperationHintFromFeedback(op_feedback);
-          if (hint == BinaryOperationHint::kAny) {
-            gen++;
-          } else if (hint != BinaryOperationHint::kNone) {
-            with++;
-          }
+    Object* const obj = Get(slot);
+    switch (kind) {
+      case FeedbackVectorSlotKind::CALL_IC:
+      case FeedbackVectorSlotKind::LOAD_IC:
+      case FeedbackVectorSlotKind::LOAD_GLOBAL_IC:
+      case FeedbackVectorSlotKind::KEYED_LOAD_IC:
+      case FeedbackVectorSlotKind::STORE_IC:
+      case FeedbackVectorSlotKind::KEYED_STORE_IC: {
+        if (obj->IsWeakCell() || obj->IsFixedArray() || obj->IsString()) {
+          with++;
+        } else if (obj == megamorphic_sentinel) {
+          gen++;
         }
-      } else if (obj->IsWeakCell() || obj->IsFixedArray() || obj->IsString()) {
-        with++;
-      } else if (obj == megamorphic_sentinel) {
-        gen++;
+        total++;
+        break;
       }
+      case FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC:
+      case FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC: {
+        // If we are not running interpreted code, we need to ignore the special
+        // IC slots for binaryop/compare used by the interpreter.
+        // TODO(mvstanton): Remove code_is_interpreted when full code is retired
+        // from service.
+        if (code_is_interpreted) {
+          int const feedback = Smi::cast(obj)->value();
+          if (kind == FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC) {
+            CompareOperationHint hint =
+                CompareOperationHintFromFeedback(feedback);
+            if (hint == CompareOperationHint::kAny) {
+              gen++;
+            } else if (hint != CompareOperationHint::kNone) {
+              with++;
+            }
+          } else {
+            DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC, kind);
+            BinaryOperationHint hint =
+                BinaryOperationHintFromFeedback(feedback);
+            if (hint == BinaryOperationHint::kAny) {
+              gen++;
+            } else if (hint != BinaryOperationHint::kNone) {
+              with++;
+            }
+          }
+          total++;
+        }
+        break;
+      }
+      case FeedbackVectorSlotKind::GENERAL:
+        break;
+      case FeedbackVectorSlotKind::INVALID:
+      case FeedbackVectorSlotKind::KINDS_NUMBER:
+        UNREACHABLE();
+        break;
     }
   }
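
The rewritten ComputeCounts classifies each slot the same way throughout: a fully generic hint counts as generic, an uninitialized one contributes nothing, and everything in between counts as "with type info". A toy version of that tally over an array of hints (the enum here is illustrative, not V8's):

#include <cstdio>

enum class Hint { kNone, kSignedSmall, kNumberOrOddball, kString, kAny };

int main() {
  const Hint slots[] = {Hint::kNone, Hint::kSignedSmall, Hint::kAny,
                        Hint::kString};
  int with = 0, generic = 0, total = 0;
  for (Hint hint : slots) {
    total++;
    if (hint == Hint::kAny) {
      generic++;  // megamorphic / fully generic feedback
    } else if (hint != Hint::kNone) {
      with++;  // some useful type feedback was collected
    }
  }
  std::printf("with=%d generic=%d total=%d\n", with, generic, total);
  return 0;
}
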
 
diff --git a/src/type-feedback-vector.cc b/src/type-feedback-vector.cc
index 30bc2d4..2ba9690 100644
--- a/src/type-feedback-vector.cc
+++ b/src/type-feedback-vector.cc
@@ -5,7 +5,7 @@
 #include "src/type-feedback-vector.h"
 
 #include "src/code-stubs.h"
-#include "src/ic/ic.h"
+#include "src/ic/ic-inl.h"
 #include "src/ic/ic-state.h"
 #include "src/objects.h"
 #include "src/type-feedback-vector-inl.h"
@@ -91,7 +91,7 @@
   array->set(kSlotsCountIndex, Smi::FromInt(slot_count));
   // Fill the bit-vector part with zeros.
   for (int i = 0; i < slot_kinds_length; i++) {
-    array->set(kReservedIndexCount + i, Smi::FromInt(0));
+    array->set(kReservedIndexCount + i, Smi::kZero);
   }
 
   Handle<TypeFeedbackMetadata> metadata =
@@ -121,7 +121,7 @@
   }
   DCHECK_EQ(name_count, name_index);
   metadata->set(kNamesTableIndex,
-                name_count ? static_cast<Object*>(*names) : Smi::FromInt(0));
+                name_count ? static_cast<Object*>(*names) : Smi::kZero);
 
   // It's important that the TypeFeedbackMetadata have a COW map, since it's
   // pointed to by both a SharedFunctionInfo and indirectly by closures through
@@ -241,13 +241,13 @@
 
   Handle<FixedArray> array = factory->NewFixedArray(length, TENURED);
   array->set(kMetadataIndex, *metadata);
-  array->set(kInvocationCountIndex, Smi::FromInt(0));
+  array->set(kInvocationCountIndex, Smi::kZero);
 
   DisallowHeapAllocation no_gc;
 
   // Ensure we can skip the write barrier
   Handle<Object> uninitialized_sentinel = UninitializedSentinel(isolate);
-  DCHECK_EQ(*factory->uninitialized_symbol(), *uninitialized_sentinel);
+  DCHECK_EQ(isolate->heap()->uninitialized_symbol(), *uninitialized_sentinel);
   for (int i = 0; i < slot_count;) {
     FeedbackVectorSlot slot(i);
     FeedbackVectorSlotKind kind = metadata->GetKind(slot);
@@ -256,16 +256,16 @@
 
     Object* value;
     if (kind == FeedbackVectorSlotKind::LOAD_GLOBAL_IC) {
-      value = *factory->empty_weak_cell();
+      value = isolate->heap()->empty_weak_cell();
     } else if (kind == FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC ||
                kind == FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC) {
-      value = Smi::FromInt(0);
+      value = Smi::kZero;
     } else {
       value = *uninitialized_sentinel;
     }
     array->set(index, value, SKIP_WRITE_BARRIER);
 
-    value = kind == FeedbackVectorSlotKind::CALL_IC ? Smi::FromInt(0)
+    value = kind == FeedbackVectorSlotKind::CALL_IC ? Smi::kZero
                                                     : *uninitialized_sentinel;
     for (int j = 1; j < entry_size; j++) {
       array->set(index + j, value, SKIP_WRITE_BARRIER);
@@ -351,7 +351,7 @@
         case FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC: {
           DCHECK(Get(slot)->IsSmi());
           // don't clear these smi slots.
-          // Set(slot, Smi::FromInt(0));
+          // Set(slot, Smi::kZero);
           break;
         }
         case FeedbackVectorSlotKind::GENERAL: {
@@ -657,7 +657,7 @@
   Isolate* isolate = GetIsolate();
   SetFeedback(*TypeFeedbackVector::UninitializedSentinel(isolate),
               SKIP_WRITE_BARRIER);
-  SetFeedbackExtra(Smi::FromInt(0), SKIP_WRITE_BARRIER);
+  SetFeedbackExtra(Smi::kZero, SKIP_WRITE_BARRIER);
 }
 
 void CallICNexus::ConfigureMonomorphicArray() {
@@ -733,18 +733,16 @@
   }
 }
 
-
 void StoreICNexus::ConfigureMonomorphic(Handle<Map> receiver_map,
-                                        Handle<Code> handler) {
+                                        Handle<Object> handler) {
   Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
   SetFeedback(*cell);
   SetFeedbackExtra(*handler);
 }
 
-
 void KeyedStoreICNexus::ConfigureMonomorphic(Handle<Name> name,
                                              Handle<Map> receiver_map,
-                                             Handle<Code> handler) {
+                                             Handle<Object> handler) {
   Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
   if (name.is_null()) {
     SetFeedback(*cell);
@@ -851,17 +849,10 @@
   DCHECK(array->length() >= 2);
   Object* second = array->get(1);
   if (second->IsWeakCell() || second->IsUndefined(isolate)) return 3;
-  DCHECK(second->IsCode() || second->IsSmi());
+  DCHECK(IC::IsHandler(second));
   return 2;
 }
 
-#ifdef DEBUG  // Only used by DCHECKs below.
-bool IsHandler(Object* object) {
-  return object->IsSmi() ||
-         (object->IsCode() && Code::cast(object)->is_handler());
-}
-#endif
-
 }  // namespace
 
 int FeedbackNexus::ExtractMaps(MapHandleList* maps) const {
@@ -914,7 +905,7 @@
         Map* array_map = Map::cast(cell->value());
         if (array_map == *map) {
           Object* code = array->get(i + increment - 1);
-          DCHECK(IsHandler(code));
+          DCHECK(IC::IsHandler(code));
           return handle(code, isolate);
         }
       }
@@ -925,7 +916,7 @@
       Map* cell_map = Map::cast(cell->value());
       if (cell_map == *map) {
         Object* code = GetFeedbackExtra();
-        DCHECK(IsHandler(code));
+        DCHECK(IC::IsHandler(code));
         return handle(code, isolate);
       }
     }
@@ -952,7 +943,7 @@
       // Be sure to skip handlers whose maps have been cleared.
       if (!cell->cleared()) {
         Object* code = array->get(i + increment - 1);
-        DCHECK(IsHandler(code));
+        DCHECK(IC::IsHandler(code));
         code_list->Add(handle(code, isolate));
         count++;
       }
@@ -961,7 +952,7 @@
     WeakCell* cell = WeakCell::cast(feedback);
     if (!cell->cleared()) {
       Object* code = GetFeedbackExtra();
-      DCHECK(IsHandler(code));
+      DCHECK(IC::IsHandler(code));
       code_list->Add(handle(code, isolate));
       count++;
     }
diff --git a/src/type-feedback-vector.h b/src/type-feedback-vector.h
index af69499..3bb51c1 100644
--- a/src/type-feedback-vector.h
+++ b/src/type-feedback-vector.h
@@ -609,7 +609,7 @@
 
   void Clear(Code* host);
 
-  void ConfigureMonomorphic(Handle<Map> receiver_map, Handle<Code> handler);
+  void ConfigureMonomorphic(Handle<Map> receiver_map, Handle<Object> handler);
 
   void ConfigurePolymorphic(MapHandleList* maps,
                             List<Handle<Object>>* handlers);
@@ -637,7 +637,7 @@
 
   // name can be a null handle for element loads.
   void ConfigureMonomorphic(Handle<Name> name, Handle<Map> receiver_map,
-                            Handle<Code> handler);
+                            Handle<Object> handler);
   // name can be null.
   void ConfigurePolymorphic(Handle<Name> name, MapHandleList* maps,
                             List<Handle<Object>>* handlers);
diff --git a/src/type-hints.cc b/src/type-hints.cc
index ff00eef..1c40c59 100644
--- a/src/type-hints.cc
+++ b/src/type-hints.cc
@@ -67,6 +67,8 @@
       return os << "SimdValue";
     case ToBooleanHint::kAny:
       return os << "Any";
+    case ToBooleanHint::kNeedsMap:
+      return os << "NeedsMap";
   }
   UNREACHABLE();
   return os;
@@ -87,5 +89,26 @@
   return os;
 }
 
+std::ostream& operator<<(std::ostream& os, const StringAddFlags& flags) {
+  switch (flags) {
+    case STRING_ADD_CHECK_NONE:
+      return os << "CheckNone";
+    case STRING_ADD_CHECK_LEFT:
+      return os << "CheckLeft";
+    case STRING_ADD_CHECK_RIGHT:
+      return os << "CheckRight";
+    case STRING_ADD_CHECK_BOTH:
+      return os << "CheckBoth";
+    case STRING_ADD_CONVERT_LEFT:
+      return os << "ConvertLeft";
+    case STRING_ADD_CONVERT_RIGHT:
+      return os << "ConvertRight";
+    case STRING_ADD_CONVERT:
+      break;
+  }
+  UNREACHABLE();
+  return os;
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/type-hints.h b/src/type-hints.h
index cdf4709..e6138c7 100644
--- a/src/type-hints.h
+++ b/src/type-hints.h
@@ -55,7 +55,9 @@
   kHeapNumber = 1u << 7,
   kSimdValue = 1u << 8,
   kAny = kUndefined | kBoolean | kNull | kSmallInteger | kReceiver | kString |
-         kSymbol | kHeapNumber | kSimdValue
+         kSymbol | kHeapNumber | kSimdValue,
+  kNeedsMap = kReceiver | kString | kSymbol | kHeapNumber | kSimdValue,
+  kCanBeUndetectable = kReceiver,
 };
 
 std::ostream& operator<<(std::ostream&, ToBooleanHint);
@@ -66,6 +68,23 @@
 
 DEFINE_OPERATORS_FOR_FLAGS(ToBooleanHints)
 
+enum StringAddFlags {
+  // Omit both parameter checks.
+  STRING_ADD_CHECK_NONE = 0,
+  // Check left parameter.
+  STRING_ADD_CHECK_LEFT = 1 << 0,
+  // Check right parameter.
+  STRING_ADD_CHECK_RIGHT = 1 << 1,
+  // Check both parameters.
+  STRING_ADD_CHECK_BOTH = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT,
+  // Convert parameters when check fails (instead of throwing an exception).
+  STRING_ADD_CONVERT = 1 << 2,
+  STRING_ADD_CONVERT_LEFT = STRING_ADD_CHECK_LEFT | STRING_ADD_CONVERT,
+  STRING_ADD_CONVERT_RIGHT = STRING_ADD_CHECK_RIGHT | STRING_ADD_CONVERT
+};
+
+std::ostream& operator<<(std::ostream& os, const StringAddFlags& flags);
+
 }  // namespace internal
 }  // namespace v8
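
The StringAddFlags printer above covers every concrete combination; bare STRING_ADD_CONVERT is not a valid state on its own, which is why the switch lets it fall through to UNREACHABLE(). A sketch showing how the composed values decompose (enum values copied from the diff):

#include <cassert>

enum StringAddFlags {
  STRING_ADD_CHECK_NONE = 0,
  STRING_ADD_CHECK_LEFT = 1 << 0,
  STRING_ADD_CHECK_RIGHT = 1 << 1,
  STRING_ADD_CHECK_BOTH = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT,
  STRING_ADD_CONVERT = 1 << 2,
  STRING_ADD_CONVERT_LEFT = STRING_ADD_CHECK_LEFT | STRING_ADD_CONVERT,
  STRING_ADD_CONVERT_RIGHT = STRING_ADD_CHECK_RIGHT | STRING_ADD_CONVERT
};

int main() {
  // ConvertLeft means: check the left operand, and convert instead of
  // throwing when the check fails.
  assert(STRING_ADD_CONVERT_LEFT & STRING_ADD_CHECK_LEFT);
  assert(STRING_ADD_CONVERT_LEFT & STRING_ADD_CONVERT);
  assert(!(STRING_ADD_CONVERT_LEFT & STRING_ADD_CHECK_RIGHT));
  return 0;
}
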
 
diff --git a/src/type-info.cc b/src/type-info.cc
index ce0ab6c..fd3a2dc 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -210,19 +210,20 @@
   return AstType::None();
 }
 
-AstType* BinaryOpHintToType(BinaryOperationHint hint) {
+AstType* BinaryOpFeedbackToType(int hint) {
   switch (hint) {
-    case BinaryOperationHint::kNone:
+    case BinaryOperationFeedback::kNone:
       return AstType::None();
-    case BinaryOperationHint::kSignedSmall:
+    case BinaryOperationFeedback::kSignedSmall:
       return AstType::SignedSmall();
-    case BinaryOperationHint::kSigned32:
-      return AstType::Signed32();
-    case BinaryOperationHint::kNumberOrOddball:
+    case BinaryOperationFeedback::kNumber:
       return AstType::Number();
-    case BinaryOperationHint::kString:
+    case BinaryOperationFeedback::kString:
       return AstType::String();
-    case BinaryOperationHint::kAny:
+    case BinaryOperationFeedback::kNumberOrOddball:
+      return AstType::NumberOrOddball();
+    case BinaryOperationFeedback::kAny:
+    default:
       return AstType::Any();
   }
   UNREACHABLE();
@@ -262,14 +263,33 @@
     CompareICStub stub(code->stub_key(), isolate());
     AstType* left_type_from_ic =
         CompareICState::StateToType(zone(), stub.left());
-    *left_type = AstType::Union(*left_type, left_type_from_ic, zone());
     AstType* right_type_from_ic =
         CompareICState::StateToType(zone(), stub.right());
-    *right_type = AstType::Union(*right_type, right_type_from_ic, zone());
     AstType* combined_type_from_ic =
         CompareICState::StateToType(zone(), stub.state(), map);
-    *combined_type =
-        AstType::Union(*combined_type, combined_type_from_ic, zone());
+    // Full-codegen collects lhs and rhs feedback separately and Crankshaft
+    // could use this information to optimize better. So if combining the
+    // feedback has made the feedback less precise, we should use the feedback
+    // only from Full-codegen. If the union of the feedback from Full-codegen
+    // is the same as that of Ignition, there is no need to combine feedback
+    // from Ignition.
+    AstType* combined_type_from_fcg = AstType::Union(
+        left_type_from_ic,
+        AstType::Union(right_type_from_ic, combined_type_from_ic, zone()),
+        zone());
+    if (combined_type_from_fcg == *left_type) {
+      // Full-codegen collects information about lhs, rhs and result types
+      // separately. So just retain that information.
+      *left_type = left_type_from_ic;
+      *right_type = right_type_from_ic;
+      *combined_type = combined_type_from_ic;
+    } else {
+      // Combine Ignition and Full-codegen feedback.
+      *left_type = AstType::Union(*left_type, left_type_from_ic, zone());
+      *right_type = AstType::Union(*right_type, right_type_from_ic, zone());
+      *combined_type =
+          AstType::Union(*combined_type, combined_type_from_ic, zone());
+    }
   }
 }
 
@@ -299,7 +319,7 @@
   DCHECK(!slot.IsInvalid());
   BinaryOpICNexus nexus(feedback_vector_, slot);
   *left = *right = *result =
-      BinaryOpHintToType(nexus.GetBinaryOperationFeedback());
+      BinaryOpFeedbackToType(Smi::cast(nexus.GetFeedback())->value());
   *fixed_right_arg = Nothing<int>();
   *allocation_site = Handle<AllocationSite>::null();
 
@@ -311,9 +331,29 @@
   BinaryOpICState state(isolate(), code->extra_ic_state());
   DCHECK_EQ(op, state.op());
 
-  *left = AstType::Union(*left, state.GetLeftType(), zone());
-  *right = AstType::Union(*right, state.GetRightType(), zone());
-  *result = AstType::Union(*result, state.GetResultType(), zone());
+  // Full-codegen collects lhs and rhs feedback separately and Crankshaft
+  // could use this information to optimize better. So if combining the
+  // feedback has made the feedback less precise, we should use the feedback
+  // only from Full-codegen. If the union of the feedback from Full-codegen
+  // is the same as that of Ignition, there is no need to combine feedback
+  // from Ignition.
+  AstType* combined_type_from_fcg = AstType::Union(
+      state.GetLeftType(),
+      AstType::Union(state.GetRightType(), state.GetResultType(), zone()),
+      zone());
+  if (combined_type_from_fcg == *left) {
+    // Full-codegen collects information about lhs, rhs and result types
+  // separately. So just retain that information.
+    *left = state.GetLeftType();
+    *right = state.GetRightType();
+    *result = state.GetResultType();
+  } else {
+    // Combine Ignition and Full-codegen feedback.
+    *left = AstType::Union(*left, state.GetLeftType(), zone());
+    *right = AstType::Union(*right, state.GetRightType(), zone());
+    *result = AstType::Union(*result, state.GetResultType(), zone());
+  }
+  // Ignition does not collect this feedback.
   *fixed_right_arg = state.fixed_right_arg();
 
   AllocationSite* first_allocation_site = code->FindFirstAllocationSite();
@@ -334,7 +374,8 @@
 
   DCHECK(!slot.IsInvalid());
   BinaryOpICNexus nexus(feedback_vector_, slot);
-  AstType* type = BinaryOpHintToType(nexus.GetBinaryOperationFeedback());
+  AstType* type =
+      BinaryOpFeedbackToType(Smi::cast(nexus.GetFeedback())->value());
 
   if (!object->IsCode()) return type;
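
The heuristic above can be paraphrased: if the union of Full-codegen's left, right and result types equals the already-combined Ignition type, Full-codegen's per-operand split is at least as precise, so keep it; otherwise merge both sources conservatively. A sketch over a toy type lattice encoded as bit sets (the lattice is illustrative, not V8's AstType):

#include <cassert>
#include <cstdint>

// Toy lattice: a type is a set of primitive tags; Union is bitwise OR.
using Type = uint32_t;
const Type kSignedSmall = 1 << 0;
const Type kNumber = kSignedSmall | 1 << 1;  // SignedSmall is a subtype

void CombineFeedback(Type fcg_left, Type fcg_right, Type fcg_result,
                     Type* left, Type* right, Type* result) {
  Type combined_from_fcg = fcg_left | fcg_right | fcg_result;
  if (combined_from_fcg == *left) {
    // Full-codegen subsumes the Ignition feedback; keep its precise split.
    *left = fcg_left;
    *right = fcg_right;
    *result = fcg_result;
  } else {
    // Otherwise merge the two sources conservatively.
    *left |= fcg_left;
    *right |= fcg_right;
    *result |= fcg_result;
  }
}

int main() {
  // Ignition saw Number everywhere; Full-codegen saw SignedSmall operands.
  Type left = kNumber, right = kNumber, result = kNumber;
  CombineFeedback(kSignedSmall, kSignedSmall, kNumber, &left, &right, &result);
  assert(left == kSignedSmall);  // the more precise per-operand type survives
  return 0;
}
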
 
diff --git a/src/unicode.cc b/src/unicode.cc
index 015f8a2..83d4a03 100644
--- a/src/unicode.cc
+++ b/src/unicode.cc
@@ -319,7 +319,7 @@
     } else {
       // Otherwise, process the previous byte and save the next byte for next
       // time.
-      DCHECK_EQ(0, *buffer);
+      DCHECK_EQ(0u, *buffer);
       *buffer = next;
       return t;
     }
diff --git a/src/unicode.h b/src/unicode.h
index 1299a8f..1b98a47 100644
--- a/src/unicode.h
+++ b/src/unicode.h
@@ -178,16 +178,16 @@
 struct Letter {
   static bool Is(uchar c);
 };
-struct ID_Start {
+struct V8_EXPORT_PRIVATE ID_Start {
   static bool Is(uchar c);
 };
-struct ID_Continue {
+struct V8_EXPORT_PRIVATE ID_Continue {
   static bool Is(uchar c);
 };
-struct WhiteSpace {
+struct V8_EXPORT_PRIVATE WhiteSpace {
   static bool Is(uchar c);
 };
-struct LineTerminator {
+struct V8_EXPORT_PRIVATE LineTerminator {
   static bool Is(uchar c);
 };
 struct ToLowercase {
diff --git a/src/uri.cc b/src/uri.cc
index de7bd9b..14e2214 100644
--- a/src/uri.cc
+++ b/src/uri.cc
@@ -51,7 +51,7 @@
     return false;
   }
 
-  if (value <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
+  if (value <= static_cast<uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
     buffer->Add(value);
   } else {
     buffer->Add(unibrow::Utf16::LeadSurrogate(value));
diff --git a/src/utils.h b/src/utils.h
index 314ea9b..bd5589c 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -922,7 +922,7 @@
   bool operator==(const BailoutId& other) const { return id_ == other.id_; }
   bool operator!=(const BailoutId& other) const { return id_ != other.id_; }
   friend size_t hash_value(BailoutId);
-  friend std::ostream& operator<<(std::ostream&, BailoutId);
+  V8_EXPORT_PRIVATE friend std::ostream& operator<<(std::ostream&, BailoutId);
 
  private:
   static const int kNoneId = -1;
@@ -964,7 +964,7 @@
 // I/O support.
 
 // Our version of printf().
-void PRINTF_FORMAT(1, 2) PrintF(const char* format, ...);
+V8_EXPORT_PRIVATE void PRINTF_FORMAT(1, 2) PrintF(const char* format, ...);
 void PRINTF_FORMAT(2, 3) PrintF(FILE* out, const char* format, ...);
 
 // Prepends the current process ID to the output.
@@ -1150,9 +1150,9 @@
 // Simple support to read a file into a 0-terminated C-string.
 // The returned buffer must be freed by the caller.
 // On return, *exists tells whether the file existed.
-Vector<const char> ReadFile(const char* filename,
-                            bool* exists,
-                            bool verbose = true);
+V8_EXPORT_PRIVATE Vector<const char> ReadFile(const char* filename,
+                                              bool* exists,
+                                              bool verbose = true);
 Vector<const char> ReadFile(FILE* file,
                             bool* exists,
                             bool verbose = true);
@@ -1597,6 +1597,86 @@
   }
 #endif  // V8_TARGET_LITTLE_ENDIAN
 }
+
+// A singly linked list that is threaded through its nodes. Entries in the
+// list are pointers to nodes; each node must provide a T** next() method
+// that returns the location where the pointer to the next node is stored.
+template <typename T>
+class ThreadedList final {
+ public:
+  ThreadedList() : head_(nullptr), tail_(&head_) {}
+  void Add(T* v) {
+    DCHECK_NULL(*tail_);
+    DCHECK_NULL(*v->next());
+    *tail_ = v;
+    tail_ = v->next();
+  }
+
+  void Clear() {
+    head_ = nullptr;
+    tail_ = &head_;
+  }
+
+  class Iterator final {
+   public:
+    Iterator& operator++() {
+      entry_ = (*entry_)->next();
+      return *this;
+    }
+    bool operator!=(const Iterator& other) { return entry_ != other.entry_; }
+    T* operator*() { return *entry_; }
+    Iterator& operator=(T* entry) {
+      T* next = *(*entry_)->next();
+      *entry->next() = next;
+      *entry_ = entry;
+      return *this;
+    }
+
+   private:
+    explicit Iterator(T** entry) : entry_(entry) {}
+
+    T** entry_;
+
+    friend class ThreadedList;
+  };
+
+  Iterator begin() { return Iterator(&head_); }
+  Iterator end() { return Iterator(tail_); }
+
+  void Rewind(Iterator reset_point) {
+    tail_ = reset_point.entry_;
+    *tail_ = nullptr;
+  }
+
+  void MoveTail(ThreadedList<T>* parent, Iterator location) {
+    if (parent->end() != location) {
+      DCHECK_NULL(*tail_);
+      *tail_ = *location;
+      tail_ = parent->tail_;
+      parent->Rewind(location);
+    }
+  }
+
+  bool is_empty() const { return head_ == nullptr; }
+
+  // Slow. For testing purposes.
+  int LengthForTest() {
+    int result = 0;
+    for (Iterator t = begin(); t != end(); ++t) ++result;
+    return result;
+  }
+  T* AtForTest(int i) {
+    Iterator t = begin();
+    while (i-- > 0) ++t;
+    return *t;
+  }
+
+ private:
+  T* head_;
+  T** tail_;
+  DISALLOW_COPY_AND_ASSIGN(ThreadedList);
+};
+
 }  // namespace internal
 }  // namespace v8
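
A minimal usage sketch for the ThreadedList added above (a sketch, not part
of the patch; Declaration is a hypothetical node type). The list threads
through the T** slot each node exposes via next():

    // Node type: owns the "next" slot the list threads through.
    struct Declaration {
      Declaration* next_ = nullptr;
      Declaration** next() { return &next_; }
    };

    void Example() {
      v8::internal::ThreadedList<Declaration> list;
      Declaration a, b;
      list.Add(&a);  // *tail_ = &a; tail_ now points at a.next_
      list.Add(&b);  // a.next_ now points at b
      for (Declaration* d : list) {
        // Visits a, then b; the links live inside the nodes themselves,
        // so no per-entry allocation is needed.
        (void)d;
      }
    }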
 
diff --git a/src/v8.cc b/src/v8.cc
index 08796f3..7f0230a 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -20,7 +20,7 @@
 #include "src/runtime-profiler.h"
 #include "src/snapshot/natives.h"
 #include "src/snapshot/snapshot.h"
-
+#include "src/tracing/tracing-category-observer.h"
 
 namespace v8 {
 namespace internal {
@@ -94,11 +94,13 @@
   CHECK(!platform_);
   CHECK(platform);
   platform_ = platform;
+  v8::tracing::TracingCategoryObserver::SetUp();
 }
 
 
 void V8::ShutdownPlatform() {
   CHECK(platform_);
+  v8::tracing::TracingCategoryObserver::TearDown();
   platform_ = NULL;
 }
 
diff --git a/src/v8.gyp b/src/v8.gyp
index 9a38247..020ec09 100644
--- a/src/v8.gyp
+++ b/src/v8.gyp
@@ -37,6 +37,7 @@
     'v8_enable_inspector%': 0,
     'mksnapshot_exec': '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)',
     'mkpeephole_exec': '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mkpeephole<(EXECUTABLE_SUFFIX)',
+    'v8_os_page_size%': 0,
   },
   'includes': ['../gypfiles/toolchain.gypi', '../gypfiles/features.gypi', 'inspector/inspector.gypi'],
   'targets': [
@@ -68,16 +69,6 @@
               'USING_V8_SHARED',
             ],
           },
-          'target_conditions': [
-            ['OS=="android" and _toolset=="target"', {
-              'libraries': [
-                '-llog',
-              ],
-              'include_dirs': [
-                'src/common/android/include',
-              ],
-            }],
-          ],
           'conditions': [
             ['OS=="mac"', {
               'xcode_settings': {
@@ -316,6 +307,9 @@
                   ['v8_vector_stores!=0', {
                     'mksnapshot_flags': ['--vector-stores'],
                   }],
+                  ['v8_os_page_size!=0', {
+                    'mksnapshot_flags': ['--v8_os_page_size', '<(v8_os_page_size)'],
+                  }],
                 ],
               },
               'conditions': [
@@ -433,6 +427,8 @@
         'asmjs/asm-types.h',
         'asmjs/asm-wasm-builder.cc',
         'asmjs/asm-wasm-builder.h',
+        'asmjs/switch-logic.h',
+        'asmjs/switch-logic.cc',
         'assembler.cc',
         'assembler.h',
         'assert-scope.h',
@@ -500,6 +496,7 @@
         'builtins/builtins-math.cc',
         'builtins/builtins-number.cc',
         'builtins/builtins-object.cc',
+        'builtins/builtins-promise.cc',
         'builtins/builtins-proxy.cc',
         'builtins/builtins-reflect.cc',
         'builtins/builtins-regexp.cc',
@@ -709,14 +706,16 @@
         'compiler/scheduler.h',
         'compiler/select-lowering.cc',
         'compiler/select-lowering.h',
+        'compiler/simd-scalar-lowering.cc',
+        'compiler/simd-scalar-lowering.h',
         'compiler/simplified-lowering.cc',
         'compiler/simplified-lowering.h',
         'compiler/simplified-operator-reducer.cc',
         'compiler/simplified-operator-reducer.h',
         'compiler/simplified-operator.cc',
         'compiler/simplified-operator.h',
-        'compiler/source-position.cc',
-        'compiler/source-position.h',
+        'compiler/compiler-source-position-table.cc',
+        'compiler/compiler-source-position-table.h',
         'compiler/state-values-utils.cc',
         'compiler/state-values-utils.h',
         'compiler/store-store-elimination.cc',
@@ -741,10 +740,12 @@
         'compiler/wasm-compiler.cc',
         'compiler/wasm-compiler.h',
         'compiler/wasm-linkage.cc',
-        'compiler/zone-pool.cc',
-        'compiler/zone-pool.h',
+        'compiler/zone-stats.cc',
+        'compiler/zone-stats.h',
         'compiler-dispatcher/compiler-dispatcher-job.cc',
         'compiler-dispatcher/compiler-dispatcher-job.h',
+        'compiler-dispatcher/compiler-dispatcher-tracer.cc',
+        'compiler-dispatcher/compiler-dispatcher-tracer.h',
         'compiler-dispatcher/optimizing-compile-dispatcher.cc',
         'compiler-dispatcher/optimizing-compile-dispatcher.h',
         'compiler.cc',
@@ -788,8 +789,6 @@
         'crankshaft/hydrogen-instructions.h',
         'crankshaft/hydrogen-load-elimination.cc',
         'crankshaft/hydrogen-load-elimination.h',
-        'crankshaft/hydrogen-mark-deoptimize.cc',
-        'crankshaft/hydrogen-mark-deoptimize.h',
         'crankshaft/hydrogen-mark-unreachable.cc',
         'crankshaft/hydrogen-mark-unreachable.h',
         'crankshaft/hydrogen-osr.cc',
@@ -830,6 +829,7 @@
         'dateparser.h',
         'debug/debug-evaluate.cc',
         'debug/debug-evaluate.h',
+        'debug/debug-interface.h',
         'debug/debug-frames.cc',
         'debug/debug-frames.h',
         'debug/debug-scopes.cc',
@@ -933,7 +933,6 @@
         'heap/objects-visiting.cc',
         'heap/objects-visiting.h',
         'heap/page-parallel-job.h',
-        'heap/remembered-set.cc',
         'heap/remembered-set.h',
         'heap/scavenge-job.h',
         'heap/scavenge-job.cc',
@@ -950,12 +949,14 @@
         'i18n.h',
         'icu_util.cc',
         'icu_util.h',
+        'ic/access-compiler-data.h',
         'ic/access-compiler.cc',
         'ic/access-compiler.h',
         'ic/call-optimization.cc',
         'ic/call-optimization.h',
         'ic/handler-compiler.cc',
         'ic/handler-compiler.h',
+        'ic/handler-configuration-inl.h',
         'ic/handler-configuration.h',
         'ic/ic-inl.h',
         'ic/ic-state.cc',
@@ -964,6 +965,8 @@
         'ic/ic.h',
         'ic/ic-compiler.cc',
         'ic/ic-compiler.h',
+        'ic/keyed-store-generic.cc',
+        'ic/keyed-store-generic.h',
         'identity-map.cc',
         'identity-map.h',
         'interface-descriptors.cc',
@@ -1109,6 +1112,8 @@
         'profiler/tracing-cpu-profiler.h',
         'profiler/unbound-queue-inl.h',
         'profiler/unbound-queue.h',
+        'promise-utils.h',
+        'promise-utils.cc',
         'property-descriptor.cc',
         'property-descriptor.h',
         'property-details.h',
@@ -1134,6 +1139,8 @@
         'regexp/regexp-parser.h',
         'regexp/regexp-stack.cc',
         'regexp/regexp-stack.h',
+        'regexp/regexp-utils.cc',
+        'regexp/regexp-utils.h',
         'register-configuration.cc',
         'register-configuration.h',
         'runtime-profiler.cc',
@@ -1156,9 +1163,11 @@
         'runtime/runtime-literals.cc',
         'runtime/runtime-liveedit.cc',
         'runtime/runtime-maths.cc',
+        'runtime/runtime-module.cc',
         'runtime/runtime-numbers.cc',
         'runtime/runtime-object.cc',
         'runtime/runtime-operators.cc',
+        'runtime/runtime-promise.cc',
         'runtime/runtime-proxy.cc',
         'runtime/runtime-regexp.cc',
         'runtime/runtime-scopes.cc',
@@ -1196,6 +1205,7 @@
         'snapshot/startup-serializer.h',
         'source-position-table.cc',
         'source-position-table.h',
+        'source-position.cc',
         'source-position.h',
         'splay-tree.h',
         'splay-tree-inl.h',
@@ -1212,6 +1222,10 @@
         'ic/stub-cache.h',
         'tracing/trace-event.cc',
         'tracing/trace-event.h',
+        'tracing/traced-value.cc',
+        'tracing/traced-value.h',
+        'tracing/tracing-category-observer.cc',
+        'tracing/tracing-category-observer.h',
         'transitions-inl.h',
         'transitions.cc',
         'transitions.h',
@@ -1250,16 +1264,14 @@
         'wasm/ast-decoder.h',
         'wasm/decoder.h',
         'wasm/leb-helper.h',
+        'wasm/managed.h',
         'wasm/module-decoder.cc',
         'wasm/module-decoder.h',
-        'wasm/switch-logic.h',
-        'wasm/switch-logic.cc',
+        'wasm/signature-map.cc',
+        'wasm/signature-map.h',
         'wasm/wasm-debug.cc',
-        'wasm/wasm-debug.h',
         'wasm/wasm-external-refs.cc',
         'wasm/wasm-external-refs.h',
-        'wasm/wasm-function-name-table.cc',
-        'wasm/wasm-function-name-table.h',
         'wasm/wasm-js.cc',
         'wasm/wasm-js.h',
         'wasm/wasm-macro-gen.h',
@@ -1269,6 +1281,8 @@
         'wasm/wasm-module-builder.h',
         'wasm/wasm-interpreter.cc',
         'wasm/wasm-interpreter.h',
+        'wasm/wasm-objects.cc',
+        'wasm/wasm-objects.h',
         'wasm/wasm-opcodes.cc',
         'wasm/wasm-opcodes.h',
         'wasm/wasm-result.cc',
@@ -1279,6 +1293,9 @@
         'zone/zone-segment.h',
         'zone/zone.cc',
         'zone/zone.h',
+        'zone/zone-chunk-list.h',
+        'zone/zone-segment.cc',
+        'zone/zone-segment.h',
         'zone/zone-allocator.h',
         'zone/zone-containers.h',
       ],
@@ -1763,7 +1780,7 @@
     },
     {
       'target_name': 'v8_libbase',
-      'type': 'static_library',
+      'type': '<(component)',
       'variables': {
         'optimize': 'max',
       },
@@ -1774,18 +1791,10 @@
         'base/adapters.h',
         'base/atomic-utils.h',
         'base/atomicops.h',
-        'base/atomicops_internals_arm64_gcc.h',
-        'base/atomicops_internals_arm_gcc.h',
         'base/atomicops_internals_atomicword_compat.h',
-        'base/atomicops_internals_mac.h',
-        'base/atomicops_internals_mips_gcc.h',
-        'base/atomicops_internals_mips64_gcc.h',
-        'base/atomicops_internals_ppc_gcc.h',
-        'base/atomicops_internals_s390_gcc.h',
-        'base/atomicops_internals_tsan.h',
-        'base/atomicops_internals_x86_gcc.cc',
-        'base/atomicops_internals_x86_gcc.h',
+        'base/atomicops_internals_portable.h',
         'base/atomicops_internals_x86_msvc.h',
+        'base/base-export.h',
         'base/bits.cc',
         'base/bits.h',
         'base/build_config.h',
@@ -1824,6 +1833,7 @@
         'base/platform/platform.h',
         'base/platform/semaphore.cc',
         'base/platform/semaphore.h',
+        'base/ring-buffer.h',
         'base/safe_conversions.h',
         'base/safe_conversions_impl.h',
         'base/safe_math.h',
@@ -1833,6 +1843,16 @@
         'base/utils/random-number-generator.cc',
         'base/utils/random-number-generator.h',
       ],
+      'target_conditions': [
+        ['OS=="android" and _toolset=="target"', {
+          'libraries': [
+            '-llog',
+          ],
+          'include_dirs': [
+            'src/common/android/include',
+          ],
+        }],
+      ],
       'conditions': [
         ['want_separate_host_toolset==1 or \
           want_separate_host_toolset_mkpeephole==1', {
@@ -1840,6 +1860,16 @@
         }, {
           'toolsets': ['target'],
         }],
+        ['component=="shared_library"', {
+          'defines': [
+            'BUILDING_V8_BASE_SHARED',
+          ],
+          'direct_dependent_settings': {
+            'defines': [
+              'USING_V8_BASE_SHARED',
+            ],
+          },
+        }],
         ['OS=="linux"', {
             'link_settings': {
               'libraries': [
@@ -2047,7 +2077,7 @@
     },
     {
       'target_name': 'v8_libplatform',
-      'type': 'static_library',
+      'type': '<(component)',
       'variables': {
         'optimize': 'max',
       },
@@ -2061,6 +2091,7 @@
       ],
       'sources': [
         '../include/libplatform/libplatform.h',
+        '../include/libplatform/libplatform-export.h',
         '../include/libplatform/v8-tracing.h',
         'libplatform/default-platform.cc',
         'libplatform/default-platform.h',
@@ -2082,6 +2113,12 @@
         }, {
           'toolsets': ['target'],
         }],
+        ['component=="shared_library"', {
+          'direct_dependent_settings': {
+            'defines': [ 'USING_V8_PLATFORM_SHARED' ],
+          },
+          'defines': [ 'BUILDING_V8_PLATFORM_SHARED' ],
+        }]
       ],
       'direct_dependent_settings': {
         'include_dirs': [
@@ -2197,8 +2234,6 @@
           'js/symbol.js',
           'js/array.js',
           'js/string.js',
-          'js/math.js',
-          'js/regexp.js',
           'js/arraybuffer.js',
           'js/typedarray.js',
           'js/collection.js',
@@ -2206,7 +2241,6 @@
           'js/collection-iterator.js',
           'js/promise.js',
           'js/messages.js',
-          'js/array-iterator.js',
           'js/templates.js',
           'js/spread.js',
           'js/proxy.js',
@@ -2400,7 +2434,12 @@
     {
       'target_name': 'mksnapshot',
       'type': 'executable',
-      'dependencies': ['v8_base', 'v8_nosnapshot', 'v8_libplatform'],
+      'dependencies': [
+        'v8_base',
+        'v8_libbase',
+        'v8_nosnapshot',
+        'v8_libplatform'
+      ],
       'include_dirs+': [
         '..',
         '<(DEPTH)',
diff --git a/src/v8.h b/src/v8.h
index a1b18b2..e49cb00 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -7,6 +7,7 @@
 
 #include "include/v8.h"
 #include "src/allocation.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -25,7 +26,7 @@
 
   static void InitializePlatform(v8::Platform* platform);
   static void ShutdownPlatform();
-  static v8::Platform* GetCurrentPlatform();
+  V8_EXPORT_PRIVATE static v8::Platform* GetCurrentPlatform();
   // Replaces the current platform with the given platform.
   // Should be used only for testing.
   static void SetPlatformForTesting(v8::Platform* platform);
diff --git a/src/value-serializer.cc b/src/value-serializer.cc
index 1d2e36d..c6abb8a 100644
--- a/src/value-serializer.cc
+++ b/src/value-serializer.cc
@@ -9,11 +9,16 @@
 #include "src/base/logging.h"
 #include "src/conversions.h"
 #include "src/factory.h"
+#include "src/flags.h"
 #include "src/handles-inl.h"
 #include "src/isolate.h"
 #include "src/objects-inl.h"
 #include "src/objects.h"
+#include "src/snapshot/code-serializer.h"
 #include "src/transitions.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-result.h"
 
 namespace v8 {
 namespace internal {
@@ -107,6 +112,11 @@
   kArrayBufferView = 'V',
   // Shared array buffer (transferred). transferID:uint32_t
   kSharedArrayBufferTransfer = 'u',
+  // Compiled WebAssembly module. encodingType:(one-byte tag).
+  // If encodingType == 'y' (raw bytes):
+  //  wasmWireByteLength:uint32_t, then raw data
+  //  compiledDataLength:uint32_t, then raw data
+  kWasmModule = 'W',
 };
 
 namespace {
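
For reference, a sketch of the wire layout these tags describe (byte order
as written by the serializer below):

    'W'                          // SerializationTag::kWasmModule
    'y'                          // WasmEncodingTag::kRawBytes
    varint32 wire_length         // length of the wasm wire bytes
    <wire_length raw bytes>      // the module's wire bytes
    varint32 compiled_length     // length of the V8 compiled data
    <compiled_length raw bytes>  // output of WasmCompiledModuleSerializer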
@@ -124,17 +134,29 @@
   kDataView = '?',
 };
 
+enum class WasmEncodingTag : uint8_t {
+  kRawBytes = 'y',
+};
+
 }  // namespace
 
 ValueSerializer::ValueSerializer(Isolate* isolate,
                                  v8::ValueSerializer::Delegate* delegate)
     : isolate_(isolate),
       delegate_(delegate),
-      zone_(isolate->allocator()),
+      zone_(isolate->allocator(), ZONE_NAME),
       id_map_(isolate->heap(), &zone_),
       array_buffer_transfer_map_(isolate->heap(), &zone_) {}
 
-ValueSerializer::~ValueSerializer() {}
+ValueSerializer::~ValueSerializer() {
+  if (buffer_) {
+    if (delegate_) {
+      delegate_->FreeBufferMemory(buffer_);
+    } else {
+      free(buffer_);
+    }
+  }
+}
 
 void ValueSerializer::WriteHeader() {
   WriteTag(SerializationTag::kVersion);
@@ -142,7 +164,8 @@
 }
 
 void ValueSerializer::WriteTag(SerializationTag tag) {
-  buffer_.push_back(static_cast<uint8_t>(tag));
+  uint8_t raw_tag = static_cast<uint8_t>(tag);
+  WriteRawBytes(&raw_tag, sizeof(raw_tag));
 }
 
 template <typename T>
@@ -161,7 +184,7 @@
     value >>= 7;
   } while (value);
   *(next_byte - 1) &= 0x7f;
-  buffer_.insert(buffer_.end(), stack_buffer, next_byte);
+  WriteRawBytes(stack_buffer, next_byte - stack_buffer);
 }
 
 template <typename T>
@@ -179,34 +202,50 @@
 
 void ValueSerializer::WriteDouble(double value) {
   // Warning: this uses host endianness.
-  buffer_.insert(buffer_.end(), reinterpret_cast<const uint8_t*>(&value),
-                 reinterpret_cast<const uint8_t*>(&value + 1));
+  WriteRawBytes(&value, sizeof(value));
 }
 
 void ValueSerializer::WriteOneByteString(Vector<const uint8_t> chars) {
   WriteVarint<uint32_t>(chars.length());
-  buffer_.insert(buffer_.end(), chars.begin(), chars.end());
+  WriteRawBytes(chars.begin(), chars.length() * sizeof(uint8_t));
 }
 
 void ValueSerializer::WriteTwoByteString(Vector<const uc16> chars) {
   // Warning: this uses host endianness.
   WriteVarint<uint32_t>(chars.length() * sizeof(uc16));
-  buffer_.insert(buffer_.end(), reinterpret_cast<const uint8_t*>(chars.begin()),
-                 reinterpret_cast<const uint8_t*>(chars.end()));
+  WriteRawBytes(chars.begin(), chars.length() * sizeof(uc16));
 }
 
 void ValueSerializer::WriteRawBytes(const void* source, size_t length) {
-  const uint8_t* begin = reinterpret_cast<const uint8_t*>(source);
-  buffer_.insert(buffer_.end(), begin, begin + length);
+  memcpy(ReserveRawBytes(length), source, length);
 }
 
 uint8_t* ValueSerializer::ReserveRawBytes(size_t bytes) {
-  if (!bytes) return nullptr;
-  auto old_size = buffer_.size();
-  buffer_.resize(buffer_.size() + bytes);
+  size_t old_size = buffer_size_;
+  size_t new_size = old_size + bytes;
+  if (new_size > buffer_capacity_) ExpandBuffer(new_size);
+  buffer_size_ = new_size;
   return &buffer_[old_size];
 }
 
+void ValueSerializer::ExpandBuffer(size_t required_capacity) {
+  DCHECK_GT(required_capacity, buffer_capacity_);
+  size_t requested_capacity =
+      std::max(required_capacity, buffer_capacity_ * 2) + 64;
+  size_t provided_capacity = 0;
+  void* new_buffer = nullptr;
+  if (delegate_) {
+    new_buffer = delegate_->ReallocateBufferMemory(buffer_, requested_capacity,
+                                                   &provided_capacity);
+  } else {
+    new_buffer = realloc(buffer_, requested_capacity);
+    provided_capacity = requested_capacity;
+  }
+  DCHECK_GE(provided_capacity, requested_capacity);
+  buffer_ = reinterpret_cast<uint8_t*>(new_buffer);
+  buffer_capacity_ = provided_capacity;
+}
+
 void ValueSerializer::WriteUint32(uint32_t value) {
   WriteVarint<uint32_t>(value);
 }
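
The growth policy in ExpandBuffer at least doubles the capacity and pads by
64 bytes, so repeated small writes reallocate only O(log n) times. A worked
sketch, assuming no delegate (realloc provides exactly what was requested):

    // Appending one byte at a time from an empty buffer:
    // required 1   -> requested max(1, 0 * 2) + 64     = 65
    // required 66  -> requested max(66, 65 * 2) + 64   = 194
    // required 195 -> requested max(195, 194 * 2) + 64 = 452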
@@ -215,6 +254,18 @@
   WriteVarint<uint64_t>(value);
 }
 
+std::vector<uint8_t> ValueSerializer::ReleaseBuffer() {
+  return std::vector<uint8_t>(buffer_, buffer_ + buffer_size_);
+}
+
+std::pair<uint8_t*, size_t> ValueSerializer::Release() {
+  auto result = std::make_pair(buffer_, buffer_size_);
+  buffer_ = nullptr;
+  buffer_size_ = 0;
+  buffer_capacity_ = 0;
+  return result;
+}
+
 void ValueSerializer::TransferArrayBuffer(uint32_t transfer_id,
                                           Handle<JSArrayBuffer> array_buffer) {
   DCHECK(!array_buffer_transfer_map_.Find(array_buffer));
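
A sketch of the two release paths (not part of the patch; Send() is a
placeholder): ReleaseBuffer() copies the data into a std::vector, while
Release() transfers the raw allocation to the caller, who must free it the
same way the destructor would have:

    std::pair<uint8_t*, size_t> data = serializer.Release();
    Send(data.first, data.second);
    // Free via delegate->FreeBufferMemory(data.first) if a delegate
    // supplied the memory; otherwise:
    free(data.first);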
@@ -325,7 +376,7 @@
     Vector<const uc16> chars = flat.ToUC16Vector();
     uint32_t byte_length = chars.length() * sizeof(uc16);
     // The existing reading code expects 16-bit strings to be aligned.
-    if ((buffer_.size() + 1 + BytesNeededForVarint(byte_length)) & 1)
+    if ((buffer_size_ + 1 + BytesNeededForVarint(byte_length)) & 1)
       WriteTag(SerializationTag::kPadding);
     WriteTag(SerializationTag::kTwoByteString);
     WriteTwoByteString(chars);
@@ -365,8 +416,16 @@
     case JS_OBJECT_TYPE:
     case JS_API_OBJECT_TYPE: {
       Handle<JSObject> js_object = Handle<JSObject>::cast(receiver);
-      return js_object->GetInternalFieldCount() ? WriteHostObject(js_object)
-                                                : WriteJSObject(js_object);
+      Map* map = js_object->map();
+      if (FLAG_expose_wasm &&
+          map->GetConstructor() ==
+              isolate_->native_context()->wasm_module_constructor()) {
+        return WriteWasmModule(js_object);
+      } else if (JSObject::GetInternalFieldCount(map)) {
+        return WriteHostObject(js_object);
+      } else {
+        return WriteJSObject(js_object);
+      }
     }
     case JS_SPECIAL_API_OBJECT_TYPE:
       return WriteHostObject(Handle<JSObject>::cast(receiver));
@@ -470,6 +529,8 @@
       array->HasFastElements() && !array->HasFastHoleyElements();
 
   if (should_serialize_densely) {
+    DCHECK_LE(length, static_cast<uint32_t>(FixedArray::kMaxLength));
+
     // TODO(jbroman): Distinguish between undefined and a hole (this can happen
     // if serializing one of the elements deletes another). This requires wire
     // format changes.
@@ -666,7 +727,6 @@
 Maybe<bool> ValueSerializer::WriteJSArrayBuffer(JSArrayBuffer* array_buffer) {
   uint32_t* transfer_entry = array_buffer_transfer_map_.Find(array_buffer);
   if (transfer_entry) {
-    DCHECK(array_buffer->was_neutered() || array_buffer->is_shared());
     WriteTag(array_buffer->is_shared()
                  ? SerializationTag::kSharedArrayBufferTransfer
                  : SerializationTag::kArrayBufferTransfer);
@@ -716,6 +776,29 @@
   return Just(true);
 }
 
+Maybe<bool> ValueSerializer::WriteWasmModule(Handle<JSObject> object) {
+  Handle<WasmCompiledModule> compiled_part(
+      WasmCompiledModule::cast(object->GetInternalField(0)), isolate_);
+  WasmEncodingTag encoding_tag = WasmEncodingTag::kRawBytes;
+  WriteTag(SerializationTag::kWasmModule);
+  WriteRawBytes(&encoding_tag, sizeof(encoding_tag));
+
+  Handle<String> wire_bytes = compiled_part->module_bytes();
+  int wire_bytes_length = wire_bytes->length();
+  WriteVarint<uint32_t>(wire_bytes_length);
+  uint8_t* destination = ReserveRawBytes(wire_bytes_length);
+  String::WriteToFlat(*wire_bytes, destination, 0, wire_bytes_length);
+
+  std::unique_ptr<ScriptData> script_data =
+      WasmCompiledModuleSerializer::SerializeWasmModule(isolate_,
+                                                        compiled_part);
+  int script_data_length = script_data->length();
+  WriteVarint<uint32_t>(script_data_length);
+  WriteRawBytes(script_data->data(), script_data_length);
+
+  return Just(true);
+}
+
 Maybe<bool> ValueSerializer::WriteHostObject(Handle<JSObject> object) {
   if (!delegate_) {
     isolate_->Throw(*isolate_->factory()->NewError(
@@ -1026,6 +1109,8 @@
       const bool is_shared = true;
       return ReadTransferredJSArrayBuffer(is_shared);
     }
+    case SerializationTag::kWasmModule:
+      return ReadWasmModule();
     default:
       // TODO(jbroman): Introduce an explicit tag for host objects to avoid
       // having to treat every unknown tag as a potential host object.
@@ -1092,7 +1177,7 @@
   // is successfully consumed.
   if (tag == SerializationTag::kUtf8String && flat.IsOneByte()) {
     Vector<const uint8_t> chars = flat.ToOneByteVector();
-    if (byte_length == chars.length() &&
+    if (byte_length == static_cast<size_t>(chars.length()) &&
         String::IsAscii(chars.begin(), chars.length()) &&
         memcmp(bytes.begin(), chars.begin(), byte_length) == 0) {
       return true;
@@ -1165,8 +1250,15 @@
   // If we are at the end of the stack, abort. This function may recurse.
   STACK_CHECK(isolate_, MaybeHandle<JSArray>());
 
+  // We shouldn't permit an array larger than the biggest we can request from
+  // V8. Also, since each entry takes at least one byte to encode, we can
+  // fail fast if fewer bytes than the declared length remain in the buffer.
   uint32_t length;
-  if (!ReadVarint<uint32_t>().To(&length)) return MaybeHandle<JSArray>();
+  if (!ReadVarint<uint32_t>().To(&length) ||
+      length > static_cast<uint32_t>(FixedArray::kMaxLength) ||
+      length > static_cast<size_t>(end_ - position_)) {
+    return MaybeHandle<JSArray>();
+  }
 
   uint32_t id = next_id_++;
   HandleScope scope(isolate_);
@@ -1410,7 +1502,8 @@
       TYPED_ARRAYS(TYPED_ARRAY_CASE)
 #undef TYPED_ARRAY_CASE
   }
-  if (byte_offset % element_size != 0 || byte_length % element_size != 0) {
+  if (element_size == 0 || byte_offset % element_size != 0 ||
+      byte_length % element_size != 0) {
     return MaybeHandle<JSArrayBufferView>();
   }
   Handle<JSTypedArray> typed_array = isolate_->factory()->NewJSTypedArray(
@@ -1420,6 +1513,51 @@
   return typed_array;
 }
 
+MaybeHandle<JSObject> ValueDeserializer::ReadWasmModule() {
+  if (!FLAG_expose_wasm) return MaybeHandle<JSObject>();
+
+  Vector<const uint8_t> encoding_tag;
+  if (!ReadRawBytes(sizeof(WasmEncodingTag)).To(&encoding_tag) ||
+      encoding_tag[0] != static_cast<uint8_t>(WasmEncodingTag::kRawBytes)) {
+    return MaybeHandle<JSObject>();
+  }
+
+  // Extract the data from the buffer: wasm wire bytes, followed by V8 compiled
+  // script data.
+  static_assert(sizeof(int) <= sizeof(uint32_t),
+                "max int must fit in uint32_t");
+  const uint32_t max_valid_size = std::numeric_limits<int>::max();
+  uint32_t wire_bytes_length = 0;
+  Vector<const uint8_t> wire_bytes;
+  uint32_t compiled_bytes_length = 0;
+  Vector<const uint8_t> compiled_bytes;
+  if (!ReadVarint<uint32_t>().To(&wire_bytes_length) ||
+      wire_bytes_length > max_valid_size ||
+      !ReadRawBytes(wire_bytes_length).To(&wire_bytes) ||
+      !ReadVarint<uint32_t>().To(&compiled_bytes_length) ||
+      compiled_bytes_length > max_valid_size ||
+      !ReadRawBytes(compiled_bytes_length).To(&compiled_bytes)) {
+    return MaybeHandle<JSObject>();
+  }
+
+  // Try to deserialize the compiled module first.
+  ScriptData script_data(compiled_bytes.start(), compiled_bytes.length());
+  Handle<FixedArray> compiled_part;
+  if (WasmCompiledModuleSerializer::DeserializeWasmModule(
+          isolate_, &script_data, wire_bytes)
+          .ToHandle(&compiled_part)) {
+    return WasmModuleObject::New(
+        isolate_, Handle<WasmCompiledModule>::cast(compiled_part));
+  }
+
+  // If that fails, recompile.
+  wasm::ErrorThrower thrower(isolate_, "ValueDeserializer::ReadWasmModule");
+  return wasm::CreateModuleObjectFromBytes(
+      isolate_, wire_bytes.begin(), wire_bytes.end(), &thrower,
+      wasm::ModuleOrigin::kWasmOrigin, Handle<Script>::null(), nullptr,
+      nullptr);
+}
+
 MaybeHandle<JSObject> ValueDeserializer::ReadHostObject() {
   if (!delegate_) return MaybeHandle<JSObject>();
   STACK_CHECK(isolate_, MaybeHandle<JSObject>());
@@ -1629,7 +1767,7 @@
 
 MaybeHandle<Object>
 ValueDeserializer::ReadObjectUsingEntireBufferForLegacyFormat() {
-  DCHECK_EQ(version_, 0);
+  DCHECK_EQ(version_, 0u);
   HandleScope scope(isolate_);
   std::vector<Handle<Object>> stack;
   while (position_ < end_) {
@@ -1646,6 +1784,8 @@
         uint32_t num_properties;
         if (!ReadVarint<uint32_t>().To(&num_properties) ||
             stack.size() / 2 < num_properties) {
+          isolate_->Throw(*isolate_->factory()->NewError(
+              MessageTemplate::kDataCloneDeserializationError));
           return MaybeHandle<Object>();
         }
 
@@ -1657,6 +1797,7 @@
             !SetPropertiesFromKeyValuePairs(
                  isolate_, js_object, &stack[begin_properties], num_properties)
                  .FromMaybe(false)) {
+          DCHECK(isolate_->has_pending_exception());
           return MaybeHandle<Object>();
         }
 
@@ -1673,6 +1814,8 @@
         if (!ReadVarint<uint32_t>().To(&num_properties) ||
             !ReadVarint<uint32_t>().To(&length) ||
             stack.size() / 2 < num_properties) {
+          isolate_->Throw(*isolate_->factory()->NewError(
+              MessageTemplate::kDataCloneDeserializationError));
           return MaybeHandle<Object>();
         }
 
@@ -1685,6 +1828,7 @@
             !SetPropertiesFromKeyValuePairs(
                  isolate_, js_array, &stack[begin_properties], num_properties)
                  .FromMaybe(false)) {
+          DCHECK(isolate_->has_pending_exception());
           return MaybeHandle<Object>();
         }
 
diff --git a/src/value-serializer.h b/src/value-serializer.h
index 27ce0c1..86e21cf 100644
--- a/src/value-serializer.h
+++ b/src/value-serializer.h
@@ -59,7 +59,13 @@
    * Returns the stored data. This serializer should not be used once the buffer
    * is released. The contents are undefined if a previous write has failed.
    */
-  std::vector<uint8_t> ReleaseBuffer() { return std::move(buffer_); }
+  std::vector<uint8_t> ReleaseBuffer();
+
+  /*
+   * Returns the buffer (allocated via the delegate, if one was provided)
+   * and its size. The caller assumes ownership of the buffer.
+   */
+  std::pair<uint8_t*, size_t> Release();
 
   /*
   * Marks an ArrayBuffer as having its contents transferred out of band.
@@ -79,6 +85,9 @@
   void WriteDouble(double value);
 
  private:
+  // Managing allocations of the internal buffer.
+  void ExpandBuffer(size_t required_capacity);
+
   // Writing the wire format.
   void WriteTag(SerializationTag tag);
   template <typename T>
@@ -105,6 +114,7 @@
   Maybe<bool> WriteJSSet(Handle<JSSet> map) WARN_UNUSED_RESULT;
   Maybe<bool> WriteJSArrayBuffer(JSArrayBuffer* array_buffer);
   Maybe<bool> WriteJSArrayBufferView(JSArrayBufferView* array_buffer);
+  Maybe<bool> WriteWasmModule(Handle<JSObject> object) WARN_UNUSED_RESULT;
   Maybe<bool> WriteHostObject(Handle<JSObject> object) WARN_UNUSED_RESULT;
 
   /*
@@ -125,7 +135,9 @@
 
   Isolate* const isolate_;
   v8::ValueSerializer::Delegate* const delegate_;
-  std::vector<uint8_t> buffer_;
+  uint8_t* buffer_ = nullptr;
+  size_t buffer_size_ = 0;
+  size_t buffer_capacity_ = 0;
   Zone zone_;
 
   // To avoid extra lookups in the identity map, ID+1 is actually stored in the
@@ -230,6 +242,7 @@
       WARN_UNUSED_RESULT;
   MaybeHandle<JSArrayBufferView> ReadJSArrayBufferView(
       Handle<JSArrayBuffer> buffer) WARN_UNUSED_RESULT;
+  MaybeHandle<JSObject> ReadWasmModule() WARN_UNUSED_RESULT;
   MaybeHandle<JSObject> ReadHostObject() WARN_UNUSED_RESULT;
 
   /*
diff --git a/src/vector.h b/src/vector.h
index d120dfc..080f89e 100644
--- a/src/vector.h
+++ b/src/vector.h
@@ -51,7 +51,8 @@
 
   // Access individual vector elements - checks bounds in debug mode.
   T& operator[](int index) const {
-    DCHECK(0 <= index && index < length_);
+    DCHECK_LE(0, index);
+    DCHECK_LT(index, length_);
     return start_[index];
   }
 
diff --git a/src/wasm/ast-decoder.cc b/src/wasm/ast-decoder.cc
index 02d1db5..ff6af34 100644
--- a/src/wasm/ast-decoder.cc
+++ b/src/wasm/ast-decoder.cc
@@ -32,6 +32,9 @@
 #endif
 
 #define CHECK_PROTOTYPE_OPCODE(flag)                   \
+  if (module_ && module_->origin == kAsmJsOrigin) {    \
+    error("Opcode not supported for asmjs modules");   \
+  }                                                    \
   if (!FLAG_##flag) {                                  \
     error("Invalid opcode (enable with --" #flag ")"); \
     break;                                             \
@@ -147,6 +150,16 @@
   (build() ? CheckForException(builder_->func(__VA_ARGS__)) : nullptr)
 #define BUILD0(func) (build() ? CheckForException(builder_->func()) : nullptr)
 
+struct LaneOperand {
+  uint8_t lane;
+  unsigned length;
+
+  inline LaneOperand(Decoder* decoder, const byte* pc) {
+    lane = decoder->checked_read_u8(pc, 2, "lane");
+    length = 1;
+  }
+};
+
 // Generic Wasm bytecode decoder with utilities for decoding operands,
 // lengths, etc.
 class WasmDecoder : public Decoder {
@@ -214,6 +227,11 @@
   }
 
   inline bool Validate(const byte* pc, CallIndirectOperand& operand) {
+    uint32_t table_index = 0;
+    if (!module_->IsValidTable(table_index)) {
+      error("function table has to exist to execute call_indirect");
+      return false;
+    }
     if (Complete(pc, operand)) {
       return true;
     }
@@ -237,8 +255,17 @@
     return true;
   }
 
+  inline bool Validate(const byte* pc, LaneOperand& operand) {
+    if (operand.lane > 3) {  // lane is a uint8_t, so it cannot be negative.
+      error(pc_, pc_ + 2, "invalid extract lane value");
+      return false;
+    } else {
+      return true;
+    }
+  }
+
   unsigned OpcodeLength(const byte* pc) {
-    switch (static_cast<WasmOpcode>(*pc)) {
+    switch (static_cast<byte>(*pc)) {
 #define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
       FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
       FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
@@ -295,12 +322,39 @@
         ImmI64Operand operand(this, pc);
         return 1 + operand.length;
       }
+      case kExprGrowMemory:
+      case kExprMemorySize: {
+        MemoryIndexOperand operand(this, pc);
+        return 1 + operand.length;
+      }
       case kExprI8Const:
         return 2;
       case kExprF32Const:
         return 5;
       case kExprF64Const:
         return 9;
+      case kSimdPrefix: {
+        byte simd_index = checked_read_u8(pc, 1, "simd_index");
+        WasmOpcode opcode =
+            static_cast<WasmOpcode>(kSimdPrefix << 8 | simd_index);
+        switch (opcode) {
+#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
+          FOREACH_SIMD_0_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
+#undef DECLARE_OPCODE_CASE
+          {
+            return 2;
+          }
+#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
+          FOREACH_SIMD_1_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
+#undef DECLARE_OPCODE_CASE
+          {
+            return 3;
+          }
+          default:
+            error("invalid SIMD opcode");
+            return 2;
+        }
+      }
       default:
         return 1;
     }
@@ -500,7 +554,7 @@
       case kAstF64:
         return builder_->Float64Constant(0);
       case kAstS128:
-        return builder_->DefaultS128Value();
+        return builder_->CreateS128Value(0);
       default:
         UNREACHABLE();
         return nullptr;
@@ -520,7 +574,7 @@
 
   // Decodes the locals declarations, if any, populating {local_type_vec_}.
   void DecodeLocalDecls() {
-    DCHECK_EQ(0, local_type_vec_.size());
+    DCHECK_EQ(0u, local_type_vec_.size());
     // Initialize {local_type_vec} from signature.
     if (sig_) {
       local_type_vec_.reserve(sig_->parameter_count());
@@ -681,8 +735,8 @@
             BlockTypeOperand operand(this, pc_);
             SsaEnv* finish_try_env = Steal(ssa_env_);
             // The continue environment is the inner environment.
-            PrepareForLoop(pc_, finish_try_env);
-            SetEnv("loop:start", Split(finish_try_env));
+            SsaEnv* loop_body_env = PrepareForLoop(pc_, finish_try_env);
+            SetEnv("loop:start", loop_body_env);
             ssa_env_->SetNotMerged();
             PushLoop(finish_try_env);
             SetBlockType(&control_.back(), operand);
@@ -695,7 +749,7 @@
             Value cond = Pop(0, kAstI32);
             TFNode* if_true = nullptr;
             TFNode* if_false = nullptr;
-            BUILD(Branch, cond.node, &if_true, &if_false);
+            BUILD(BranchNoHint, cond.node, &if_true, &if_false);
             SsaEnv* end_env = ssa_env_;
             SsaEnv* false_env = Split(ssa_env_);
             false_env->control = if_false;
@@ -746,7 +800,8 @@
               if (c->false_env != nullptr) {
                 // End the true branch of a one-armed if.
                 Goto(c->false_env, c->end_env);
-                if (ssa_env_->go() && stack_.size() != c->stack_depth) {
+                if (ssa_env_->go() &&
+                    static_cast<int>(stack_.size()) != c->stack_depth) {
                   error("end of if expected empty stack");
                   stack_.resize(c->stack_depth);
                 }
@@ -813,7 +868,7 @@
               DCHECK(fval.type != kAstEnd);
               DCHECK(cond.type != kAstEnd);
               TFNode* controls[2];
-              builder_->Branch(cond.node, &controls[0], &controls[1]);
+              builder_->BranchNoHint(cond.node, &controls[0], &controls[1]);
               TFNode* merge = builder_->Merge(2, controls);
               TFNode* vals[2] = {tval.node, fval.node};
               TFNode* phi = builder_->Phi(tval.type, 2, vals, merge);
@@ -840,7 +895,7 @@
               SsaEnv* fenv = ssa_env_;
               SsaEnv* tenv = Split(fenv);
               fenv->SetNotMerged();
-              BUILD(Branch, cond.node, &tenv->control, &fenv->control);
+              BUILD(BranchNoHint, cond.node, &tenv->control, &fenv->control);
               ssa_env_ = tenv;
               BreakTo(operand.depth);
               ssa_env_ = fenv;
@@ -862,7 +917,7 @@
 
                 SsaEnv* copy = Steal(break_env);
                 ssa_env_ = copy;
-                while (iterator.has_next()) {
+                while (ok() && iterator.has_next()) {
                   uint32_t i = iterator.cur_index();
                   const byte* pos = iterator.pc();
                   uint32_t target = iterator.next();
@@ -876,6 +931,7 @@
                                           : BUILD(IfValue, i, sw);
                   BreakTo(target);
                 }
+                if (failed()) break;
               } else {
                 // Only a default target. Do the equivalent of br.
                 const byte* pos = iterator.pc();
@@ -1057,17 +1113,23 @@
           case kExprF64StoreMem:
             len = DecodeStoreMem(kAstF64, MachineType::Float64());
             break;
-          case kExprGrowMemory:
+          case kExprGrowMemory: {
+            MemoryIndexOperand operand(this, pc_);
             if (module_->origin != kAsmJsOrigin) {
               Value val = Pop(0, kAstI32);
               Push(kAstI32, BUILD(GrowMemory, val.node));
             } else {
               error("grow_memory is not supported for asmjs modules");
             }
+            len = 1 + operand.length;
             break;
-          case kExprMemorySize:
+          }
+          case kExprMemorySize: {
+            MemoryIndexOperand operand(this, pc_);
             Push(kAstI32, BUILD(CurrentMemoryPages));
+            len = 1 + operand.length;
             break;
+          }
           case kExprCallFunction: {
             CallFunctionOperand operand(this, pc_);
             if (Validate(pc_, operand)) {
@@ -1095,13 +1157,31 @@
           case kSimdPrefix: {
             CHECK_PROTOTYPE_OPCODE(wasm_simd_prototype);
             len++;
-            byte simd_index = *(pc_ + 1);
+            byte simd_index = checked_read_u8(pc_, 1, "simd index");
             opcode = static_cast<WasmOpcode>(opcode << 8 | simd_index);
             TRACE("  @%-4d #%02x #%02x:%-20s|", startrel(pc_), kSimdPrefix,
                   simd_index, WasmOpcodes::ShortOpcodeName(opcode));
             len += DecodeSimdOpcode(opcode);
             break;
           }
+          case kAtomicPrefix: {
+            if (!module_ || module_->origin != kAsmJsOrigin) {
+              error("Atomics are allowed only in AsmJs modules");
+              break;
+            }
+            if (!FLAG_wasm_atomics_prototype) {
+              error("Invalid opcode (enable with --wasm_atomics_prototype)");
+              break;
+            }
+            len = 2;
+            byte atomic_opcode = checked_read_u8(pc_, 1, "atomic index");
+            opcode = static_cast<WasmOpcode>(opcode << 8 | atomic_opcode);
+            sig = WasmOpcodes::AtomicSignature(opcode);
+            if (sig) {
+              BuildAtomicOperator(opcode);
+            }
+            break;
+          }
           default: {
             // Deal with special asmjs opcodes.
             if (module_ && module_->origin == kAsmJsOrigin) {
@@ -1245,18 +1325,25 @@
     return 1 + operand.length;
   }
 
+  unsigned ExtractLane(WasmOpcode opcode, LocalType type) {
+    LaneOperand operand(this, pc_);
+    if (Validate(pc_, operand)) {
+      TFNode* input = Pop(0, LocalType::kSimd128).node;
+      TFNode* node = BUILD(SimdExtractLane, opcode, operand.lane, input);
+      Push(type, node);
+    }
+    return operand.length;
+  }
+
   unsigned DecodeSimdOpcode(WasmOpcode opcode) {
     unsigned len = 0;
     switch (opcode) {
       case kExprI32x4ExtractLane: {
-        uint8_t lane = this->checked_read_u8(pc_, 2, "lane number");
-        if (lane < 0 || lane > 3) {
-          error(pc_, pc_ + 2, "invalid extract lane value");
-        }
-        TFNode* input = Pop(0, LocalType::kSimd128).node;
-        TFNode* node = BUILD(SimdExtractLane, opcode, lane, input);
-        Push(LocalType::kWord32, node);
-        len++;
+        len = ExtractLane(opcode, LocalType::kWord32);
+        break;
+      }
+      case kExprF32x4ExtractLane: {
+        len = ExtractLane(opcode, LocalType::kFloat32);
         break;
       }
       default: {
@@ -1277,6 +1364,8 @@
     return len;
   }
 
+  void BuildAtomicOperator(WasmOpcode opcode) { UNIMPLEMENTED(); }
+
   void DoReturn() {
     int count = static_cast<int>(sig_->return_count());
     TFNode** buffer = nullptr;
@@ -1347,7 +1436,7 @@
       // Unreachable code is essentially not typechecked.
       return {pc_, nullptr, kAstEnd};
     }
-    if (stack_depth == stack_.size()) {
+    if (stack_depth == static_cast<int>(stack_.size())) {
       Value val = {pc_, nullptr, kAstStmt};
       return val;
     } else {
@@ -1372,8 +1461,7 @@
       Goto(ssa_env_, c->end_env);
     } else {
       // Merge the value(s) into the end of the block.
-      if (static_cast<size_t>(c->stack_depth + c->merge.arity) >
-          stack_.size()) {
+      if (c->stack_depth + c->merge.arity > stack_.size()) {
         error(
             pc_, pc_,
             "expected at least %d values on the stack for br to @%d, found %d",
@@ -1389,7 +1477,7 @@
     if (!ssa_env_->go()) return;
     // Merge the value(s) into the end of the block.
     int arity = static_cast<int>(c->merge.arity);
-    if (c->stack_depth + arity != stack_.size()) {
+    if (c->stack_depth + arity != static_cast<int>(stack_.size())) {
       error(pc_, pc_, "expected %d elements on the stack for fallthru to @%d",
             arity, startrel(c->pc));
       return;
@@ -1405,7 +1493,7 @@
     if (!ssa_env_->go()) return;
     // Fallthru must match arity exactly.
     int arity = static_cast<int>(c->merge.arity);
-    if (c->stack_depth + arity != stack_.size()) {
+    if (c->stack_depth + arity != static_cast<int>(stack_.size())) {
       error(pc_, pc_, "expected %d elements on the stack for fallthru to @%d",
             arity, startrel(c->pc));
       return;
@@ -1437,9 +1525,13 @@
               WasmOpcodes::TypeName(old.type), WasmOpcodes::TypeName(val.type));
         return;
       }
-      old.node =
-          first ? val.node : CreateOrMergeIntoPhi(old.type, target->control,
-                                                  old.node, val.node);
+      if (builder_) {
+        old.node =
+            first ? val.node : CreateOrMergeIntoPhi(old.type, target->control,
+                                                    old.node, val.node);
+      } else {
+        old.node = nullptr;
+      }
     }
   }
 
@@ -1596,6 +1688,7 @@
 
   TFNode* CreateOrMergeIntoPhi(LocalType type, TFNode* merge, TFNode* tnode,
                                TFNode* fnode) {
+    DCHECK_NOT_NULL(builder_);
     if (builder_->IsPhiWithMerge(tnode, merge)) {
       builder_->AppendToPhi(tnode, fnode);
     } else if (tnode != fnode) {
@@ -1608,16 +1701,17 @@
     return tnode;
   }
 
-  void PrepareForLoop(const byte* pc, SsaEnv* env) {
-    if (!env->go()) return;
+  SsaEnv* PrepareForLoop(const byte* pc, SsaEnv* env) {
+    if (!builder_) return Split(env);
+    if (!env->go()) return Split(env);
     env->state = SsaEnv::kMerged;
-    if (!builder_) return;
 
     env->control = builder_->Loop(env->control);
     env->effect = builder_->EffectPhi(1, &env->effect, env->control);
     builder_->Terminate(env->effect, env->control);
     if (FLAG_wasm_loop_assignment_analysis) {
       BitVector* assigned = AnalyzeLoopAssignment(pc);
+      if (failed()) return env;
       if (assigned != nullptr) {
         // Only introduce phis for variables assigned in this loop.
         for (int i = EnvironmentCount() - 1; i >= 0; i--) {
@@ -1625,7 +1719,10 @@
           env->locals[i] = builder_->Phi(local_type_vec_[i], 1, &env->locals[i],
                                          env->control);
         }
-        return;
+        SsaEnv* loop_body_env = Split(env);
+        builder_->StackCheck(position(), &(loop_body_env->effect),
+                             &(loop_body_env->control));
+        return loop_body_env;
       }
     }
 
@@ -1634,6 +1731,11 @@
       env->locals[i] =
           builder_->Phi(local_type_vec_[i], 1, &env->locals[i], env->control);
     }
+
+    SsaEnv* loop_body_env = Split(env);
+    builder_->StackCheck(position(), &(loop_body_env->effect),
+                         &(loop_body_env->control));
+    return loop_body_env;
   }
 
   // Create a complete copy of the {from}.
@@ -1766,7 +1868,7 @@
 bool DecodeLocalDecls(AstLocalDecls& decls, const byte* start,
                       const byte* end) {
   AccountingAllocator allocator;
-  Zone tmp(&allocator);
+  Zone tmp(&allocator, ZONE_NAME);
   FunctionBody body = {nullptr, nullptr, nullptr, start, end};
   WasmFullDecoder decoder(&tmp, nullptr, body);
   return decoder.DecodeLocalDecls(decls);
@@ -1785,7 +1887,7 @@
 
 DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
                             FunctionBody& body) {
-  Zone zone(allocator);
+  Zone zone(allocator, ZONE_NAME);
   WasmFullDecoder decoder(&zone, nullptr, body);
   decoder.Decode();
   return decoder.toResult<DecodeStruct*>(nullptr);
@@ -1793,7 +1895,7 @@
 
 DecodeResult BuildTFGraph(AccountingAllocator* allocator, TFBuilder* builder,
                           FunctionBody& body) {
-  Zone zone(allocator);
+  Zone zone(allocator, ZONE_NAME);
   WasmFullDecoder decoder(&zone, builder, body);
   decoder.Decode();
   return decoder.toResult<DecodeStruct*>(nullptr);
@@ -1813,7 +1915,7 @@
 bool PrintAst(AccountingAllocator* allocator, const FunctionBody& body,
               std::ostream& os,
               std::vector<std::tuple<uint32_t, int, int>>* offset_table) {
-  Zone zone(allocator);
+  Zone zone(allocator, ZONE_NAME);
   WasmFullDecoder decoder(&zone, nullptr, body);
   int line_nr = 0;
 
diff --git a/src/wasm/ast-decoder.h b/src/wasm/ast-decoder.h
index 8c2c2c4..9ce323e 100644
--- a/src/wasm/ast-decoder.h
+++ b/src/wasm/ast-decoder.h
@@ -5,6 +5,8 @@
 #ifndef V8_WASM_AST_DECODER_H_
 #define V8_WASM_AST_DECODER_H_
 
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
 #include "src/signature.h"
 #include "src/wasm/decoder.h"
 #include "src/wasm/wasm-opcodes.h"
@@ -156,6 +158,9 @@
       case kLocalF64:
         *result = kAstF64;
         return true;
+      case kLocalS128:
+        *result = kAstS128;
+        return true;
       default:
         *result = kAstStmt;
         return false;
@@ -181,14 +186,19 @@
 };
 
 struct CallIndirectOperand {
+  uint32_t table_index;
   uint32_t index;
   FunctionSig* sig;
   unsigned length;
   inline CallIndirectOperand(Decoder* decoder, const byte* pc) {
-    unsigned len1 = 0;
-    unsigned len2 = 0;
-    index = decoder->checked_read_u32v(pc, 1 + len1, &len2, "signature index");
-    length = len1 + len2;
+    unsigned len = 0;
+    index = decoder->checked_read_u32v(pc, 1, &len, "signature index");
+    table_index = decoder->checked_read_u8(pc, 1 + len, "table index");
+    if (table_index != 0) {
+      decoder->error(pc, pc + 1 + len, "expected table index 0, found %u",
+                     table_index);
+    }
+    length = 1 + len;
     sig = nullptr;
   }
 };
@@ -206,6 +216,18 @@
   }
 };
 
+struct MemoryIndexOperand {
+  uint32_t index;
+  unsigned length;
+  inline MemoryIndexOperand(Decoder* decoder, const byte* pc) {
+    index = decoder->checked_read_u8(pc, 1, "memory index");
+    if (index != 0) {
+      decoder->error(pc, pc + 1, "expected memory index 0, found %u", index);
+    }
+    length = 1;
+  }
+};
+
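
For reference, a call_indirect under this decoding is laid out as (a sketch;
the concrete opcode byte value is omitted):

    <call_indirect opcode byte>
    varint32 signature_index
    0x00   // reserved table index; the decoder rejects anything else

and grow_memory / memory_size each carry a single reserved memory-index byte
that must likewise be 0.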
 struct BranchTableOperand {
   uint32_t table_count;
   const byte* start;
@@ -231,7 +253,7 @@
 class BranchTableIterator {
  public:
   unsigned cur_index() { return index_; }
-  bool has_next() { return index_ <= table_count_; }
+  bool has_next() { return decoder_->ok() && index_ <= table_count_; }
   uint32_t next() {
     DCHECK(has_next());
     index_++;
@@ -352,15 +374,18 @@
       : decls_encoded_size(0), total_local_count(0), local_types(zone) {}
 };
 
-bool DecodeLocalDecls(AstLocalDecls& decls, const byte* start, const byte* end);
-BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, size_t num_locals,
-                                           const byte* start, const byte* end);
+V8_EXPORT_PRIVATE bool DecodeLocalDecls(AstLocalDecls& decls, const byte* start,
+                                        const byte* end);
+V8_EXPORT_PRIVATE BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone,
+                                                             size_t num_locals,
+                                                             const byte* start,
+                                                             const byte* end);
 
 // Computes the length of the opcode at the given address.
-unsigned OpcodeLength(const byte* pc, const byte* end);
+V8_EXPORT_PRIVATE unsigned OpcodeLength(const byte* pc, const byte* end);
 
 // A simple forward iterator for bytecodes.
-class BytecodeIterator : public Decoder {
+class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
  public:
   // If one wants to iterate over the bytecode without looking at {pc_offset()}.
   class iterator {
diff --git a/src/wasm/decoder.h b/src/wasm/decoder.h
index d5c9f43..fc8f110 100644
--- a/src/wasm/decoder.h
+++ b/src/wasm/decoder.h
@@ -173,48 +173,14 @@
     return traceOffEnd<uint32_t>();
   }
 
-  // Reads a LEB128 variable-length 32-bit integer and advances {pc_}.
+  // Reads a LEB128 variable-length unsigned 32-bit integer and advances {pc_}.
   uint32_t consume_u32v(const char* name = nullptr) {
-    TRACE("  +%d  %-20s: ", static_cast<int>(pc_ - start_),
-          name ? name : "varint");
-    if (checkAvailable(1)) {
-      const byte* pos = pc_;
-      const byte* end = pc_ + 5;
-      if (end > limit_) end = limit_;
-
-      uint32_t result = 0;
-      int shift = 0;
-      byte b = 0;
-      while (pc_ < end) {
-        b = *pc_++;
-        TRACE("%02x ", b);
-        result = result | ((b & 0x7F) << shift);
-        if ((b & 0x80) == 0) break;
-        shift += 7;
-      }
-
-      int length = static_cast<int>(pc_ - pos);
-      if (pc_ == end && (b & 0x80)) {
-        error(pc_ - 1, "varint too large");
-      } else if (length == 0) {
-        error(pc_, "varint of length 0");
-      } else {
-        TRACE("= %u\n", result);
-      }
-      return result;
-    }
-    return traceOffEnd<uint32_t>();
+    return consume_leb<uint32_t, false>(name);
   }
 
-  // Consume {size} bytes and send them to the bit bucket, advancing {pc_}.
-  void consume_bytes(int size) {
-    TRACE("  +%d  %-20s: %d bytes\n", static_cast<int>(pc_ - start_), "skip",
-          size);
-    if (checkAvailable(size)) {
-      pc_ += size;
-    } else {
-      pc_ = limit_;
-    }
+  // Reads a LEB128 variable-length signed 32-bit integer and advances {pc_}.
+  int32_t consume_i32v(const char* name = nullptr) {
+    return consume_leb<int32_t, true>(name);
   }
 
   // Consume {size} bytes and send them to the bit bucket, advancing {pc_}.
@@ -287,7 +253,7 @@
   template <typename T>
   Result<T> toResult(T val) {
     Result<T> result;
-    if (error_pc_) {
+    if (failed()) {
       TRACE("Result error: %s\n", error_msg_.get());
       result.error_code = kError;
       result.start = start_;
@@ -313,8 +279,8 @@
     error_msg_.reset();
   }
 
-  bool ok() const { return error_pc_ == nullptr; }
-  bool failed() const { return !!error_msg_; }
+  bool ok() const { return error_msg_ == nullptr; }
+  bool failed() const { return !ok(); }
   bool more() const { return pc_ < limit_; }
 
   const byte* start() { return start_; }
@@ -383,6 +349,47 @@
     }
     return result;
   }
+
+  template <typename IntType, bool is_signed>
+  IntType consume_leb(const char* name = nullptr) {
+    TRACE("  +%d  %-20s: ", static_cast<int>(pc_ - start_),
+          name ? name : "varint");
+    if (checkAvailable(1)) {
+      const int kMaxLength = (sizeof(IntType) * 8 + 6) / 7;
+      const byte* pos = pc_;
+      const byte* end = pc_ + kMaxLength;
+      if (end > limit_) end = limit_;
+
+      IntType result = 0;
+      int shift = 0;
+      byte b = 0;
+      while (pc_ < end) {
+        b = *pc_++;
+        TRACE("%02x ", b);
+        result = result | (static_cast<IntType>(b & 0x7F) << shift);
+        shift += 7;
+        if ((b & 0x80) == 0) break;
+      }
+
+      int length = static_cast<int>(pc_ - pos);
+      if (pc_ == end && (b & 0x80)) {
+        error(pc_ - 1, "varint too large");
+      } else if (length == 0) {
+        error(pc_, "varint of length 0");
+      } else if (is_signed) {
+        if (length < kMaxLength) {
+          int sign_ext_shift = 8 * sizeof(IntType) - shift;
+          // Perform sign extension.
+          result = (result << sign_ext_shift) >> sign_ext_shift;
+        }
+        TRACE("= %" PRIi64 "\n", static_cast<int64_t>(result));
+      } else {
+        TRACE("= %" PRIu64 "\n", static_cast<uint64_t>(result));
+      }
+      return result;
+    }
+    return traceOffEnd<uint32_t>();
+  }
 };
 
 #undef TRACE
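
The consume_leb template above folds the unsigned and signed paths into one
implementation. A self-contained sketch of the same algorithm, assuming
in-bounds input (bounds checks, error reporting and tracing omitted), with
worked examples:

    #include <cstddef>
    #include <cstdint>

    template <typename IntType, bool is_signed>
    IntType DecodeLeb(const uint8_t* pc, size_t* length) {
      const int kMaxLength = (sizeof(IntType) * 8 + 6) / 7;  // 5 for 32 bits
      IntType result = 0;
      int shift = 0;
      int i = 0;
      uint8_t b;
      do {
        b = pc[i++];
        result |= static_cast<IntType>(b & 0x7F) << shift;
        shift += 7;
      } while ((b & 0x80) != 0 && i < kMaxLength);
      if (is_signed && i < kMaxLength) {
        // Sign-extend from the topmost bit actually read.
        int sign_ext_shift = 8 * static_cast<int>(sizeof(IntType)) - shift;
        result = (result << sign_ext_shift) >> sign_ext_shift;
      }
      *length = i;
      return result;
    }

    // DecodeLeb<uint32_t, false> on {0xE5, 0x8E, 0x26} returns 624485.
    // DecodeLeb<int32_t, true>  on {0xC0, 0xBB, 0x78} returns -123456.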
diff --git a/src/wasm/managed.h b/src/wasm/managed.h
new file mode 100644
index 0000000..785d5d3
--- /dev/null
+++ b/src/wasm/managed.h
@@ -0,0 +1,56 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_MANAGED_H_
+#define V8_WASM_MANAGED_H_
+
+#include "src/factory.h"
+#include "src/global-handles.h"
+#include "src/handles.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+// An object that wraps a pointer to a C++ object and optionally deletes it
+// when the managed wrapper object is garbage collected.
+template <class CppType>
+class Managed : public Foreign {
+ public:
+  V8_INLINE CppType* get() {
+    return reinterpret_cast<CppType*>(foreign_address());
+  }
+
+  static Handle<Managed<CppType>> New(Isolate* isolate, CppType* ptr,
+                                      bool delete_on_gc = true) {
+    Handle<Foreign> foreign =
+        isolate->factory()->NewForeign(reinterpret_cast<Address>(ptr));
+    Handle<Managed<CppType>> handle(
+        reinterpret_cast<Managed<CppType>*>(*foreign), isolate);
+    if (delete_on_gc) {
+      RegisterWeakCallbackForDelete(isolate, handle);
+    }
+    return handle;
+  }
+
+ private:
+  static void RegisterWeakCallbackForDelete(Isolate* isolate,
+                                            Handle<Managed<CppType>> handle) {
+    Handle<Object> global_handle = isolate->global_handles()->Create(*handle);
+    GlobalHandles::MakeWeak(global_handle.location(), global_handle.location(),
+                            &Managed<CppType>::Delete,
+                            v8::WeakCallbackType::kFinalizer);
+  }
+  static void Delete(const v8::WeakCallbackInfo<void>& data) {
+    Managed<CppType>** p =
+        reinterpret_cast<Managed<CppType>**>(data.GetParameter());
+    delete (*p)->get();
+    (*p)->set_foreign_address(0);
+    GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
+  }
+};
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_WASM_MANAGED_H_
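A usage sketch for the new Managed<T> wrapper (illustrative; it compiles only inside the V8 tree against the header above, and WireBytes is a hypothetical payload type):

#include <vector>
#include "src/wasm/managed.h"

namespace v8 {
namespace internal {

struct WireBytes { std::vector<byte> bytes; };  // hypothetical payload

void ManagedExample(Isolate* isolate) {
  // Ownership passes to the GC: the weak finalizer callback deletes the
  // C++ object and clears the foreign address, preventing double frees.
  Handle<Managed<WireBytes>> managed =
      Managed<WireBytes>::New(isolate, new WireBytes());
  WireBytes* raw = managed->get();  // borrow the raw pointer
  (void)raw;
}

}  // namespace internal
}  // namespace v8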
diff --git a/src/wasm/module-decoder.cc b/src/wasm/module-decoder.cc
index 9006561..c8eace3 100644
--- a/src/wasm/module-decoder.cc
+++ b/src/wasm/module-decoder.cc
@@ -6,6 +6,7 @@
 
 #include "src/base/functional.h"
 #include "src/base/platform/platform.h"
+#include "src/flags.h"
 #include "src/macro-assembler.h"
 #include "src/objects.h"
 #include "src/v8.h"
@@ -76,6 +77,12 @@
     return static_cast<uint32_t>(section_end_ - section_start_);
   }
 
+  inline const byte* payload_start() const { return payload_start_; }
+
+  inline uint32_t payload_length() const {
+    return static_cast<uint32_t>(section_end_ - payload_start_);
+  }
+
   inline const byte* section_end() const { return section_end_; }
 
   // Advances to the next section, checking that decoding the current section
@@ -96,6 +103,7 @@
   Decoder& decoder_;
   WasmSectionCode section_code_;
   const byte* section_start_;
+  const byte* payload_start_;
   const byte* section_end_;
 
   // Reads the section code/name at the current position and sets up
@@ -110,6 +118,7 @@
       // Read and check the section size.
       uint32_t section_length = decoder_.consume_u32v("section length");
       section_start_ = decoder_.pc();
+      payload_start_ = section_start_;
       if (decoder_.checkAvailable(section_length)) {
         // Get the limit of the section within the module.
         section_end_ = section_start_ + section_length;
@@ -119,7 +128,7 @@
       }
 
       if (section_code == kUnknownSectionCode) {
-        // Check for the known "names" section.
+        // Check for the known "name" section.
         uint32_t string_length = decoder_.consume_u32v("section name length");
         const byte* section_name_start = decoder_.pc();
         decoder_.consume_bytes(string_length, "section name");
@@ -128,6 +137,7 @@
           section_code_ = kUnknownSectionCode;
           return;
         }
+        payload_start_ = decoder_.pc();
 
         TRACE("  +%d  section name        : \"%.*s\"\n",
               static_cast<int>(section_name_start - decoder_.start()),
@@ -299,29 +309,38 @@
             // ===== Imported table ==========================================
             import->index =
                 static_cast<uint32_t>(module->function_tables.size());
-            module->function_tables.push_back(
-                {0, 0, std::vector<int32_t>(), true, false});
-            expect_u8("element type", 0x20);
+            module->function_tables.push_back({0, 0, false,
+                                               std::vector<int32_t>(), true,
+                                               false, SignatureMap()});
+            expect_u8("element type", kWasmAnyFunctionTypeForm);
             WasmIndirectFunctionTable* table = &module->function_tables.back();
-            consume_resizable_limits("element count", "elements", kMaxUInt32,
-                                     &table->size, &table->max_size);
+            consume_resizable_limits(
+                "element count", "elements", WasmModule::kV8MaxTableSize,
+                &table->min_size, &table->has_max, WasmModule::kV8MaxTableSize,
+                &table->max_size);
             break;
           }
           case kExternalMemory: {
             // ===== Imported memory =========================================
-            //            import->index =
-            //            static_cast<uint32_t>(module->memories.size());
-            // TODO(titzer): imported memories
+            bool has_max = false;
+            consume_resizable_limits("memory", "pages", WasmModule::kV8MaxPages,
+                                     &module->min_mem_pages, &has_max,
+                                     WasmModule::kSpecMaxPages,
+                                     &module->max_mem_pages);
+            module->has_memory = true;
             break;
           }
           case kExternalGlobal: {
             // ===== Imported global =========================================
             import->index = static_cast<uint32_t>(module->globals.size());
             module->globals.push_back(
-                {kAstStmt, false, NO_INIT, 0, true, false});
+                {kAstStmt, false, WasmInitExpr(), 0, true, false});
             WasmGlobal* global = &module->globals.back();
             global->type = consume_value_type();
             global->mutability = consume_u8("mutability") != 0;
+            if (global->mutability) {
+              error("mutable globals cannot be imported");
+            }
             break;
           }
           default:
@@ -362,14 +381,18 @@
       if (table_count > 1) {
         error(pos, pos, "invalid table count %d, maximum 1", table_count);
       }
+      if (module->function_tables.size() < 1) {
+        module->function_tables.push_back({0, 0, false, std::vector<int32_t>(),
+                                           false, false, SignatureMap()});
+      }
 
       for (uint32_t i = 0; ok() && i < table_count; i++) {
-        module->function_tables.push_back(
-            {0, 0, std::vector<int32_t>(), false, false});
         WasmIndirectFunctionTable* table = &module->function_tables.back();
         expect_u8("table type", kWasmAnyFunctionTypeForm);
-        consume_resizable_limits("table elements", "elements", kMaxUInt32,
-                                 &table->size, &table->max_size);
+        consume_resizable_limits("table elements", "elements",
+                                 WasmModule::kV8MaxTableSize, &table->min_size,
+                                 &table->has_max, WasmModule::kV8MaxTableSize,
+                                 &table->max_size);
       }
       section_iter.advance();
     }
@@ -384,24 +407,33 @@
       }
 
       for (uint32_t i = 0; ok() && i < memory_count; i++) {
-        consume_resizable_limits("memory", "pages", WasmModule::kMaxLegalPages,
-                                 &module->min_mem_pages,
-                                 &module->max_mem_pages);
+        bool has_max = false;
+        consume_resizable_limits(
+            "memory", "pages", WasmModule::kV8MaxPages, &module->min_mem_pages,
+            &has_max, WasmModule::kSpecMaxPages, &module->max_mem_pages);
       }
+      module->has_memory = true;
       section_iter.advance();
     }
 
     // ===== Global section ==================================================
     if (section_iter.section_code() == kGlobalSectionCode) {
       uint32_t globals_count = consume_u32v("globals count");
-      module->globals.reserve(SafeReserve(globals_count));
+      uint32_t imported_globals = static_cast<uint32_t>(module->globals.size());
+      if (!IsWithinLimit(std::numeric_limits<int32_t>::max(), globals_count,
+                         imported_globals)) {
+        error(pos, pos, "too many imported+defined globals: %u + %u",
+              imported_globals, globals_count);
+      }
+      module->globals.reserve(SafeReserve(imported_globals + globals_count));
       for (uint32_t i = 0; ok() && i < globals_count; ++i) {
         TRACE("DecodeGlobal[%d] module+%d\n", i,
               static_cast<int>(pc_ - start_));
         // Add an uninitialized global and pass a pointer to it.
-        module->globals.push_back({kAstStmt, false, NO_INIT, 0, false, false});
+        module->globals.push_back(
+            {kAstStmt, false, WasmInitExpr(), 0, false, false});
         WasmGlobal* global = &module->globals.back();
-        DecodeGlobalInModule(module, i, global);
+        DecodeGlobalInModule(module, i + imported_globals, global);
       }
       section_iter.advance();
     }
@@ -448,7 +480,12 @@
           case kExternalGlobal: {
             WasmGlobal* global = nullptr;
             exp->index = consume_global_index(module, &global);
-            if (global) global->exported = true;
+            if (global) {
+              if (global->mutability) {
+                error("mutable globals cannot be exported");
+              }
+              global->exported = true;
+            }
             break;
           }
           default:
@@ -491,8 +528,10 @@
       WasmFunction* func;
       const byte* pos = pc_;
       module->start_function_index = consume_func_index(module, &func);
-      if (func && func->sig->parameter_count() > 0) {
-        error(pos, "invalid start function: non-zero parameter count");
+      if (func &&
+          (func->sig->parameter_count() > 0 || func->sig->return_count() > 0)) {
+        error(pos,
+              "invalid start function: non-zero parameter or return count");
       }
       section_iter.advance();
     }
@@ -501,8 +540,17 @@
     if (section_iter.section_code() == kElementSectionCode) {
       uint32_t element_count = consume_u32v("element count");
       for (uint32_t i = 0; ok() && i < element_count; ++i) {
+        const byte* pos = pc();
         uint32_t table_index = consume_u32v("table index");
-        if (table_index != 0) error("illegal table index != 0");
+        if (table_index != 0) {
+          error(pos, pos, "illegal table index %u != 0", table_index);
+        }
+        WasmIndirectFunctionTable* table = nullptr;
+        if (table_index >= module->function_tables.size()) {
+          error(pos, pos, "out of bounds table index %u", table_index);
+        } else {
+          table = &module->function_tables[table_index];
+        }
         WasmInitExpr offset = consume_init_expr(module, kAstI32);
         uint32_t num_elem = consume_u32v("number of elements");
         std::vector<uint32_t> vector;
@@ -511,7 +559,13 @@
         init->entries.reserve(SafeReserve(num_elem));
         for (uint32_t j = 0; ok() && j < num_elem; j++) {
           WasmFunction* func = nullptr;
-          init->entries.push_back(consume_func_index(module, &func));
+          uint32_t index = consume_func_index(module, &func);
+          init->entries.push_back(index);
+          if (table && index < module->functions.size()) {
+            // Canonicalize signature indices during decoding.
+            // TODO(titzer): suboptimal, redundant when verifying only.
+            table->map.FindOrInsert(module->functions[index].sig);
+          }
         }
       }
 
@@ -532,6 +586,14 @@
         uint32_t size = consume_u32v("body size");
         function->code_start_offset = pc_offset();
         function->code_end_offset = pc_offset() + size;
+        if (verify_functions) {
+          ModuleEnv module_env;
+          module_env.module = module;
+          module_env.origin = module->origin;
+
+          VerifyFunctionBody(i + module->num_imported_functions, &module_env,
+                             function);
+        }
         consume_bytes(size, "function body");
       }
       section_iter.advance();
@@ -542,12 +604,16 @@
       uint32_t data_segments_count = consume_u32v("data segments count");
       module->data_segments.reserve(SafeReserve(data_segments_count));
       for (uint32_t i = 0; ok() && i < data_segments_count; ++i) {
+        if (!module->has_memory) {
+          error("cannot load data without memory");
+          break;
+        }
         TRACE("DecodeDataSegment[%d] module+%d\n", i,
               static_cast<int>(pc_ - start_));
         module->data_segments.push_back({
-            NO_INIT,  // dest_addr
-            0,        // source_offset
-            0         // source_size
+            WasmInitExpr(),  // dest_addr
+            0,               // source_offset
+            0                // source_size
         });
         WasmDataSegment* segment = &module->data_segments.back();
         DecodeDataSegmentInModule(module, segment);
@@ -557,24 +623,20 @@
 
     // ===== Name section ====================================================
     if (section_iter.section_code() == kNameSectionCode) {
-      const byte* pos = pc_;
       uint32_t functions_count = consume_u32v("functions count");
-      if (functions_count != module->num_declared_functions) {
-        error(pos, pos, "function name count %u mismatch (%u expected)",
-              functions_count, module->num_declared_functions);
-      }
 
       for (uint32_t i = 0; ok() && i < functions_count; ++i) {
-        WasmFunction* function =
-            &module->functions[i + module->num_imported_functions];
-        function->name_offset = consume_string(&function->name_length, false);
+        uint32_t function_name_length = 0;
+        uint32_t name_offset = consume_string(&function_name_length, false);
+        uint32_t func_index = i;
+        if (func_index < module->functions.size()) {
+          module->functions[func_index].name_offset = name_offset;
+          module->functions[func_index].name_length = function_name_length;
+        }
 
         uint32_t local_names_count = consume_u32v("local names count");
         for (uint32_t j = 0; ok() && j < local_names_count; j++) {
-          uint32_t unused = 0;
-          uint32_t offset = consume_string(&unused, false);
-          USE(unused);
-          USE(offset);
+          skip_string();
         }
       }
       section_iter.advance();
@@ -588,10 +650,12 @@
 
     if (ok()) {
       CalculateGlobalOffsets(module);
-      PreinitializeIndirectFunctionTables(module);
     }
     const WasmModule* finished_module = module;
     ModuleResult result = toResult(finished_module);
+    if (verify_functions && result.ok()) {
+      result.MoveFrom(result_);  // Copy error code and location.
+    }
     if (FLAG_dump_wasm_module) DumpModule(module, result);
     return result;
   }
@@ -647,13 +711,22 @@
     const byte* pos = pc();
     global->init = consume_init_expr(module, kAstStmt);
     switch (global->init.kind) {
-      case WasmInitExpr::kGlobalIndex:
-        if (global->init.val.global_index >= index) {
-          error("invalid global index in init expression");
-        } else if (module->globals[index].type != global->type) {
-          error("type mismatch in global initialization");
+      case WasmInitExpr::kGlobalIndex: {
+        uint32_t other_index = global->init.val.global_index;
+        if (other_index >= index) {
+          error(pos, pos,
+                "invalid global index in init expression, "
+                "index %u, other_index %u",
+                index, other_index);
+        } else if (module->globals[other_index].type != global->type) {
+          error(pos, pos,
+                "type mismatch in global initialization "
+                "(from global #%u), expected %s, got %s",
+                other_index, WasmOpcodes::TypeName(global->type),
+                WasmOpcodes::TypeName(module->globals[other_index].type));
         }
         break;
+      }
       default:
         if (global->type != TypeOf(module, global->init)) {
           error(pos, pos,
@@ -705,30 +778,6 @@
     module->globals_size = offset;
   }
 
-  // TODO(titzer): this only works without overlapping initializations from
-  // global bases for entries
-  void PreinitializeIndirectFunctionTables(WasmModule* module) {
-    // Fill all tables with invalid entries first.
-    for (WasmIndirectFunctionTable& table : module->function_tables) {
-      table.values.resize(table.size);
-      for (size_t i = 0; i < table.size; i++) {
-        table.values[i] = kInvalidFunctionIndex;
-      }
-    }
-    for (WasmTableInit& init : module->table_inits) {
-      if (init.offset.kind != WasmInitExpr::kI32Const) continue;
-      if (init.table_index >= module->function_tables.size()) continue;
-      WasmIndirectFunctionTable& table =
-          module->function_tables[init.table_index];
-      for (size_t i = 0; i < init.entries.size(); i++) {
-        size_t index = i + init.offset.val.i32_const;
-        if (index < table.values.size()) {
-          table.values[index] = init.entries[i];
-        }
-      }
-    }
-  }
-
   // Verifies the body (code) of a given function.
   void VerifyFunctionBody(uint32_t func_num, ModuleEnv* menv,
                           WasmFunction* function) {
@@ -766,7 +815,7 @@
     uint32_t offset = pc_offset();
     const byte* string_start = pc_;
     // Consume bytes before validation to guarantee that the string is not oob.
-    consume_bytes(*length, "string");
+    if (*length > 0) consume_bytes(*length, "string");
     if (ok() && validate_utf8 &&
         !unibrow::Utf8::Validate(string_start, *length)) {
       error(string_start, "no valid UTF-8 string");
@@ -774,6 +823,12 @@
     return offset;
   }
 
+  // Skips over a length-prefixed string, but checks that it is within bounds.
+  void skip_string() {
+    uint32_t length = consume_u32v("string length");
+    consume_bytes(length, "string");
+  }
+
   uint32_t consume_sig_index(WasmModule* module, FunctionSig** sig) {
     const byte* pos = pc_;
     uint32_t sig_index = consume_u32v("signature index");
@@ -815,30 +870,35 @@
   }
 
   void consume_resizable_limits(const char* name, const char* units,
-                                uint32_t max_value, uint32_t* initial,
+                                uint32_t max_initial, uint32_t* initial,
+                                bool* has_max, uint32_t max_maximum,
                                 uint32_t* maximum) {
     uint32_t flags = consume_u32v("resizable limits flags");
     const byte* pos = pc();
     *initial = consume_u32v("initial size");
-    if (*initial > max_value) {
+    *has_max = false;
+    if (*initial > max_initial) {
       error(pos, pos,
-            "initial %s size (%u %s) is larger than maximum allowable (%u)",
-            name, *initial, units, max_value);
+            "initial %s size (%u %s) is larger than implementation limit (%u)",
+            name, *initial, units, max_initial);
     }
     if (flags & 1) {
+      *has_max = true;
       pos = pc();
       *maximum = consume_u32v("maximum size");
-      if (*maximum > max_value) {
-        error(pos, pos,
-              "maximum %s size (%u %s) is larger than maximum allowable (%u)",
-              name, *maximum, units, max_value);
+      if (*maximum > max_maximum) {
+        error(
+            pos, pos,
+            "maximum %s size (%u %s) is larger than implementation limit (%u)",
+            name, *maximum, units, max_maximum);
       }
       if (*maximum < *initial) {
         error(pos, pos, "maximum %s size (%u %s) is less than initial (%u %s)",
               name, *maximum, units, *initial, units);
       }
     } else {
-      *maximum = 0;
+      *has_max = false;
+      *maximum = max_initial;
     }
   }
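The limits encoding consumed above is flags:u32v, initial:u32v, then maximum:u32v only if bit 0 of the flags is set. A standalone sketch (illustrative; the real fields are LEB128 varints, which coincide with single bytes for values below 128):

#include <cstdint>
#include <cstdio>

void DecodeLimits(const uint8_t* p) {
  uint32_t flags = *p++;
  uint32_t initial = *p++;
  bool has_max = (flags & 1) != 0;
  uint32_t maximum = has_max ? *p++ : 0;
  printf("initial=%u has_max=%d maximum=%u\n", initial, has_max, maximum);
}

int main() {
  const uint8_t no_max[] = {0x00, 0x01};          // initial=1, no maximum
  const uint8_t with_max[] = {0x01, 0x01, 0x10};  // initial=1, maximum=16
  DecodeLimits(no_max);
  DecodeLimits(with_max);
}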
 
@@ -860,6 +920,21 @@
     switch (opcode) {
       case kExprGetGlobal: {
         GlobalIndexOperand operand(this, pc() - 1);
+        if (module->globals.size() <= operand.index) {
+          error("global index is out of bounds");
+          expr.kind = WasmInitExpr::kNone;
+          expr.val.i32_const = 0;
+          break;
+        }
+        WasmGlobal* global = &module->globals[operand.index];
+        if (global->mutability || !global->imported) {
+          error(
+              "only immutable imported globals can be used in initializer "
+              "expressions");
+          expr.kind = WasmInitExpr::kNone;
+          expr.val.i32_const = 0;
+          break;
+        }
         expr.kind = WasmInitExpr::kGlobalIndex;
         expr.val.global_index = operand.index;
         len = operand.length;
@@ -925,7 +1000,12 @@
       case kLocalF64:
         return kAstF64;
       case kLocalS128:
-        return kAstS128;
+        if (origin_ != kAsmJsOrigin && FLAG_wasm_simd_prototype) {
+          return kAstS128;
+        } else {
+          error(pc_ - 1, "invalid local type");
+          return kAstStmt;
+        }
       default:
         error(pc_ - 1, "invalid local type");
         return kAstStmt;
@@ -999,6 +1079,8 @@
   }
 };
 
+// Finds the section with the given code. Returns a Vector of the payload, or
+// a null Vector if the section is not found or the module bytes are invalid.
 Vector<const byte> FindSection(const byte* module_start, const byte* module_end,
                                WasmSectionCode code) {
   Decoder decoder(module_start, module_end);
@@ -1012,10 +1094,10 @@
   WasmSectionIterator section_iter(decoder);
   while (section_iter.more()) {
     if (section_iter.section_code() == code) {
-      return Vector<const uint8_t>(section_iter.section_start(),
-                                   section_iter.section_length());
+      return Vector<const uint8_t>(section_iter.payload_start(),
+                                   section_iter.payload_length());
     }
-    decoder.consume_bytes(section_iter.section_length(), "section payload");
+    decoder.consume_bytes(section_iter.payload_length(), "section payload");
     section_iter.advance();
   }
 
@@ -1024,10 +1106,9 @@
 
 }  // namespace
 
-ModuleResult DecodeWasmModule(Isolate* isolate, Zone* zone,
-                              const byte* module_start, const byte* module_end,
-                              bool verify_functions, ModuleOrigin origin) {
-  size_t decode_memory_start = zone->allocation_size();
+ModuleResult DecodeWasmModule(Isolate* isolate, const byte* module_start,
+                              const byte* module_end, bool verify_functions,
+                              ModuleOrigin origin) {
   HistogramTimerScope wasm_decode_module_time_scope(
       isolate->counters()->wasm_decode_module_time());
   size_t size = module_end - module_start;
@@ -1036,12 +1117,18 @@
   // TODO(bradnelson): Improve histogram handling of size_t.
   isolate->counters()->wasm_module_size_bytes()->AddSample(
       static_cast<int>(size));
-  WasmModule* module = new WasmModule();
+  // Signatures are stored in zone memory, which has the same lifetime
+  // as the {module}.
+  Zone* zone = new Zone(isolate->allocator(), ZONE_NAME);
+  WasmModule* module = new WasmModule(zone, module_start);
   ModuleDecoder decoder(zone, module_start, module_end, origin);
   ModuleResult result = decoder.DecodeModule(module, verify_functions);
   // TODO(bradnelson): Improve histogram handling of size_t.
+  // TODO(titzer): this isn't accurate, since it doesn't count the data
+  // allocated on the C++ heap.
+  // https://bugs.chromium.org/p/chromium/issues/detail?id=657320
   isolate->counters()->wasm_decode_module_peak_memory_bytes()->AddSample(
-      static_cast<int>(zone->allocation_size() - decode_memory_start));
+      static_cast<int>(zone->allocation_size()));
   return result;
 }
 
@@ -1053,7 +1140,7 @@
 
 WasmInitExpr DecodeWasmInitExprForTesting(const byte* start, const byte* end) {
   AccountingAllocator allocator;
-  Zone zone(&allocator);
+  Zone zone(&allocator, ZONE_NAME);
   ModuleDecoder decoder(&zone, start, end, kWasmOrigin);
   return decoder.DecodeInitExpr(start);
 }
@@ -1075,9 +1162,8 @@
   return decoder.DecodeSingleFunction(module_env, function);
 }
 
-FunctionOffsetsResult DecodeWasmFunctionOffsets(
-    const byte* module_start, const byte* module_end,
-    uint32_t num_imported_functions) {
+FunctionOffsetsResult DecodeWasmFunctionOffsets(const byte* module_start,
+                                                const byte* module_end) {
   // Find and decode the code section.
   Vector<const byte> code_section =
       FindSection(module_start, module_end, kCodeSectionCode);
@@ -1088,16 +1174,12 @@
     return decoder.toResult(std::move(table));
   }
 
-  // Reserve entries for the imported functions.
-  table.reserve(num_imported_functions);
-  for (uint32_t i = 0; i < num_imported_functions; i++) {
-    table.push_back(std::make_pair(0, 0));
+  uint32_t functions_count = decoder.consume_u32v("functions count");
+  // Reserve space for the entries, taking care of invalid input.
+  if (functions_count < static_cast<unsigned>(code_section.length()) / 2) {
+    table.reserve(functions_count);
   }
 
-  uint32_t functions_count = decoder.consume_u32v("functions count");
-  // Take care of invalid input here.
-  if (functions_count < static_cast<unsigned>(code_section.length()) / 2)
-    table.reserve(functions_count);
   int section_offset = static_cast<int>(code_section.start() - module_start);
   DCHECK_LE(0, section_offset);
   for (uint32_t i = 0; i < functions_count && decoder.ok(); ++i) {
@@ -1112,6 +1194,47 @@
   return decoder.toResult(std::move(table));
 }
 
+AsmJsOffsetsResult DecodeAsmJsOffsets(const byte* tables_start,
+                                      const byte* tables_end) {
+  AsmJsOffsets table;
+
+  Decoder decoder(tables_start, tables_end);
+  uint32_t functions_count = decoder.consume_u32v("functions count");
+  // Reserve space for the entries, taking care of invalid input.
+  if (functions_count < static_cast<unsigned>(tables_end - tables_start)) {
+    table.reserve(functions_count);
+  }
+
+  for (uint32_t i = 0; i < functions_count && decoder.ok(); ++i) {
+    uint32_t size = decoder.consume_u32v("table size");
+    if (size == 0) {
+      table.push_back(std::vector<std::pair<int, int>>());
+      continue;
+    }
+    if (!decoder.checkAvailable(size)) {
+      decoder.error("illegal asm function offset table size");
+    }
+    const byte* table_end = decoder.pc() + size;
+    uint32_t locals_size = decoder.consume_u32("locals size");
+    int last_byte_offset = locals_size;
+    int last_asm_position = 0;
+    std::vector<std::pair<int, int>> func_asm_offsets;
+    func_asm_offsets.reserve(size / 4);  // conservative estimate
+    while (decoder.ok() && decoder.pc() < table_end) {
+      last_byte_offset += decoder.consume_u32v("byte offset delta");
+      last_asm_position += decoder.consume_i32v("asm position delta");
+      func_asm_offsets.push_back({last_byte_offset, last_asm_position});
+    }
+    if (decoder.pc() != table_end) {
+      decoder.error("broken asm offset table");
+    }
+    table.push_back(std::move(func_asm_offsets));
+  }
+  if (decoder.more()) decoder.error("unexpected additional bytes");
+
+  return decoder.toResult(std::move(table));
+}
+
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
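The per-function payload decoded by DecodeAsmJsOffsets is a u32v size, a fixed four-byte locals size, then delta-encoded (byte offset, asm position) pairs. A standalone decoder sketch over a hand-built table (illustrative only; the helpers are ad-hoc LEB128 readers, not V8's Decoder):

#include <cstdint>
#include <cstdio>

static uint32_t u32v(const uint8_t** p) {  // unsigned LEB128
  uint32_t r = 0; int s = 0; uint8_t b;
  do { b = *(*p)++; r |= uint32_t(b & 0x7F) << s; s += 7; } while (b & 0x80);
  return r;
}
static int32_t i32v(const uint8_t** p) {   // signed LEB128
  int32_t r = 0; int s = 0; uint8_t b;
  do { b = *(*p)++; r |= int32_t(b & 0x7F) << s; s += 7; } while (b & 0x80);
  if (s < 32) r = (r << (32 - s)) >> (32 - s);
  return r;
}

int main() {
  // 1 function; table size 7 = 4 (fixed u32 locals size) + one delta pair.
  const uint8_t t[] = {0x01, 0x07, 0x02, 0x00, 0x00, 0x00,  // locals size 2
                       0x05, 0xE4, 0x00};  // +5 bytes, +100 asm positions
  const uint8_t* p = t;
  uint32_t functions = u32v(&p);
  uint32_t size = u32v(&p);
  const uint8_t* end = p + size;
  // The real locals size is a little-endian u32; the low byte suffices here.
  int byte_offset = int(p[0]);
  p += 4;
  int asm_pos = 0;
  while (p < end) {
    byte_offset += u32v(&p);
    asm_pos += i32v(&p);
    printf("functions=%u pair=(%d,%d)\n", functions, byte_offset, asm_pos);
  }
  // prints: functions=1 pair=(7,100)
}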
diff --git a/src/wasm/module-decoder.h b/src/wasm/module-decoder.h
index 22a313c..7cf5cfe 100644
--- a/src/wasm/module-decoder.h
+++ b/src/wasm/module-decoder.h
@@ -5,14 +5,24 @@
 #ifndef V8_WASM_MODULE_DECODER_H_
 #define V8_WASM_MODULE_DECODER_H_
 
+#include "src/globals.h"
 #include "src/wasm/ast-decoder.h"
 #include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-result.h"
 
 namespace v8 {
 namespace internal {
 namespace wasm {
+
+typedef Result<const WasmModule*> ModuleResult;
+typedef Result<WasmFunction*> FunctionResult;
+typedef std::vector<std::pair<int, int>> FunctionOffsets;
+typedef Result<FunctionOffsets> FunctionOffsetsResult;
+typedef std::vector<std::vector<std::pair<int, int>>> AsmJsOffsets;
+typedef Result<AsmJsOffsets> AsmJsOffsetsResult;
+
 // Decodes the bytes of a WASM module between {module_start} and {module_end}.
-V8_EXPORT_PRIVATE ModuleResult DecodeWasmModule(Isolate* isolate, Zone* zone,
+V8_EXPORT_PRIVATE ModuleResult DecodeWasmModule(Isolate* isolate,
                                                 const byte* module_start,
                                                 const byte* module_end,
                                                 bool verify_functions,
@@ -20,23 +30,33 @@
 
 // Exposed for testing. Decodes a single function signature, allocating it
 // in the given zone. Returns {nullptr} upon failure.
-FunctionSig* DecodeWasmSignatureForTesting(Zone* zone, const byte* start,
-                                           const byte* end);
+V8_EXPORT_PRIVATE FunctionSig* DecodeWasmSignatureForTesting(Zone* zone,
+                                                             const byte* start,
+                                                             const byte* end);
 
 // Decodes the bytes of a WASM function between
 // {function_start} and {function_end}.
-FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone, ModuleEnv* env,
-                                  const byte* function_start,
-                                  const byte* function_end);
+V8_EXPORT_PRIVATE FunctionResult DecodeWasmFunction(Isolate* isolate,
+                                                    Zone* zone, ModuleEnv* env,
+                                                    const byte* function_start,
+                                                    const byte* function_end);
 
 // Extracts the function offset table from the wasm module bytes.
 // Returns a vector with <offset, length> entries, or failure if the wasm bytes
 // are detected as invalid. Note that this validation is not complete.
-FunctionOffsetsResult DecodeWasmFunctionOffsets(
-    const byte* module_start, const byte* module_end,
-    uint32_t num_imported_functions);
+FunctionOffsetsResult DecodeWasmFunctionOffsets(const byte* module_start,
+                                                const byte* module_end);
 
-WasmInitExpr DecodeWasmInitExprForTesting(const byte* start, const byte* end);
+V8_EXPORT_PRIVATE WasmInitExpr DecodeWasmInitExprForTesting(const byte* start,
+                                                            const byte* end);
+
+// Extracts the mapping from wasm byte offset to asm.js source position per
+// function.
+// Returns a vector of vectors with <byte_offset, source_position> entries, or
+// failure if the wasm bytes are detected as invalid. Note that this validation
+// is not complete.
+AsmJsOffsetsResult DecodeAsmJsOffsets(const byte* module_start,
+                                      const byte* module_end);
 
 }  // namespace wasm
 }  // namespace internal
diff --git a/src/wasm/signature-map.cc b/src/wasm/signature-map.cc
new file mode 100644
index 0000000..e7f8b2f
--- /dev/null
+++ b/src/wasm/signature-map.cc
@@ -0,0 +1,51 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/signature-map.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+uint32_t SignatureMap::FindOrInsert(FunctionSig* sig) {
+  auto pos = map_.find(sig);
+  if (pos != map_.end()) {
+    return pos->second;
+  } else {
+    uint32_t index = next_++;
+    map_[sig] = index;
+    return index;
+  }
+}
+
+int32_t SignatureMap::Find(FunctionSig* sig) const {
+  auto pos = map_.find(sig);
+  if (pos != map_.end()) {
+    return static_cast<int32_t>(pos->second);
+  } else {
+    return -1;
+  }
+}
+
+bool SignatureMap::CompareFunctionSigs::operator()(FunctionSig* a,
+                                                   FunctionSig* b) const {
+  if (a == b) return false;
+  if (a->return_count() < b->return_count()) return true;
+  if (a->return_count() > b->return_count()) return false;
+  if (a->parameter_count() < b->parameter_count()) return true;
+  if (a->parameter_count() > b->parameter_count()) return false;
+  for (size_t r = 0; r < a->return_count(); r++) {
+    if (a->GetReturn(r) < b->GetReturn(r)) return true;
+    if (a->GetReturn(r) > b->GetReturn(r)) return false;
+  }
+  for (size_t p = 0; p < a->parameter_count(); p++) {
+    if (a->GetParam(p) < b->GetParam(p)) return true;
+    if (a->GetParam(p) > b->GetParam(p)) return false;
+  }
+  return false;
+}
+
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
diff --git a/src/wasm/signature-map.h b/src/wasm/signature-map.h
new file mode 100644
index 0000000..3a7ed0a
--- /dev/null
+++ b/src/wasm/signature-map.h
@@ -0,0 +1,41 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_SIGNATURE_MAP_H_
+#define V8_WASM_SIGNATURE_MAP_H_
+
+#include <map>
+
+#include "src/signature.h"
+#include "src/wasm/wasm-opcodes.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// A signature map canonicalizes signatures into a range of indices so that
+// two different {FunctionSig} instances with the same contents map to the
+// same index.
+class V8_EXPORT_PRIVATE SignatureMap {
+ public:
+  // Gets the index for a signature, assigning a new index if necessary.
+  uint32_t FindOrInsert(FunctionSig* sig);
+
+  // Gets the index for a signature, returning {-1} if not found.
+  int32_t Find(FunctionSig* sig) const;
+
+ private:
+  // TODO(wasm): use a hashmap instead of an ordered map?
+  struct CompareFunctionSigs {
+    bool operator()(FunctionSig* a, FunctionSig* b) const;
+  };
+  uint32_t next_ = 0;
+  std::map<FunctionSig*, uint32_t, CompareFunctionSigs> map_;
+};
+
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_WASM_SIGNATURE_MAP_H_
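A standalone sketch of the canonicalization idea behind SignatureMap: structurally equal signatures map to the same index, which is what lets the element-section decoding above and the interpreter's indirect-call check compare signatures across tables (simplified types, not V8's FunctionSig):

#include <cstdint>
#include <cstdio>
#include <map>
#include <vector>

struct Sig { std::vector<int> rets, params; };
struct SigLess {  // mirrors CompareFunctionSigs: counts first, then contents
  bool operator()(const Sig& a, const Sig& b) const {
    if (a.rets.size() != b.rets.size()) return a.rets.size() < b.rets.size();
    if (a.params.size() != b.params.size())
      return a.params.size() < b.params.size();
    if (a.rets != b.rets) return a.rets < b.rets;
    return a.params < b.params;
  }
};
struct SigMap {
  uint32_t FindOrInsert(const Sig& s) {
    auto it = map_.find(s);
    if (it != map_.end()) return it->second;
    uint32_t idx = next_++;
    map_.emplace(s, idx);
    return idx;
  }
  int32_t Find(const Sig& s) const {
    auto it = map_.find(s);
    return it == map_.end() ? -1 : int32_t(it->second);
  }
  uint32_t next_ = 0;
  std::map<Sig, uint32_t, SigLess> map_;
};

int main() {
  SigMap m;
  Sig i_ii{{0 /*i32*/}, {0, 0}};
  Sig d_d{{1 /*f64*/}, {1}};
  uint32_t a = m.FindOrInsert(i_ii);  // 0
  uint32_t b = m.FindOrInsert(d_d);   // 1
  uint32_t c = m.FindOrInsert(i_ii);  // 0 again: same structure, same index
  printf("%u %u %u %d\n", a, b, c, m.Find(Sig{{2}, {}}));  // 0 1 0 -1
}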
diff --git a/src/wasm/wasm-debug.cc b/src/wasm/wasm-debug.cc
index 42a8e5f..11c2ef8 100644
--- a/src/wasm/wasm-debug.cc
+++ b/src/wasm/wasm-debug.cc
@@ -2,14 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/wasm/wasm-debug.h"
-
 #include "src/assert-scope.h"
 #include "src/debug/debug.h"
 #include "src/factory.h"
 #include "src/isolate.h"
 #include "src/wasm/module-decoder.h"
 #include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
 
 using namespace v8::internal;
 using namespace v8::internal::wasm;
@@ -19,68 +18,62 @@
 enum {
   kWasmDebugInfoWasmObj,
   kWasmDebugInfoWasmBytesHash,
-  kWasmDebugInfoFunctionByteOffsets,
-  kWasmDebugInfoFunctionScripts,
+  kWasmDebugInfoAsmJsOffsets,
   kWasmDebugInfoNumEntries
 };
 
-ByteArray *GetOrCreateFunctionOffsetTable(Handle<WasmDebugInfo> debug_info) {
-  Object *offset_table = debug_info->get(kWasmDebugInfoFunctionByteOffsets);
-  Isolate *isolate = debug_info->GetIsolate();
-  if (!offset_table->IsUndefined(isolate)) return ByteArray::cast(offset_table);
+// TODO(clemensh): Move asm.js offset tables to the compiled module.
+FixedArray *GetAsmJsOffsetTables(Handle<WasmDebugInfo> debug_info,
+                                 Isolate *isolate) {
+  Object *offset_tables = debug_info->get(kWasmDebugInfoAsmJsOffsets);
+  if (!offset_tables->IsUndefined(isolate)) {
+    return FixedArray::cast(offset_tables);
+  }
 
-  FunctionOffsetsResult function_offsets;
+  Handle<JSObject> wasm_instance(debug_info->wasm_instance(), isolate);
+  Handle<WasmCompiledModule> compiled_module(GetCompiledModule(*wasm_instance),
+                                             isolate);
+  DCHECK(compiled_module->has_asm_js_offset_tables());
+
+  AsmJsOffsetsResult asm_offsets;
   {
+    Handle<ByteArray> asm_offset_tables =
+        compiled_module->asm_js_offset_tables();
     DisallowHeapAllocation no_gc;
-    Handle<JSObject> wasm_object(debug_info->wasm_object(), isolate);
-    uint32_t num_imported_functions =
-        wasm::GetNumImportedFunctions(wasm_object);
-    SeqOneByteString *wasm_bytes =
-        wasm::GetWasmBytes(debug_info->wasm_object());
-    const byte *bytes_start = wasm_bytes->GetChars();
-    const byte *bytes_end = bytes_start + wasm_bytes->length();
-    function_offsets = wasm::DecodeWasmFunctionOffsets(bytes_start, bytes_end,
-                                                       num_imported_functions);
+    const byte *bytes_start = asm_offset_tables->GetDataStartAddress();
+    const byte *bytes_end = bytes_start + asm_offset_tables->length();
+    asm_offsets = wasm::DecodeAsmJsOffsets(bytes_start, bytes_end);
   }
-  DCHECK(function_offsets.ok());
-  size_t array_size = 2 * kIntSize * function_offsets.val.size();
-  CHECK_LE(array_size, static_cast<size_t>(kMaxInt));
-  ByteArray *arr =
-      *isolate->factory()->NewByteArray(static_cast<int>(array_size));
-  int idx = 0;
-  for (std::pair<int, int> p : function_offsets.val) {
-    arr->set_int(idx++, p.first);
-    arr->set_int(idx++, p.second);
+  // Wasm bytes must be valid and must contain an asm.js offset table.
+  DCHECK(asm_offsets.ok());
+  DCHECK_GE(static_cast<size_t>(kMaxInt), asm_offsets.val.size());
+  int num_functions = static_cast<int>(asm_offsets.val.size());
+  DCHECK_EQ(
+      wasm::GetNumberOfFunctions(handle(debug_info->wasm_instance())),
+      static_cast<int>(num_functions +
+                       compiled_module->module()->num_imported_functions));
+  Handle<FixedArray> all_tables =
+      isolate->factory()->NewFixedArray(num_functions);
+  debug_info->set(kWasmDebugInfoAsmJsOffsets, *all_tables);
+  for (int func = 0; func < num_functions; ++func) {
+    std::vector<std::pair<int, int>> &func_asm_offsets = asm_offsets.val[func];
+    if (func_asm_offsets.empty()) continue;
+    size_t array_size = 2 * kIntSize * func_asm_offsets.size();
+    CHECK_LE(array_size, static_cast<size_t>(kMaxInt));
+    ByteArray *arr =
+        *isolate->factory()->NewByteArray(static_cast<int>(array_size));
+    all_tables->set(func, arr);
+    int idx = 0;
+    for (std::pair<int, int> p : func_asm_offsets) {
+      // Byte offsets must be strictly monotonically increasing:
+      DCHECK(idx == 0 || p.first > arr->get_int(idx - 2));
+      arr->set_int(idx++, p.first);
+      arr->set_int(idx++, p.second);
+    }
+    DCHECK_EQ(arr->length(), idx * kIntSize);
   }
-  DCHECK_EQ(arr->length(), idx * kIntSize);
-  debug_info->set(kWasmDebugInfoFunctionByteOffsets, arr);
-
-  return arr;
+  return *all_tables;
 }
-
-std::pair<int, int> GetFunctionOffsetAndLength(Handle<WasmDebugInfo> debug_info,
-                                               int func_index) {
-  ByteArray *arr = GetOrCreateFunctionOffsetTable(debug_info);
-  DCHECK(func_index >= 0 && func_index < arr->length() / kIntSize / 2);
-
-  int offset = arr->get_int(2 * func_index);
-  int length = arr->get_int(2 * func_index + 1);
-  // Assert that it's distinguishable from the "illegal function index" return.
-  DCHECK(offset > 0 && length > 0);
-  return {offset, length};
-}
-
-Vector<const uint8_t> GetFunctionBytes(Handle<WasmDebugInfo> debug_info,
-                                       int func_index) {
-  SeqOneByteString *module_bytes =
-      wasm::GetWasmBytes(debug_info->wasm_object());
-  std::pair<int, int> offset_and_length =
-      GetFunctionOffsetAndLength(debug_info, func_index);
-  return Vector<const uint8_t>(
-      module_bytes->GetChars() + offset_and_length.first,
-      offset_and_length.second);
-}
-
 }  // namespace
 
 Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<JSObject> wasm) {
@@ -90,7 +83,7 @@
       factory->NewFixedArray(kWasmDebugInfoNumEntries, TENURED);
   arr->set(kWasmDebugInfoWasmObj, *wasm);
   int hash = 0;
-  Handle<SeqOneByteString> wasm_bytes(GetWasmBytes(*wasm), isolate);
+  Handle<SeqOneByteString> wasm_bytes = GetWasmBytes(wasm);
   {
     DisallowHeapAllocation no_gc;
     hash = StringHasher::HashSequentialString(
@@ -105,14 +98,9 @@
 bool WasmDebugInfo::IsDebugInfo(Object *object) {
   if (!object->IsFixedArray()) return false;
   FixedArray *arr = FixedArray::cast(object);
-  Isolate *isolate = arr->GetIsolate();
   return arr->length() == kWasmDebugInfoNumEntries &&
-         IsWasmObject(arr->get(kWasmDebugInfoWasmObj)) &&
-         arr->get(kWasmDebugInfoWasmBytesHash)->IsNumber() &&
-         (arr->get(kWasmDebugInfoFunctionByteOffsets)->IsUndefined(isolate) ||
-          arr->get(kWasmDebugInfoFunctionByteOffsets)->IsByteArray()) &&
-         (arr->get(kWasmDebugInfoFunctionScripts)->IsUndefined(isolate) ||
-          arr->get(kWasmDebugInfoFunctionScripts)->IsFixedArray());
+         IsWasmInstance(arr->get(kWasmDebugInfoWasmObj)) &&
+         arr->get(kWasmDebugInfoWasmBytesHash)->IsNumber();
 }
 
 WasmDebugInfo *WasmDebugInfo::cast(Object *object) {
@@ -120,119 +108,38 @@
   return reinterpret_cast<WasmDebugInfo *>(object);
 }
 
-JSObject *WasmDebugInfo::wasm_object() {
+JSObject *WasmDebugInfo::wasm_instance() {
   return JSObject::cast(get(kWasmDebugInfoWasmObj));
 }
 
-Script *WasmDebugInfo::GetFunctionScript(Handle<WasmDebugInfo> debug_info,
-                                         int func_index) {
+int WasmDebugInfo::GetAsmJsSourcePosition(Handle<WasmDebugInfo> debug_info,
+                                          int func_index, int byte_offset) {
   Isolate *isolate = debug_info->GetIsolate();
-  Object *scripts_obj = debug_info->get(kWasmDebugInfoFunctionScripts);
-  Handle<FixedArray> scripts;
-  if (scripts_obj->IsUndefined(isolate)) {
-    int num_functions = wasm::GetNumberOfFunctions(debug_info->wasm_object());
-    scripts = isolate->factory()->NewFixedArray(num_functions, TENURED);
-    debug_info->set(kWasmDebugInfoFunctionScripts, *scripts);
-  } else {
-    scripts = handle(FixedArray::cast(scripts_obj), isolate);
+  Handle<JSObject> instance(debug_info->wasm_instance(), isolate);
+  FixedArray *offset_tables = GetAsmJsOffsetTables(debug_info, isolate);
+
+  WasmCompiledModule *compiled_module = wasm::GetCompiledModule(*instance);
+  int num_imported_functions =
+      compiled_module->module()->num_imported_functions;
+  DCHECK_LE(num_imported_functions, func_index);
+  func_index -= num_imported_functions;
+  DCHECK_LT(func_index, offset_tables->length());
+  ByteArray *offset_table = ByteArray::cast(offset_tables->get(func_index));
+
+  // Binary search for the current byte offset.
+  int left = 0;                                       // inclusive
+  int right = offset_table->length() / kIntSize / 2;  // exclusive
+  DCHECK_LT(left, right);
+  while (right - left > 1) {
+    int mid = left + (right - left) / 2;
+    if (offset_table->get_int(2 * mid) <= byte_offset) {
+      left = mid;
+    } else {
+      right = mid;
+    }
   }
-
-  DCHECK(func_index >= 0 && func_index < scripts->length());
-  Object *script_or_undef = scripts->get(func_index);
-  if (!script_or_undef->IsUndefined(isolate)) {
-    return Script::cast(script_or_undef);
-  }
-
-  Handle<Script> script =
-      isolate->factory()->NewScript(isolate->factory()->empty_string());
-  scripts->set(func_index, *script);
-
-  script->set_type(Script::TYPE_WASM);
-  script->set_wasm_object(debug_info->wasm_object());
-  script->set_wasm_function_index(func_index);
-
-  int hash = 0;
-  debug_info->get(kWasmDebugInfoWasmBytesHash)->ToInt32(&hash);
-  char buffer[32];
-  SNPrintF(ArrayVector(buffer), "wasm://%08x/%d", hash, func_index);
-  Handle<String> source_url =
-      isolate->factory()->NewStringFromAsciiChecked(buffer, TENURED);
-  script->set_source_url(*source_url);
-
-  int func_bytes_len =
-      GetFunctionOffsetAndLength(debug_info, func_index).second;
-  Handle<FixedArray> line_ends = isolate->factory()->NewFixedArray(1, TENURED);
-  line_ends->set(0, Smi::FromInt(func_bytes_len));
-  line_ends->set_map(isolate->heap()->fixed_cow_array_map());
-  script->set_line_ends(*line_ends);
-
-  // TODO(clemensh): Register with the debugger. Note that we cannot call into
-  // JS at this point since this function is called from within stack trace
-  // collection (which means we cannot call Debug::OnAfterCompile in its
-  // current form). See crbug.com/641065.
-  if (false) isolate->debug()->OnAfterCompile(script);
-
-  return *script;
-}
-
-Handle<String> WasmDebugInfo::DisassembleFunction(
-    Handle<WasmDebugInfo> debug_info, int func_index) {
-  std::ostringstream disassembly_os;
-
-  {
-    Vector<const uint8_t> bytes_vec = GetFunctionBytes(debug_info, func_index);
-    DisallowHeapAllocation no_gc;
-
-    AccountingAllocator allocator;
-    bool ok = PrintAst(
-        &allocator, FunctionBodyForTesting(bytes_vec.start(), bytes_vec.end()),
-        disassembly_os, nullptr);
-    DCHECK(ok);
-    USE(ok);
-  }
-
-  // Unfortunately, we have to copy the string here.
-  std::string code_str = disassembly_os.str();
-  CHECK_LE(code_str.length(), static_cast<size_t>(kMaxInt));
-  Factory *factory = debug_info->GetIsolate()->factory();
-  Vector<const char> code_vec(code_str.data(),
-                              static_cast<int>(code_str.length()));
-  return factory->NewStringFromAscii(code_vec).ToHandleChecked();
-}
-
-Handle<FixedArray> WasmDebugInfo::GetFunctionOffsetTable(
-    Handle<WasmDebugInfo> debug_info, int func_index) {
-  class NullBuf : public std::streambuf {};
-  NullBuf null_buf;
-  std::ostream null_stream(&null_buf);
-
-  std::vector<std::tuple<uint32_t, int, int>> offset_table_vec;
-
-  {
-    Vector<const uint8_t> bytes_vec = GetFunctionBytes(debug_info, func_index);
-    DisallowHeapAllocation no_gc;
-
-    AccountingAllocator allocator;
-    bool ok = PrintAst(
-        &allocator, FunctionBodyForTesting(bytes_vec.start(), bytes_vec.end()),
-        null_stream, &offset_table_vec);
-    DCHECK(ok);
-    USE(ok);
-  }
-
-  size_t arr_size = 3 * offset_table_vec.size();
-  CHECK_LE(arr_size, static_cast<size_t>(kMaxInt));
-  Factory *factory = debug_info->GetIsolate()->factory();
-  Handle<FixedArray> offset_table =
-      factory->NewFixedArray(static_cast<int>(arr_size), TENURED);
-
-  int idx = 0;
-  for (std::tuple<uint32_t, int, int> elem : offset_table_vec) {
-    offset_table->set(idx++, Smi::FromInt(std::get<0>(elem)));
-    offset_table->set(idx++, Smi::FromInt(std::get<1>(elem)));
-    offset_table->set(idx++, Smi::FromInt(std::get<2>(elem)));
-  }
-  DCHECK_EQ(idx, offset_table->length());
-
-  return offset_table;
+  // There should be an entry for each position that could show up on the stack
+  // trace:
+  DCHECK_EQ(byte_offset, offset_table->get_int(2 * left));
+  return offset_table->get_int(2 * left + 1);
 }
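A standalone sketch of the lookup above: the table is a flat int array of (byte_offset, source_position) pairs with strictly increasing byte offsets, and the search returns the position of the greatest entry whose offset is <= the query (illustrative only):

#include <cassert>
#include <cstdio>

int LookupPosition(const int* table, int num_pairs, int byte_offset) {
  int left = 0;            // inclusive
  int right = num_pairs;   // exclusive
  assert(left < right);
  while (right - left > 1) {
    int mid = left + (right - left) / 2;
    if (table[2 * mid] <= byte_offset) left = mid; else right = mid;
  }
  return table[2 * left + 1];
}

int main() {
  const int table[] = {2, 100, 7, 140, 11, 205};  // three pairs
  printf("%d %d\n", LookupPosition(table, 3, 7),   // 140
         LookupPosition(table, 3, 11));            // 205
}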
diff --git a/src/wasm/wasm-debug.h b/src/wasm/wasm-debug.h
deleted file mode 100644
index 9659951..0000000
--- a/src/wasm/wasm-debug.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_DEBUG_H_
-#define V8_WASM_DEBUG_H_
-
-#include "src/handles.h"
-#include "src/objects.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-class WasmDebugInfo : public FixedArray {
- public:
-  static Handle<WasmDebugInfo> New(Handle<JSObject> wasm);
-
-  static bool IsDebugInfo(Object* object);
-  static WasmDebugInfo* cast(Object* object);
-
-  JSObject* wasm_object();
-
-  bool SetBreakPoint(int byte_offset);
-
-  // Get the Script for the specified function.
-  static Script* GetFunctionScript(Handle<WasmDebugInfo> debug_info,
-                                   int func_index);
-
-  // Disassemble the specified function from this module.
-  static Handle<String> DisassembleFunction(Handle<WasmDebugInfo> debug_info,
-                                            int func_index);
-
-  // Get the offset table for the specified function, mapping from byte offsets
-  // to position in the disassembly.
-  // Returns an array with three entries per instruction: byte offset, line and
-  // column.
-  static Handle<FixedArray> GetFunctionOffsetTable(
-      Handle<WasmDebugInfo> debug_info, int func_index);
-};
-
-}  // namespace wasm
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_WASM_DEBUG_H_
diff --git a/src/wasm/wasm-function-name-table.cc b/src/wasm/wasm-function-name-table.cc
deleted file mode 100644
index cc52125..0000000
--- a/src/wasm/wasm-function-name-table.cc
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/wasm/wasm-function-name-table.h"
-
-#include "src/wasm/wasm-module.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-// Build an array with all function names. If there are N functions in the
-// module, then the first (kIntSize * (N+1)) bytes are integer entries.
-// The first integer entry encodes the number of functions in the module.
-// The entries 1 to N contain offsets into the second part of this array.
-// If a function is unnamed (not to be confused with an empty name), then the
-// integer entry is the negative offset of the next function name.
-// After these N+1 integer entries, the second part begins, which holds a
-// concatenation of all function names.
-Handle<ByteArray> BuildFunctionNamesTable(Isolate* isolate,
-                                          const WasmModule* module) {
-  uint64_t func_names_length = 0;
-  for (auto& func : module->functions) func_names_length += func.name_length;
-  int num_funcs_int = static_cast<int>(module->functions.size());
-  int current_offset = (num_funcs_int + 1) * kIntSize;
-  uint64_t total_array_length = current_offset + func_names_length;
-  int total_array_length_int = static_cast<int>(total_array_length);
-  // Check for overflow.
-  CHECK(total_array_length_int == total_array_length && num_funcs_int >= 0 &&
-        num_funcs_int == module->functions.size());
-  Handle<ByteArray> func_names_array =
-      isolate->factory()->NewByteArray(total_array_length_int, TENURED);
-  func_names_array->set_int(0, num_funcs_int);
-  int func_index = 0;
-  for (const WasmFunction& fun : module->functions) {
-    WasmName name = module->GetNameOrNull(&fun);
-    if (name.start() == nullptr) {
-      func_names_array->set_int(func_index + 1, -current_offset);
-    } else {
-      func_names_array->copy_in(current_offset,
-                                reinterpret_cast<const byte*>(name.start()),
-                                name.length());
-      func_names_array->set_int(func_index + 1, current_offset);
-      current_offset += name.length();
-    }
-    ++func_index;
-  }
-  return func_names_array;
-}
-
-MaybeHandle<String> GetWasmFunctionNameFromTable(
-    Handle<ByteArray> func_names_array, uint32_t func_index) {
-  uint32_t num_funcs = static_cast<uint32_t>(func_names_array->get_int(0));
-  DCHECK(static_cast<int>(num_funcs) >= 0);
-  Factory* factory = func_names_array->GetIsolate()->factory();
-  DCHECK(func_index < num_funcs);
-  int offset = func_names_array->get_int(func_index + 1);
-  if (offset < 0) return {};
-  int next_offset = func_index == num_funcs - 1
-                        ? func_names_array->length()
-                        : abs(func_names_array->get_int(func_index + 2));
-  ScopedVector<byte> buffer(next_offset - offset);
-  func_names_array->copy_out(offset, buffer.start(), next_offset - offset);
-  if (!unibrow::Utf8::Validate(buffer.start(), buffer.length())) return {};
-  return factory->NewStringFromUtf8(Vector<const char>::cast(buffer));
-}
-
-}  // namespace wasm
-}  // namespace internal
-}  // namespace v8
diff --git a/src/wasm/wasm-function-name-table.h b/src/wasm/wasm-function-name-table.h
deleted file mode 100644
index ffee782..0000000
--- a/src/wasm/wasm-function-name-table.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_FUNCTION_NAME_TABLE_H_
-#define V8_WASM_FUNCTION_NAME_TABLE_H_
-
-#include "src/handles.h"
-#include "src/objects.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-// Forward declarations for some WASM data structures.
-struct WasmModule;
-
-// Encode all function names of the WasmModule into one ByteArray.
-Handle<ByteArray> BuildFunctionNamesTable(Isolate* isolate,
-                                          const WasmModule* module);
-
-// Extract the function name for the given func_index from the function name
-// table.
-// Returns a null handle if the respective function is unnamed (not to be
-// confused with empty names) or the function name is not a valid UTF-8 string.
-MaybeHandle<String> GetWasmFunctionNameFromTable(
-    Handle<ByteArray> wasm_names_table, uint32_t func_index);
-
-}  // namespace wasm
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_WASM_FUNCTION_NAME_TABLE_H_
diff --git a/src/wasm/wasm-interpreter.cc b/src/wasm/wasm-interpreter.cc
index 2ac681e..6e049ff 100644
--- a/src/wasm/wasm-interpreter.cc
+++ b/src/wasm/wasm-interpreter.cc
@@ -62,8 +62,6 @@
   V(I64GtS, int64_t, >)         \
   V(I64GeS, int64_t, >=)        \
   V(F32Add, float, +)           \
-  V(F32Mul, float, *)           \
-  V(F32Div, float, /)           \
   V(F32Eq, float, ==)           \
   V(F32Ne, float, !=)           \
   V(F32Lt, float, <)            \
@@ -71,8 +69,6 @@
   V(F32Gt, float, >)            \
   V(F32Ge, float, >=)           \
   V(F64Add, double, +)          \
-  V(F64Mul, double, *)          \
-  V(F64Div, double, /)          \
   V(F64Eq, double, ==)          \
   V(F64Ne, double, !=)          \
   V(F64Lt, double, <)           \
@@ -80,6 +76,12 @@
   V(F64Gt, double, >)           \
   V(F64Ge, double, >=)
 
+#define FOREACH_SIMPLE_BINOP_NAN(V) \
+  V(F32Mul, float, *)               \
+  V(F64Mul, double, *)              \
+  V(F32Div, float, /)               \
+  V(F64Div, double, /)
+
 #define FOREACH_OTHER_BINOP(V) \
   V(I32DivS, int32_t)          \
   V(I32DivU, uint32_t)         \
@@ -127,14 +129,12 @@
   V(F32Floor, float)             \
   V(F32Trunc, float)             \
   V(F32NearestInt, float)        \
-  V(F32Sqrt, float)              \
   V(F64Abs, double)              \
   V(F64Neg, double)              \
   V(F64Ceil, double)             \
   V(F64Floor, double)            \
   V(F64Trunc, double)            \
   V(F64NearestInt, double)       \
-  V(F64Sqrt, double)             \
   V(I32SConvertF32, float)       \
   V(I32SConvertF64, double)      \
   V(I32UConvertF32, float)       \
@@ -165,6 +165,10 @@
   V(I32AsmjsSConvertF64, double) \
   V(I32AsmjsUConvertF64, double)
 
+#define FOREACH_OTHER_UNOP_NAN(V) \
+  V(F32Sqrt, float)               \
+  V(F64Sqrt, double)
+
 static inline int32_t ExecuteI32DivS(int32_t a, int32_t b, TrapReason* trap) {
   if (b == 0) {
     *trap = kTrapDivByZero;
@@ -460,7 +464,8 @@
 }
 
 static inline float ExecuteF32Sqrt(float a, TrapReason* trap) {
-  return sqrtf(a);
+  float result = sqrtf(a);
+  return result;
 }
 
 static inline double ExecuteF64Abs(double a, TrapReason* trap) {
@@ -655,19 +660,16 @@
 }
 
 static inline int32_t ExecuteGrowMemory(uint32_t delta_pages,
-                                        WasmModuleInstance* instance) {
+                                        WasmInstance* instance) {
   // TODO(ahaas): Move memory allocation to wasm-module.cc for better
   // encapsulation.
-  if (delta_pages > wasm::WasmModule::kMaxMemPages) {
+  if (delta_pages > wasm::WasmModule::kV8MaxPages) {
     return -1;
   }
   uint32_t old_size = instance->mem_size;
   uint32_t new_size;
   byte* new_mem_start;
   if (instance->mem_size == 0) {
-    if (delta_pages > wasm::WasmModule::kMaxMemPages) {
-      return -1;
-    }
     // TODO(gdeepti): Fix bounds check to take into account size of memtype.
     new_size = delta_pages * wasm::WasmModule::kPageSize;
     new_mem_start = static_cast<byte*>(calloc(new_size, sizeof(byte)));
@@ -678,7 +680,7 @@
     DCHECK_NOT_NULL(instance->mem_start);
     new_size = old_size + delta_pages * wasm::WasmModule::kPageSize;
     if (new_size >
-        wasm::WasmModule::kMaxMemPages * wasm::WasmModule::kPageSize) {
+        wasm::WasmModule::kV8MaxPages * wasm::WasmModule::kPageSize) {
       return -1;
     }
     new_mem_start = static_cast<byte*>(realloc(instance->mem_start, new_size));
@@ -690,9 +692,6 @@
   }
   instance->mem_start = new_mem_start;
   instance->mem_size = new_size;
-  // realloc
-  // update mem_start
-  // update mem_size
   return static_cast<int32_t>(old_size / WasmModule::kPageSize);
 }
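ExecuteGrowMemory's contract: return the old size in 64 KiB pages, or -1 when the request exceeds the page cap or allocation fails. A standalone sketch of the same contract; the 16384-page cap is an illustrative stand-in, not V8's actual kV8MaxPages value:

```cpp
#include <cstdint>
#include <cstdlib>
#include <cstring>

constexpr uint32_t kPageSize = 0x10000;  // 64 KiB wasm page
constexpr uint32_t kMaxPages = 16384;    // illustrative cap only

struct Memory {
  uint8_t* start = nullptr;
  uint32_t size = 0;  // bytes
};

// Returns the old size in pages, or -1 on failure.
int32_t GrowMemory(Memory* mem, uint32_t delta_pages) {
  if (delta_pages > kMaxPages) return -1;
  uint64_t new_size = uint64_t{mem->size} + uint64_t{delta_pages} * kPageSize;
  if (new_size > uint64_t{kMaxPages} * kPageSize) return -1;
  uint8_t* new_start = static_cast<uint8_t*>(
      realloc(mem->start, static_cast<size_t>(new_size)));
  if (new_start == nullptr) return -1;
  // Wasm memory is zero-initialized; realloc leaves the new tail unspecified.
  memset(new_start + mem->size, 0, static_cast<size_t>(new_size) - mem->size);
  int32_t old_pages = static_cast<int32_t>(mem->size / kPageSize);
  mem->start = new_start;
  mem->size = static_cast<uint32_t>(new_size);
  return old_pages;
}
```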
 
@@ -967,7 +966,7 @@
 // Responsible for executing code directly.
 class ThreadImpl : public WasmInterpreter::Thread {
  public:
-  ThreadImpl(Zone* zone, CodeMap* codemap, WasmModuleInstance* instance)
+  ThreadImpl(Zone* zone, CodeMap* codemap, WasmInstance* instance)
       : codemap_(codemap),
         instance_(instance),
         stack_(zone),
@@ -975,7 +974,8 @@
         blocks_(zone),
         state_(WasmInterpreter::STOPPED),
         break_pc_(kInvalidPc),
-        trap_reason_(kTrapCount) {}
+        trap_reason_(kTrapCount),
+        possible_nondeterminism_(false) {}
 
   virtual ~ThreadImpl() {}
 
@@ -1030,6 +1030,7 @@
     frames_.clear();
     state_ = WasmInterpreter::STOPPED;
     trap_reason_ = kTrapCount;
+    possible_nondeterminism_ = false;
   }
 
   virtual int GetFrameCount() { return static_cast<int>(frames_.size()); }
@@ -1053,6 +1054,8 @@
 
   virtual pc_t GetBreakpointPc() { return break_pc_; }
 
+  virtual bool PossibleNondeterminism() { return possible_nondeterminism_; }
+
   bool Terminated() {
     return state_ == WasmInterpreter::TRAPPED ||
            state_ == WasmInterpreter::FINISHED;
@@ -1080,16 +1083,17 @@
   };
 
   CodeMap* codemap_;
-  WasmModuleInstance* instance_;
+  WasmInstance* instance_;
   ZoneVector<WasmVal> stack_;
   ZoneVector<Frame> frames_;
   ZoneVector<Block> blocks_;
   WasmInterpreter::State state_;
   pc_t break_pc_;
   TrapReason trap_reason_;
+  bool possible_nondeterminism_;
 
   CodeMap* codemap() { return codemap_; }
-  WasmModuleInstance* instance() { return instance_; }
+  WasmInstance* instance() { return instance_; }
   const WasmModule* module() { return instance_->module; }
 
   void DoTrap(TrapReason trap, pc_t pc) {
@@ -1327,9 +1331,15 @@
         }
         case kExprBrTable: {
           BranchTableOperand operand(&decoder, code->at(pc));
+          BranchTableIterator iterator(&decoder, operand);
           uint32_t key = Pop().to<uint32_t>();
+          uint32_t depth = 0;
           if (key >= operand.table_count) key = operand.table_count;
-          len = key + DoBreak(code, pc + key, operand.table[key]);
+          for (uint32_t i = 0; i <= key; i++) {
+            DCHECK(iterator.has_next());
+            depth = iterator.next();
+          }
+          len = key + DoBreak(code, pc + key, static_cast<size_t>(depth));
           TRACE("  br[%u] => @%zu\n", key, pc + key + len);
           break;
         }
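The rewritten br_table case walks a BranchTableIterator instead of indexing operand.table[key] directly, because each target is encoded as a variable-width LEB128 integer, so entry `key` has no fixed byte position. A hypothetical standalone decoder showing why entries 0..key must all be walked:

```cpp
#include <cstdint>
#include <vector>

// Unsigned LEB128, the wasm varint encoding.
uint32_t ReadU32LEB(const uint8_t* bytes, size_t* pos) {
  uint32_t result = 0;
  int shift = 0;
  uint8_t b;
  do {
    b = bytes[(*pos)++];
    result |= uint32_t{b & 0x7fu} << shift;
    shift += 7;
  } while (b & 0x80u);
  return result;
}

// Mirrors the loop above: earlier entries may occupy 1-5 bytes each, so the
// depth for `key` is found by decoding every entry up to and including it.
uint32_t BrTableDepth(const std::vector<uint8_t>& entries, uint32_t key) {
  size_t pos = 0;
  uint32_t depth = 0;
  for (uint32_t i = 0; i <= key; i++) depth = ReadU32LEB(entries.data(), &pos);
  return depth;
}
```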
@@ -1419,7 +1429,16 @@
           if (target == nullptr) {
             return DoTrap(kTrapFuncInvalid, pc);
           } else if (target->function->sig_index != operand.index) {
-            return DoTrap(kTrapFuncSigMismatch, pc);
+            // If not an exact match, we have to do a canonical check.
+            // TODO(titzer): make this faster with some kind of caching?
+            const WasmIndirectFunctionTable* table =
+                &module()->function_tables[0];
+            int function_key = table->map.Find(target->function->sig);
+            if (function_key < 0 ||
+                (function_key !=
+                 table->map.Find(module()->signatures[operand.index]))) {
+              return DoTrap(kTrapFuncSigMismatch, pc);
+            }
           }
 
           DoCall(target, &pc, pc + 1 + operand.length, &limit);
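The fallback above declares two signature indices compatible when both resolve to the same canonical id in the table's signature map (and the target's signature is present in the map at all). A minimal sketch of that check, with std::string standing in for FunctionSig and a plain std::map for the canonicalizing map:

```cpp
#include <map>
#include <string>

struct SigMap {
  std::map<std::string, int> ids;
  // Returns the canonical id, or -1 if the signature is not in the map.
  int Find(const std::string& sig) const {
    auto it = ids.find(sig);
    return it == ids.end() ? -1 : it->second;
  }
};

bool SignaturesMatch(const SigMap& map, const std::string& target_sig,
                     const std::string& expected_sig) {
  int key = map.Find(target_sig);
  return key >= 0 && key == map.Find(expected_sig);
}
```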
@@ -1573,13 +1592,17 @@
           ASMJS_STORE_CASE(F64AsmjsStoreMem, double, double);
 #undef ASMJS_STORE_CASE
         case kExprGrowMemory: {
+          MemoryIndexOperand operand(&decoder, code->at(pc));
           uint32_t delta_pages = Pop().to<uint32_t>();
           Push(pc, WasmVal(ExecuteGrowMemory(delta_pages, instance())));
+          len = 1 + operand.length;
           break;
         }
         case kExprMemorySize: {
+          MemoryIndexOperand operand(&decoder, code->at(pc));
           Push(pc, WasmVal(static_cast<uint32_t>(instance()->mem_size /
                                                  WasmModule::kPageSize)));
+          len = 1 + operand.length;
           break;
         }
 #define EXECUTE_SIMPLE_BINOP(name, ctype, op)             \
@@ -1593,6 +1616,19 @@
           FOREACH_SIMPLE_BINOP(EXECUTE_SIMPLE_BINOP)
 #undef EXECUTE_SIMPLE_BINOP
 
+#define EXECUTE_SIMPLE_BINOP_NAN(name, ctype, op)        \
+  case kExpr##name: {                                    \
+    WasmVal rval = Pop();                                \
+    WasmVal lval = Pop();                                \
+    ctype result = lval.to<ctype>() op rval.to<ctype>(); \
+    possible_nondeterminism_ |= std::isnan(result);      \
+    WasmVal result_val(result);                          \
+    Push(pc, result_val);                                \
+    break;                                               \
+  }
+          FOREACH_SIMPLE_BINOP_NAN(EXECUTE_SIMPLE_BINOP_NAN)
+#undef EXECUTE_SIMPLE_BINOP_NAN
+
 #define EXECUTE_OTHER_BINOP(name, ctype)              \
   case kExpr##name: {                                 \
     TrapReason trap = kTrapCount;                     \
@@ -1618,6 +1654,20 @@
           FOREACH_OTHER_UNOP(EXECUTE_OTHER_UNOP)
 #undef EXECUTE_OTHER_UNOP
 
+#define EXECUTE_OTHER_UNOP_NAN(name, ctype)          \
+  case kExpr##name: {                                \
+    TrapReason trap = kTrapCount;                    \
+    volatile ctype val = Pop().to<ctype>();          \
+    ctype result = Execute##name(val, &trap);        \
+    possible_nondeterminism_ |= std::isnan(result);  \
+    WasmVal result_val(result);                      \
+    if (trap != kTrapCount) return DoTrap(trap, pc); \
+    Push(pc, result_val);                            \
+    break;                                           \
+  }
+          FOREACH_OTHER_UNOP_NAN(EXECUTE_OTHER_UNOP_NAN)
+#undef EXECUTE_OTHER_UNOP_NAN
+
         default:
           V8_Fatal(__FILE__, __LINE__, "Unknown or unimplemented opcode #%d:%s",
                    code->start[pc], OpcodeName(code->start[pc]));
@@ -1648,7 +1698,7 @@
 
   WasmVal PopArity(size_t arity) {
     if (arity == 0) return WasmVal();
-    CHECK_EQ(1, arity);
+    CHECK_EQ(1u, arity);
     return Pop();
   }
 
@@ -1709,11 +1759,11 @@
 //============================================================================
 class WasmInterpreterInternals : public ZoneObject {
  public:
-  WasmModuleInstance* instance_;
+  WasmInstance* instance_;
   CodeMap codemap_;
   ZoneVector<ThreadImpl*> threads_;
 
-  WasmInterpreterInternals(Zone* zone, WasmModuleInstance* instance)
+  WasmInterpreterInternals(Zone* zone, WasmInstance* instance)
       : instance_(instance),
         codemap_(instance_ ? instance_->module : nullptr, zone),
         threads_(zone) {
@@ -1730,9 +1780,9 @@
 //============================================================================
 // Implementation of the public interface of the interpreter.
 //============================================================================
-WasmInterpreter::WasmInterpreter(WasmModuleInstance* instance,
+WasmInterpreter::WasmInterpreter(WasmInstance* instance,
                                  AccountingAllocator* allocator)
-    : zone_(allocator),
+    : zone_(allocator, ZONE_NAME),
       internals_(new (&zone_) WasmInterpreterInternals(&zone_, instance)) {}
 
 WasmInterpreter::~WasmInterpreter() { internals_->Delete(); }
diff --git a/src/wasm/wasm-interpreter.h b/src/wasm/wasm-interpreter.h
index b61e092..360362b 100644
--- a/src/wasm/wasm-interpreter.h
+++ b/src/wasm/wasm-interpreter.h
@@ -18,7 +18,7 @@
 
 // forward declarations.
 struct WasmFunction;
-struct WasmModuleInstance;
+struct WasmInstance;
 class WasmInterpreterInternals;
 
 typedef size_t pc_t;
@@ -125,13 +125,17 @@
     virtual const WasmFrame* GetFrame(int index) = 0;
     virtual WasmFrame* GetMutableFrame(int index) = 0;
     virtual WasmVal GetReturnValue(int index = 0) = 0;
+    // Returns true if the thread executed an instruction which may produce
+    // nondeterministic results, e.g. float div, float sqrt, and float mul,
+    // where the sign bit of a NaN is nondeterministic.
+    virtual bool PossibleNondeterminism() = 0;
 
     // Thread-specific breakpoints.
     bool SetBreakpoint(const WasmFunction* function, int pc, bool enabled);
     bool GetBreakpoint(const WasmFunction* function, int pc);
   };
 
-  WasmInterpreter(WasmModuleInstance* instance, AccountingAllocator* allocator);
+  WasmInterpreter(WasmInstance* instance, AccountingAllocator* allocator);
   ~WasmInterpreter();
 
   //==========================================================================
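The comment on PossibleNondeterminism above is the crux: wasm pins down that 0.0/0.0 produces *a* NaN, but not which bit pattern, so an interpreter run and a compiled run may legitimately disagree on NaN bits. A small demo of the underlying fact, assuming IEEE-754 floats:

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  volatile float zero = 0.0f;  // volatile defeats constant folding
  float nan = zero / zero;     // some NaN; the exact bits are unspecified
  uint32_t bits;
  std::memcpy(&bits, &nan, sizeof(bits));
  std::printf("isnan=%d bits=0x%08x\n", std::isnan(nan),
              static_cast<unsigned>(bits));
}
```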
diff --git a/src/wasm/wasm-js.cc b/src/wasm/wasm-js.cc
index 254fd70..0e030a2 100644
--- a/src/wasm/wasm-js.cc
+++ b/src/wasm/wasm-js.cc
@@ -19,6 +19,7 @@
 #include "src/wasm/module-decoder.h"
 #include "src/wasm/wasm-js.h"
 #include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
 #include "src/wasm/wasm-result.h"
 
 typedef uint8_t byte;
@@ -27,6 +28,12 @@
 
 namespace v8 {
 
+enum WasmMemoryObjectData {
+  kWasmMemoryBuffer,
+  kWasmMemoryMaximum,
+  kWasmMemoryInstanceObject
+};
+
 namespace {
 i::Handle<i::String> v8_str(i::Isolate* isolate, const char* str) {
   return isolate->factory()->NewStringFromAsciiChecked(str);
@@ -55,7 +62,7 @@
     end = start + contents.ByteLength();
 
     if (start == nullptr || end == start) {
-      thrower->Error("ArrayBuffer argument is empty");
+      thrower->CompileError("ArrayBuffer argument is empty");
     }
   } else if (source->IsTypedArray()) {
     // A TypedArray was passed.
@@ -69,132 +76,28 @@
     end = start + array->ByteLength();
 
     if (start == nullptr || end == start) {
-      thrower->Error("ArrayBuffer argument is empty");
+      thrower->TypeError("ArrayBuffer argument is empty");
     }
   } else {
-    thrower->Error("Argument 0 must be an ArrayBuffer or Uint8Array");
+    thrower->TypeError("Argument 0 must be an ArrayBuffer or Uint8Array");
   }
 
   return {start, end};
 }
 
-void VerifyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
-  HandleScope scope(args.GetIsolate());
-  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
-  ErrorThrower thrower(isolate, "Wasm.verifyModule()");
-
-  if (args.Length() < 1) {
-    thrower.TypeError("Argument 0 must be a buffer source");
-    return;
-  }
-  RawBuffer buffer = GetRawBufferSource(args[0], &thrower);
-  if (thrower.error()) return;
-
-  i::Zone zone(isolate->allocator());
-  internal::wasm::ModuleResult result =
-      internal::wasm::DecodeWasmModule(isolate, &zone, buffer.start, buffer.end,
-                                       true, internal::wasm::kWasmOrigin);
-
-  if (result.failed()) {
-    thrower.Failed("", result);
-  }
-
-  if (result.val) delete result.val;
-}
-
-void VerifyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
-  HandleScope scope(args.GetIsolate());
-  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
-  ErrorThrower thrower(isolate, "Wasm.verifyFunction()");
-
-  if (args.Length() < 1) {
-    thrower.TypeError("Argument 0 must be a buffer source");
-    return;
-  }
-  RawBuffer buffer = GetRawBufferSource(args[0], &thrower);
-  if (thrower.error()) return;
-
-  internal::wasm::FunctionResult result;
-  {
-    // Verification of a single function shouldn't allocate.
-    i::DisallowHeapAllocation no_allocation;
-    i::Zone zone(isolate->allocator());
-    result = internal::wasm::DecodeWasmFunction(isolate, &zone, nullptr,
-                                                buffer.start, buffer.end);
-  }
-
-  if (result.failed()) {
-    thrower.Failed("", result);
-  }
-
-  if (result.val) delete result.val;
-}
-
-i::MaybeHandle<i::JSObject> InstantiateModule(
-    const v8::FunctionCallbackInfo<v8::Value>& args, const byte* start,
-    const byte* end, ErrorThrower* thrower,
-    internal::wasm::ModuleOrigin origin = i::wasm::kWasmOrigin) {
-  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
-
-  // Decode but avoid a redundant pass over function bodies for verification.
-  // Verification will happen during compilation.
-  i::Zone zone(isolate->allocator());
-  i::MaybeHandle<i::JSObject> module_object =
-      i::wasm::CreateModuleObjectFromBytes(isolate, start, end, thrower,
-                                           origin);
-  i::MaybeHandle<i::JSObject> object;
-  if (!module_object.is_null()) {
-    // Success. Instantiate the module and return the object.
-    i::Handle<i::JSObject> ffi = i::Handle<i::JSObject>::null();
-    if (args.Length() > 1 && args[1]->IsObject()) {
-      Local<Object> obj = Local<Object>::Cast(args[1]);
-      ffi = i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj));
-    }
-
-    i::Handle<i::JSArrayBuffer> memory = i::Handle<i::JSArrayBuffer>::null();
-    if (args.Length() > 2 && args[2]->IsArrayBuffer()) {
-      Local<Object> obj = Local<Object>::Cast(args[2]);
-      i::Handle<i::Object> mem_obj = v8::Utils::OpenHandle(*obj);
-      memory = i::Handle<i::JSArrayBuffer>(i::JSArrayBuffer::cast(*mem_obj));
-    }
-
-    object = i::wasm::WasmModule::Instantiate(
-        isolate, thrower, module_object.ToHandleChecked(), ffi, memory);
-    if (!object.is_null()) {
-      args.GetReturnValue().Set(v8::Utils::ToLocal(object.ToHandleChecked()));
-    }
-  }
-  return object;
-}
-
-void InstantiateModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
-  HandleScope scope(args.GetIsolate());
-  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
-  ErrorThrower thrower(isolate, "Wasm.instantiateModule()");
-
-  if (args.Length() < 1) {
-    thrower.TypeError("Argument 0 must be a buffer source");
-    return;
-  }
-  RawBuffer buffer = GetRawBufferSource(args[0], &thrower);
-  if (buffer.start == nullptr) return;
-
-  InstantiateModule(args, buffer.start, buffer.end, &thrower);
-}
-
-static i::MaybeHandle<i::JSObject> CreateModuleObject(
+static i::MaybeHandle<i::WasmModuleObject> CreateModuleObject(
     v8::Isolate* isolate, const v8::Local<v8::Value> source,
     ErrorThrower* thrower) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   i::MaybeHandle<i::JSObject> nothing;
 
   RawBuffer buffer = GetRawBufferSource(source, thrower);
-  if (buffer.start == nullptr) return i::MaybeHandle<i::JSObject>();
+  if (buffer.start == nullptr) return i::MaybeHandle<i::WasmModuleObject>();
 
   DCHECK(source->IsArrayBuffer() || source->IsTypedArray());
   return i::wasm::CreateModuleObjectFromBytes(
-      i_isolate, buffer.start, buffer.end, thrower,
-      i::wasm::ModuleOrigin::kWasmOrigin);
+      i_isolate, buffer.start, buffer.end, thrower, i::wasm::kWasmOrigin,
+      i::Handle<i::Script>::null(), nullptr, nullptr);
 }
 
 static bool ValidateModule(v8::Isolate* isolate,
@@ -212,8 +115,8 @@
                                       i::wasm::ModuleOrigin::kWasmOrigin);
 }
 
-bool BrandCheck(Isolate* isolate, i::Handle<i::Object> value,
-                i::Handle<i::Symbol> sym, const char* msg) {
+static bool BrandCheck(Isolate* isolate, i::Handle<i::Object> value,
+                       i::Handle<i::Symbol> sym, const char* msg) {
   if (value->IsJSObject()) {
     i::Handle<i::JSObject> object = i::Handle<i::JSObject>::cast(value);
     Maybe<bool> has_brand = i::JSObject::HasOwnProperty(object, sym);
@@ -308,14 +211,8 @@
   }
 
   Local<Object> obj = Local<Object>::Cast(args[0]);
-
-  i::Handle<i::JSObject> module_obj =
+  i::Handle<i::JSObject> i_obj =
       i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj));
-  if (module_obj->GetInternalFieldCount() < 1 ||
-      !module_obj->GetInternalField(0)->IsFixedArray()) {
-    thrower.TypeError("Argument 0 is an invalid WebAssembly.Module");
-    return;
-  }
 
   i::Handle<i::JSReceiver> ffi = i::Handle<i::JSObject>::null();
   if (args.Length() > 1 && args[1]->IsObject()) {
@@ -324,17 +221,24 @@
   }
 
   i::Handle<i::JSArrayBuffer> memory = i::Handle<i::JSArrayBuffer>::null();
-  if (args.Length() > 2 && args[2]->IsArrayBuffer()) {
+  if (args.Length() > 2 && args[2]->IsObject()) {
     Local<Object> obj = Local<Object>::Cast(args[2]);
     i::Handle<i::Object> mem_obj = v8::Utils::OpenHandle(*obj);
-    memory = i::Handle<i::JSArrayBuffer>(i::JSArrayBuffer::cast(*mem_obj));
+    if (i::WasmJs::IsWasmMemoryObject(i_isolate, mem_obj)) {
+      memory = i::Handle<i::JSArrayBuffer>(
+          i::Handle<i::WasmMemoryObject>::cast(mem_obj)->get_buffer(),
+          i_isolate);
+    } else {
+      thrower.TypeError("Argument 2 must be a WebAssembly.Memory");
+    }
   }
-  i::MaybeHandle<i::JSObject> instance = i::wasm::WasmModule::Instantiate(
-      i_isolate, &thrower, module_obj, ffi, memory);
+  i::MaybeHandle<i::JSObject> instance =
+      i::wasm::WasmModule::Instantiate(i_isolate, &thrower, i_obj, ffi, memory);
   if (instance.is_null()) {
-    if (!thrower.error()) thrower.Error("Could not instantiate module");
+    if (!thrower.error()) thrower.RuntimeError("Could not instantiate module");
     return;
   }
+  DCHECK(!i_isolate->has_pending_exception());
   v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
   return_value.Set(Utils::ToLocal(instance.ToHandleChecked()));
 }
@@ -366,6 +270,8 @@
   return false;
 }
 
+const int max_table_size = 1 << 26;
+
 void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
   v8::Isolate* isolate = args.GetIsolate();
   HandleScope scope(isolate);
@@ -392,7 +298,6 @@
       return;
     }
   }
-  const int max_table_size = 1 << 26;
   // The descriptor's 'initial'.
   int initial;
   if (!GetIntegerProperty(isolate, &thrower, context, descriptor,
@@ -414,24 +319,14 @@
                             &maximum, initial, max_table_size)) {
       return;
     }
+  } else {
+    maximum = static_cast<int>(i::wasm::WasmModule::kV8MaxTableSize);
   }
 
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  i::Handle<i::JSFunction> table_ctor(
-      i_isolate->native_context()->wasm_table_constructor());
+  i::Handle<i::FixedArray> fixed_array;
   i::Handle<i::JSObject> table_obj =
-      i_isolate->factory()->NewJSObject(table_ctor);
-  i::Handle<i::FixedArray> fixed_array =
-      i_isolate->factory()->NewFixedArray(initial);
-  i::Object* null = i_isolate->heap()->null_value();
-  for (int i = 0; i < initial; ++i) fixed_array->set(i, null);
-  table_obj->SetInternalField(0, *fixed_array);
-  table_obj->SetInternalField(
-      1, has_maximum.FromJust()
-             ? static_cast<i::Object*>(i::Smi::FromInt(maximum))
-             : static_cast<i::Object*>(i_isolate->heap()->undefined_value()));
-  i::Handle<i::Symbol> table_sym(i_isolate->native_context()->wasm_table_sym());
-  i::Object::SetProperty(table_obj, table_sym, table_obj, i::STRICT).Check();
+      i::WasmTableObject::New(i_isolate, initial, maximum, &fixed_array);
   v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
   return_value.Set(Utils::ToLocal(table_obj));
 }
@@ -442,7 +337,7 @@
   ErrorThrower thrower(reinterpret_cast<i::Isolate*>(isolate),
                        "WebAssembly.Module()");
   if (args.Length() < 1 || !args[0]->IsObject()) {
-    thrower.TypeError("Argument 0 must be a table descriptor");
+    thrower.TypeError("Argument 0 must be a memory descriptor");
     return;
   }
   Local<Context> context = isolate->GetCurrentContext();
@@ -475,27 +370,196 @@
                 static_cast<size_t>(initial);
   i::JSArrayBuffer::SetupAllocatingData(buffer, i_isolate, size);
 
-  i::Handle<i::JSObject> memory_obj = i::WasmJs::CreateWasmMemoryObject(
-      i_isolate, buffer, has_maximum.FromJust(), maximum);
-  v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
-  return_value.Set(Utils::ToLocal(memory_obj));
+  i::Handle<i::JSObject> memory_obj = i::WasmMemoryObject::New(
+      i_isolate, buffer, has_maximum.FromJust() ? maximum : -1);
+  args.GetReturnValue().Set(Utils::ToLocal(memory_obj));
 }
+
 void WebAssemblyTableGetLength(
     const v8::FunctionCallbackInfo<v8::Value>& args) {
-  // TODO(rossberg)
+  v8::Isolate* isolate = args.GetIsolate();
+  Local<Context> context = isolate->GetCurrentContext();
+  i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
+  if (!BrandCheck(isolate, Utils::OpenHandle(*args.This()),
+                  i::Handle<i::Symbol>(i_context->wasm_table_sym()),
+                  "Receiver is not a WebAssembly.Table")) {
+    return;
+  }
+  auto receiver =
+      i::Handle<i::WasmTableObject>::cast(Utils::OpenHandle(*args.This()));
+  args.GetReturnValue().Set(
+      v8::Number::New(isolate, receiver->current_length()));
 }
+
 void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
-  // TODO(rossberg)
+  v8::Isolate* isolate = args.GetIsolate();
+  Local<Context> context = isolate->GetCurrentContext();
+  i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
+  if (!BrandCheck(isolate, Utils::OpenHandle(*args.This()),
+                  i::Handle<i::Symbol>(i_context->wasm_table_sym()),
+                  "Receiver is not a WebAssembly.Table")) {
+    return;
+  }
+
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  auto receiver =
+      i::Handle<i::WasmTableObject>::cast(Utils::OpenHandle(*args.This()));
+  i::Handle<i::FixedArray> old_array(receiver->get_functions(), i_isolate);
+  int old_size = old_array->length();
+  int64_t new_size64 = 0;
+  if (args.Length() > 0 && !args[0]->IntegerValue(context).To(&new_size64)) {
+    return;
+  }
+  new_size64 += old_size;
+
+  if (new_size64 < old_size || new_size64 > receiver->maximum_length()) {
+    v8::Local<v8::Value> e = v8::Exception::RangeError(
+        v8_str(isolate, new_size64 < old_size ? "trying to shrink table"
+                                              : "maximum table size exceeded"));
+    isolate->ThrowException(e);
+    return;
+  }
+  int new_size = static_cast<int>(new_size64);
+
+  if (new_size != old_size) {
+    i::Handle<i::FixedArray> new_array =
+        i_isolate->factory()->NewFixedArray(new_size);
+    for (int i = 0; i < old_size; ++i) new_array->set(i, old_array->get(i));
+    i::Object* null = i_isolate->heap()->null_value();
+    for (int i = old_size; i < new_size; ++i) new_array->set(i, null);
+    receiver->set_functions(*new_array);
+  }
+
+  // TODO(titzer): update relevant instances.
 }
+
 void WebAssemblyTableGet(const v8::FunctionCallbackInfo<v8::Value>& args) {
-  // TODO(rossberg)
+  v8::Isolate* isolate = args.GetIsolate();
+  Local<Context> context = isolate->GetCurrentContext();
+  i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
+  if (!BrandCheck(isolate, Utils::OpenHandle(*args.This()),
+                  i::Handle<i::Symbol>(i_context->wasm_table_sym()),
+                  "Receiver is not a WebAssembly.Table")) {
+    return;
+  }
+
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  auto receiver =
+      i::Handle<i::WasmTableObject>::cast(Utils::OpenHandle(*args.This()));
+  i::Handle<i::FixedArray> array(receiver->get_functions(), i_isolate);
+  int i = 0;
+  if (args.Length() > 0 && !args[0]->Int32Value(context).To(&i)) return;
+  v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+  if (i < 0 || i >= array->length()) {
+    v8::Local<v8::Value> e =
+        v8::Exception::RangeError(v8_str(isolate, "index out of bounds"));
+    isolate->ThrowException(e);
+    return;
+  }
+
+  i::Handle<i::Object> value(array->get(i), i_isolate);
+  return_value.Set(Utils::ToLocal(value));
 }
+
 void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
-  // TODO(rossberg)
+  v8::Isolate* isolate = args.GetIsolate();
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  Local<Context> context = isolate->GetCurrentContext();
+  i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
+  if (!BrandCheck(isolate, Utils::OpenHandle(*args.This()),
+                  i::Handle<i::Symbol>(i_context->wasm_table_sym()),
+                  "Receiver is not a WebAssembly.Table")) {
+    return;
+  }
+  if (args.Length() < 2) {
+    v8::Local<v8::Value> e = v8::Exception::TypeError(
+        v8_str(isolate, "Argument 1 must be null or a function"));
+    isolate->ThrowException(e);
+    return;
+  }
+  i::Handle<i::Object> value = Utils::OpenHandle(*args[1]);
+  if (!value->IsNull(i_isolate) &&
+      (!value->IsJSFunction() ||
+       i::Handle<i::JSFunction>::cast(value)->code()->kind() !=
+           i::Code::JS_TO_WASM_FUNCTION)) {
+    v8::Local<v8::Value> e = v8::Exception::TypeError(
+        v8_str(isolate, "Argument 1 must be null or a WebAssembly function"));
+    isolate->ThrowException(e);
+    return;
+  }
+
+  auto receiver =
+      i::Handle<i::WasmTableObject>::cast(Utils::OpenHandle(*args.This()));
+  i::Handle<i::FixedArray> array(receiver->get_functions(), i_isolate);
+  int i;
+  if (!args[0]->Int32Value(context).To(&i)) return;
+  if (i < 0 || i >= array->length()) {
+    v8::Local<v8::Value> e =
+        v8::Exception::RangeError(v8_str(isolate, "index out of bounds"));
+    isolate->ThrowException(e);
+    return;
+  }
+
+  i::Handle<i::FixedArray> dispatch_tables(receiver->get_dispatch_tables(),
+                                           i_isolate);
+  if (value->IsNull(i_isolate)) {
+    i::wasm::UpdateDispatchTables(i_isolate, dispatch_tables, i,
+                                  i::Handle<i::JSFunction>::null());
+  } else {
+    i::wasm::UpdateDispatchTables(i_isolate, dispatch_tables, i,
+                                  i::Handle<i::JSFunction>::cast(value));
+  }
+
+  i::Handle<i::FixedArray>::cast(array)->set(i, *value);
 }
+
 void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
-  // TODO(rossberg)
+  v8::Isolate* isolate = args.GetIsolate();
+  Local<Context> context = isolate->GetCurrentContext();
+  i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
+  if (!BrandCheck(isolate, Utils::OpenHandle(*args.This()),
+                  i::Handle<i::Symbol>(i_context->wasm_memory_sym()),
+                  "Receiver is not a WebAssembly.Memory")) {
+    return;
+  }
+  if (args.Length() < 1) {
+    v8::Local<v8::Value> e = v8::Exception::TypeError(
+        v8_str(isolate, "Argument 0 required, must be numeric value of pages"));
+    isolate->ThrowException(e);
+    return;
+  }
+
+  uint32_t delta = args[0]->Uint32Value(context).FromJust();
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  i::Handle<i::JSObject> receiver =
+      i::Handle<i::JSObject>::cast(Utils::OpenHandle(*args.This()));
+  i::Handle<i::Object> instance_object(
+      receiver->GetInternalField(kWasmMemoryInstanceObject), i_isolate);
+  i::Handle<i::JSObject> instance(
+      i::Handle<i::JSObject>::cast(instance_object));
+
+  // TODO(gdeepti): Implement growing memory when shared by different
+  // instances.
+  int32_t ret = internal::wasm::GrowInstanceMemory(i_isolate, instance, delta);
+  if (ret == -1) {
+    v8::Local<v8::Value> e = v8::Exception::Error(
+        v8_str(isolate, "Unable to grow instance memory."));
+    isolate->ThrowException(e);
+    return;
+  }
+  i::MaybeHandle<i::JSArrayBuffer> buffer =
+      internal::wasm::GetInstanceMemory(i_isolate, instance);
+  if (buffer.is_null()) {
+    v8::Local<v8::Value> e = v8::Exception::Error(
+        v8_str(isolate, "WebAssembly.Memory buffer object not set."));
+    isolate->ThrowException(e);
+    return;
+  }
+  receiver->SetInternalField(kWasmMemoryBuffer, *buffer.ToHandleChecked());
+  v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+  return_value.Set(ret);
 }
+
 void WebAssemblyMemoryGetBuffer(
     const v8::FunctionCallbackInfo<v8::Value>& args) {
   v8::Isolate* isolate = args.GetIsolate();
@@ -509,31 +573,14 @@
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   i::Handle<i::JSObject> receiver =
       i::Handle<i::JSObject>::cast(Utils::OpenHandle(*args.This()));
-  i::Handle<i::Object> buffer(receiver->GetInternalField(0), i_isolate);
+  i::Handle<i::Object> buffer(receiver->GetInternalField(kWasmMemoryBuffer),
+                              i_isolate);
   DCHECK(buffer->IsJSArrayBuffer());
   v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
   return_value.Set(Utils::ToLocal(buffer));
 }
 }  // namespace
 
-i::Handle<i::JSObject> i::WasmJs::CreateWasmMemoryObject(
-    i::Isolate* i_isolate, i::Handle<i::JSArrayBuffer> buffer, bool has_maximum,
-    int maximum) {
-  i::Handle<i::JSFunction> memory_ctor(
-      i_isolate->native_context()->wasm_memory_constructor());
-  i::Handle<i::JSObject> memory_obj =
-      i_isolate->factory()->NewJSObject(memory_ctor);
-  memory_obj->SetInternalField(0, *buffer);
-  memory_obj->SetInternalField(
-      1, has_maximum
-             ? static_cast<i::Object*>(i::Smi::FromInt(maximum))
-             : static_cast<i::Object*>(i_isolate->heap()->undefined_value()));
-  i::Handle<i::Symbol> memory_sym(
-      i_isolate->native_context()->wasm_memory_sym());
-  i::Object::SetProperty(memory_obj, memory_sym, memory_obj, i::STRICT).Check();
-  return memory_obj;
-}
-
 // TODO(titzer): we use the API to create the function template because the
 // internal guts are too ugly to replicate here.
 static i::Handle<i::FunctionTemplateInfo> NewTemplate(i::Isolate* i_isolate,
@@ -605,41 +652,43 @@
   JSFunction::SetInstancePrototype(
       cons, Handle<Object>(context->initial_object_prototype(), isolate));
   cons->shared()->set_instance_class_name(*name);
-  Handle<JSObject> wasm_object = factory->NewJSObject(cons, TENURED);
+  Handle<JSObject> webassembly = factory->NewJSObject(cons, TENURED);
   PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
-  JSObject::AddProperty(global, name, wasm_object, attributes);
+  JSObject::AddProperty(global, name, webassembly, attributes);
 
   // Setup compile
-  InstallFunc(isolate, wasm_object, "compile", WebAssemblyCompile);
+  InstallFunc(isolate, webassembly, "compile", WebAssemblyCompile);
 
   // Setup validate
-  InstallFunc(isolate, wasm_object, "validate", WebAssemblyValidate);
+  InstallFunc(isolate, webassembly, "validate", WebAssemblyValidate);
 
   // Setup Module
   Handle<JSFunction> module_constructor =
-      InstallFunc(isolate, wasm_object, "Module", WebAssemblyModule);
+      InstallFunc(isolate, webassembly, "Module", WebAssemblyModule);
   context->set_wasm_module_constructor(*module_constructor);
   Handle<JSObject> module_proto =
       factory->NewJSObject(module_constructor, TENURED);
   i::Handle<i::Map> map = isolate->factory()->NewMap(
-      i::JS_OBJECT_TYPE, i::JSObject::kHeaderSize + i::kPointerSize);
+      i::JS_OBJECT_TYPE, i::JSObject::kHeaderSize +
+                             WasmModuleObject::kFieldCount * i::kPointerSize);
   JSFunction::SetInitialMap(module_constructor, map, module_proto);
   JSObject::AddProperty(module_proto, isolate->factory()->constructor_string(),
                         module_constructor, DONT_ENUM);
 
   // Setup Instance
   Handle<JSFunction> instance_constructor =
-      InstallFunc(isolate, wasm_object, "Instance", WebAssemblyInstance);
+      InstallFunc(isolate, webassembly, "Instance", WebAssemblyInstance);
   context->set_wasm_instance_constructor(*instance_constructor);
 
   // Setup Table
   Handle<JSFunction> table_constructor =
-      InstallFunc(isolate, wasm_object, "Table", WebAssemblyTable);
+      InstallFunc(isolate, webassembly, "Table", WebAssemblyTable);
   context->set_wasm_table_constructor(*table_constructor);
   Handle<JSObject> table_proto =
       factory->NewJSObject(table_constructor, TENURED);
   map = isolate->factory()->NewMap(
-      i::JS_OBJECT_TYPE, i::JSObject::kHeaderSize + 2 * i::kPointerSize);
+      i::JS_OBJECT_TYPE, i::JSObject::kHeaderSize +
+                             WasmTableObject::kFieldCount * i::kPointerSize);
   JSFunction::SetInitialMap(table_constructor, map, table_proto);
   JSObject::AddProperty(table_proto, isolate->factory()->constructor_string(),
                         table_constructor, DONT_ENUM);
@@ -650,17 +699,29 @@
 
   // Setup Memory
   Handle<JSFunction> memory_constructor =
-      InstallFunc(isolate, wasm_object, "Memory", WebAssemblyMemory);
+      InstallFunc(isolate, webassembly, "Memory", WebAssemblyMemory);
   context->set_wasm_memory_constructor(*memory_constructor);
   Handle<JSObject> memory_proto =
       factory->NewJSObject(memory_constructor, TENURED);
   map = isolate->factory()->NewMap(
-      i::JS_OBJECT_TYPE, i::JSObject::kHeaderSize + 2 * i::kPointerSize);
+      i::JS_OBJECT_TYPE, i::JSObject::kHeaderSize +
+                             WasmMemoryObject::kFieldCount * i::kPointerSize);
   JSFunction::SetInitialMap(memory_constructor, map, memory_proto);
   JSObject::AddProperty(memory_proto, isolate->factory()->constructor_string(),
                         memory_constructor, DONT_ENUM);
   InstallFunc(isolate, memory_proto, "grow", WebAssemblyMemoryGrow);
   InstallGetter(isolate, memory_proto, "buffer", WebAssemblyMemoryGetBuffer);
+
+  // Setup errors
+  attributes = static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
+  Handle<JSFunction> compile_error(
+      isolate->native_context()->wasm_compile_error_function());
+  JSObject::AddProperty(webassembly, isolate->factory()->CompileError_string(),
+                        compile_error, attributes);
+  Handle<JSFunction> runtime_error(
+      isolate->native_context()->wasm_runtime_error_function());
+  JSObject::AddProperty(webassembly, isolate->factory()->RuntimeError_string(),
+                        runtime_error, attributes);
 }
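The three maps above are now sized from per-type kFieldCount constants instead of hard-coded pointer counts, so adding an internal field to a wasm object type automatically resizes its map. The arithmetic, with illustrative numbers only (the field count of 3 matches the WasmMemoryObjectData enum earlier in this file; the header size and pointer width are placeholders, not V8's values):

```cpp
#include <cstdio>

constexpr int kPointerSize = 8;                        // placeholder: 64-bit
constexpr int kJSObjectHeaderSize = 3 * kPointerSize;  // placeholder value
constexpr int kWasmMemoryFieldCount = 3;  // buffer, maximum, instance object

int main() {
  std::printf("WasmMemoryObject instance size = %d bytes\n",
              kJSObjectHeaderSize + kWasmMemoryFieldCount * kPointerSize);
}
```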
 
 void WasmJs::Install(Isolate* isolate, Handle<JSGlobalObject> global) {
@@ -668,44 +729,13 @@
     return;
   }
 
-  Factory* factory = isolate->factory();
-
   // Setup wasm function map.
   Handle<Context> context(global->native_context(), isolate);
   InstallWasmMapsIfNeeded(isolate, context);
 
-  if (!FLAG_expose_wasm) {
-    return;
+  if (FLAG_expose_wasm) {
+    InstallWasmConstructors(isolate, global, context);
   }
-
-  // Bind the experimental WASM object.
-  // TODO(rossberg, titzer): remove once it's no longer needed.
-  {
-    Handle<String> name = v8_str(isolate, "Wasm");
-    Handle<JSFunction> cons = factory->NewFunction(name);
-    JSFunction::SetInstancePrototype(
-        cons, Handle<Object>(context->initial_object_prototype(), isolate));
-    cons->shared()->set_instance_class_name(*name);
-    Handle<JSObject> wasm_object = factory->NewJSObject(cons, TENURED);
-    PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
-    JSObject::AddProperty(global, name, wasm_object, attributes);
-
-    // Install functions on the WASM object.
-    InstallFunc(isolate, wasm_object, "verifyModule", VerifyModule);
-    InstallFunc(isolate, wasm_object, "verifyFunction", VerifyFunction);
-    InstallFunc(isolate, wasm_object, "instantiateModule", InstantiateModule);
-
-    {
-      // Add the Wasm.experimentalVersion property.
-      Handle<String> name = v8_str(isolate, "experimentalVersion");
-      PropertyAttributes attributes =
-          static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
-      Handle<Smi> value =
-          Handle<Smi>(Smi::FromInt(wasm::kWasmVersion), isolate);
-      JSObject::AddProperty(wasm_object, name, value, attributes);
-    }
-  }
-  InstallWasmConstructors(isolate, global, context);
 }
 
 void WasmJs::InstallWasmMapsIfNeeded(Isolate* isolate,
@@ -737,5 +767,24 @@
   }
 }
 
+static bool HasBrand(i::Handle<i::Object> value, i::Handle<i::Symbol> symbol) {
+  if (value->IsJSObject()) {
+    i::Handle<i::JSObject> object = i::Handle<i::JSObject>::cast(value);
+    Maybe<bool> has_brand = i::JSObject::HasOwnProperty(object, symbol);
+    if (has_brand.IsNothing()) return false;
+    if (has_brand.ToChecked()) return true;
+  }
+  return false;
+}
+
+bool WasmJs::IsWasmMemoryObject(Isolate* isolate, Handle<Object> value) {
+  i::Handle<i::Symbol> symbol(isolate->context()->wasm_memory_sym(), isolate);
+  return HasBrand(value, symbol);
+}
+
+bool WasmJs::IsWasmTableObject(Isolate* isolate, Handle<Object> value) {
+  i::Handle<i::Symbol> symbol(isolate->context()->wasm_table_sym(), isolate);
+  return HasBrand(value, symbol);
+}
 }  // namespace internal
 }  // namespace v8
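HasBrand is the whole trust model behind these predicates: an object is a WebAssembly.Memory or Table exactly when it carries the corresponding private-symbol own property, which only the constructors set. A standalone sketch of the idiom, with strings standing in for private symbols:

```cpp
#include <set>
#include <string>

struct ObjectSim {
  std::set<std::string> own_properties;  // stands in for symbol-keyed slots
};

// Only the constructor stamps the brand...
void StampBrand(ObjectSim* obj, const std::string& brand) {
  obj->own_properties.insert(brand);
}

// ...so a positive check proves the object came from that constructor.
bool HasBrand(const ObjectSim& obj, const std::string& brand) {
  return obj.own_properties.count(brand) != 0;
}

bool IsWasmMemoryObject(const ObjectSim& obj) {
  return HasBrand(obj, "wasm_memory_sym");  // illustrative symbol name
}
```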
diff --git a/src/wasm/wasm-js.h b/src/wasm/wasm-js.h
index 4f26494..f5b9596 100644
--- a/src/wasm/wasm-js.h
+++ b/src/wasm/wasm-js.h
@@ -24,9 +24,11 @@
                                       Handle<JSGlobalObject> global,
                                       Handle<Context> context);
 
-  static Handle<JSObject> CreateWasmMemoryObject(Isolate* isolate,
-                                                 Handle<JSArrayBuffer> buffer,
-                                                 bool has_maximum, int maximum);
+  // WebAssembly.Table.
+  static bool IsWasmTableObject(Isolate* isolate, Handle<Object> value);
+
+  // WebAssembly.Memory
+  static bool IsWasmMemoryObject(Isolate* isolate, Handle<Object> value);
 };
 
 }  // namespace internal
diff --git a/src/wasm/wasm-macro-gen.h b/src/wasm/wasm-macro-gen.h
index fd10a39..ce2f843 100644
--- a/src/wasm/wasm-macro-gen.h
+++ b/src/wasm/wasm-macro-gen.h
@@ -420,21 +420,23 @@
 #define WASM_CALL_FUNCTION(index, ...) \
   __VA_ARGS__, kExprCallFunction, static_cast<byte>(index)
 
+#define TABLE_ZERO 0
+
 // TODO(titzer): change usages of these macros to put func last.
 #define WASM_CALL_INDIRECT0(index, func) \
-  func, kExprCallIndirect, static_cast<byte>(index)
+  func, kExprCallIndirect, static_cast<byte>(index), TABLE_ZERO
 #define WASM_CALL_INDIRECT1(index, func, a) \
-  a, func, kExprCallIndirect, static_cast<byte>(index)
+  a, func, kExprCallIndirect, static_cast<byte>(index), TABLE_ZERO
 #define WASM_CALL_INDIRECT2(index, func, a, b) \
-  a, b, func, kExprCallIndirect, static_cast<byte>(index)
+  a, b, func, kExprCallIndirect, static_cast<byte>(index), TABLE_ZERO
 #define WASM_CALL_INDIRECT3(index, func, a, b, c) \
-  a, b, c, func, kExprCallIndirect, static_cast<byte>(index)
+  a, b, c, func, kExprCallIndirect, static_cast<byte>(index), TABLE_ZERO
 #define WASM_CALL_INDIRECT4(index, func, a, b, c, d) \
-  a, b, c, d, func, kExprCallIndirect, static_cast<byte>(index)
+  a, b, c, d, func, kExprCallIndirect, static_cast<byte>(index), TABLE_ZERO
 #define WASM_CALL_INDIRECT5(index, func, a, b, c, d, e) \
-  a, b, c, d, e, func, kExprCallIndirect, static_cast<byte>(index)
+  a, b, c, d, e, func, kExprCallIndirect, static_cast<byte>(index), TABLE_ZERO
 #define WASM_CALL_INDIRECTN(arity, index, func, ...) \
-  __VA_ARGS__, func, kExprCallIndirect, static_cast<byte>(index)
+  __VA_ARGS__, func, kExprCallIndirect, static_cast<byte>(index), TABLE_ZERO
 
 #define WASM_NOT(x) x, kExprI32Eqz
 #define WASM_SEQ(...) __VA_ARGS__
@@ -493,6 +495,14 @@
 #define WASM_I32_EQZ(x) x, kExprI32Eqz
 
 //------------------------------------------------------------------------------
+// Asmjs Int32 operations
+//------------------------------------------------------------------------------
+#define WASM_I32_ASMJS_DIVS(x, y) x, y, kExprI32AsmjsDivS
+#define WASM_I32_ASMJS_REMS(x, y) x, y, kExprI32AsmjsRemS
+#define WASM_I32_ASMJS_DIVU(x, y) x, y, kExprI32AsmjsDivU
+#define WASM_I32_ASMJS_REMU(x, y) x, y, kExprI32AsmjsRemU
+
+//------------------------------------------------------------------------------
 // Int64 operations
 //------------------------------------------------------------------------------
 #define WASM_I64_ADD(x, y) x, y, kExprI64Add
@@ -605,8 +615,8 @@
 //------------------------------------------------------------------------------
 // Memory Operations.
 //------------------------------------------------------------------------------
-#define WASM_GROW_MEMORY(x) x, kExprGrowMemory
-#define WASM_MEMORY_SIZE kExprMemorySize
+#define WASM_GROW_MEMORY(x) x, kExprGrowMemory, 0
+#define WASM_MEMORY_SIZE kExprMemorySize, 0
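The trailing 0 added to these two macros is the new memory-index immediate (only memory 0 exists for now), matching the MemoryIndexOperand the interpreter decodes and its `len = 1 + operand.length` advance. A sketch of how such byte-list macros compose; the opcode values are stand-ins, not this version's real encodings:

```cpp
#include <cstdint>
typedef uint8_t byte;

constexpr byte kExprI32Const = 0x41;    // stand-in opcode value
constexpr byte kExprGrowMemory = 0x40;  // stand-in opcode value

#define WASM_I32V_1(x) kExprI32Const, static_cast<byte>(x)
#define WASM_GROW_MEMORY(x) x, kExprGrowMemory, 0

// Expands to {kExprI32Const, 1, kExprGrowMemory, 0}: the operand expression,
// the opcode, then the memory-index byte the interpreter now skips over.
const byte kGrowByOnePage[] = {WASM_GROW_MEMORY(WASM_I32V_1(1))};
```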
 
 //------------------------------------------------------------------------------
 // Simd Operations.
@@ -614,6 +624,11 @@
 #define WASM_SIMD_I32x4_SPLAT(x) x, kSimdPrefix, kExprI32x4Splat & 0xff
 #define WASM_SIMD_I32x4_EXTRACT_LANE(lane, x) \
   x, kSimdPrefix, kExprI32x4ExtractLane & 0xff, static_cast<byte>(lane)
+#define WASM_SIMD_I32x4_ADD(x, y) x, y, kSimdPrefix, kExprI32x4Add & 0xff
+#define WASM_SIMD_F32x4_SPLAT(x) x, kSimdPrefix, kExprF32x4Splat & 0xff
+#define WASM_SIMD_F32x4_EXTRACT_LANE(lane, x) \
+  x, kSimdPrefix, kExprF32x4ExtractLane & 0xff, static_cast<byte>(lane)
+#define WASM_SIMD_F32x4_ADD(x, y) x, y, kSimdPrefix, kExprF32x4Add & 0xff
 
 #define SIG_ENTRY_v_v kWasmFunctionTypeForm, 0, 0
 #define SIZEOF_SIG_ENTRY_v_v 3
diff --git a/src/wasm/wasm-module-builder.cc b/src/wasm/wasm-module-builder.cc
index 084f5a0..290e98e 100644
--- a/src/wasm/wasm-module-builder.cc
+++ b/src/wasm/wasm-module-builder.cc
@@ -54,11 +54,13 @@
       func_index_(static_cast<uint32_t>(builder->functions_.size())),
       body_(builder->zone()),
       name_(builder->zone()),
+      exported_name_(builder->zone()),
       i32_temps_(builder->zone()),
       i64_temps_(builder->zone()),
       f32_temps_(builder->zone()),
       f64_temps_(builder->zone()),
-      direct_calls_(builder->zone()) {}
+      direct_calls_(builder->zone()),
+      asm_offsets_(builder->zone(), 8) {}
 
 void WasmFunctionBuilder::EmitVarInt(uint32_t val) {
   byte buffer[8];
@@ -139,15 +141,31 @@
   EmitCode(code, sizeof(code));
 }
 
-void WasmFunctionBuilder::SetExported() { exported_ = true; }
+void WasmFunctionBuilder::Export() { exported_ = true; }
 
-void WasmFunctionBuilder::SetName(const char* name, int name_length) {
-  name_.clear();
-  if (name_length > 0) {
-    for (int i = 0; i < name_length; ++i) {
-      name_.push_back(*(name + i));
-    }
-  }
+void WasmFunctionBuilder::ExportAs(Vector<const char> name) {
+  exported_ = true;
+  exported_name_.resize(name.length());
+  memcpy(exported_name_.data(), name.start(), name.length());
+}
+
+void WasmFunctionBuilder::SetName(Vector<const char> name) {
+  name_.resize(name.length());
+  memcpy(name_.data(), name.start(), name.length());
+}
+
+void WasmFunctionBuilder::AddAsmWasmOffset(int asm_position) {
+  // We only want to emit one mapping per byte offset:
+  DCHECK(asm_offsets_.size() == 0 || body_.size() > last_asm_byte_offset_);
+
+  DCHECK_LE(body_.size(), kMaxUInt32);
+  uint32_t byte_offset = static_cast<uint32_t>(body_.size());
+  asm_offsets_.write_u32v(byte_offset - last_asm_byte_offset_);
+  last_asm_byte_offset_ = byte_offset;
+
+  DCHECK_GE(asm_position, 0);
+  asm_offsets_.write_i32v(asm_position - last_asm_source_position_);
+  last_asm_source_position_ = asm_position;
 }
 
 void WasmFunctionBuilder::WriteSignature(ZoneBuffer& buffer) const {
@@ -156,10 +174,11 @@
 
 void WasmFunctionBuilder::WriteExport(ZoneBuffer& buffer) const {
   if (exported_) {
-    buffer.write_size(name_.size());
-    if (name_.size() > 0) {
-      buffer.write(reinterpret_cast<const byte*>(&name_[0]), name_.size());
-    }
+    const ZoneVector<char>* exported_name =
+        exported_name_.size() == 0 ? &name_ : &exported_name_;
+    buffer.write_size(exported_name->size());
+    buffer.write(reinterpret_cast<const byte*>(exported_name->data()),
+                 exported_name->size());
     buffer.write_u8(kExternalFunction);
     buffer.write_u32v(func_index_ +
                       static_cast<uint32_t>(builder_->imports_.size()));
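WriteExport now prefers exported_name_ and falls back to the function's own name, so ExportAs can rename an export without touching the debug name. The fallback in isolation, as a plain-C++ sketch:

```cpp
#include <string>

struct FunctionNames {
  std::string name;           // set via SetName
  std::string exported_name;  // set via ExportAs; empty means "not renamed"

  const std::string& ExportName() const {
    return exported_name.empty() ? name : exported_name;
  }
};
```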
@@ -184,6 +203,18 @@
   }
 }
 
+void WasmFunctionBuilder::WriteAsmWasmOffsetTable(ZoneBuffer& buffer) const {
+  if (asm_offsets_.size() == 0) {
+    buffer.write_size(0);
+    return;
+  }
+  buffer.write_size(asm_offsets_.size() + kInt32Size);
+  // Size of the locals encoding; the recorded offsets below are relative to
+  // the function code that follows the locals declarations.
+  DCHECK_GE(kMaxUInt32, locals_.Size());
+  buffer.write_u32(static_cast<uint32_t>(locals_.Size()));
+  buffer.write(asm_offsets_.begin(), asm_offsets_.size());
+}
+
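AddAsmWasmOffset writes each mapping as a pair of deltas: byte offsets only grow, so they use unsigned LEB128, while asm.js source positions can move backwards, so they use signed LEB128. A standalone sketch of the writer (LEBHelper is assumed to implement standard LEB128; none of its names are reused here):

```cpp
#include <cstdint>
#include <vector>

struct OffsetTableWriter {
  std::vector<uint8_t> out;
  uint32_t last_byte_offset = 0;
  int32_t last_source_pos = 0;

  void WriteU32V(uint32_t v) {  // unsigned LEB128
    do {
      uint8_t b = v & 0x7f;
      v >>= 7;
      out.push_back(v ? (b | 0x80) : b);
    } while (v);
  }

  void WriteI32V(int32_t v) {  // signed LEB128 (arithmetic shift assumed)
    for (bool more = true; more;) {
      uint8_t b = v & 0x7f;
      v >>= 7;
      more = !((v == 0 && !(b & 0x40)) || (v == -1 && (b & 0x40)));
      out.push_back(more ? (b | 0x80) : b);
    }
  }

  void Add(uint32_t byte_offset, int32_t source_pos) {
    WriteU32V(byte_offset - last_byte_offset);  // byte offsets are monotonic
    last_byte_offset = byte_offset;
    WriteI32V(source_pos - last_source_pos);  // positions may go backwards
    last_source_pos = source_pos;
  }
};
```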
 WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
     : zone_(zone),
       signatures_(zone),
@@ -255,8 +286,9 @@
 }
 
 uint32_t WasmModuleBuilder::AddGlobal(LocalType type, bool exported,
-                                      bool mutability) {
-  globals_.push_back({type, exported, mutability});
+                                      bool mutability,
+                                      const WasmInitExpr& init) {
+  globals_.push_back({type, exported, mutability, init});
   return static_cast<uint32_t>(globals_.size() - 1);
 }
 
@@ -332,7 +364,7 @@
     buffer.write_u8(1);  // memory count
     buffer.write_u32v(kResizableMaximumFlag);
     buffer.write_u32v(16);  // min memory size
-    buffer.write_u32v(16);  // max memory size
+    buffer.write_u32v(32);  // max memory size
     FixupSection(buffer, start);
   }
 
@@ -344,29 +376,64 @@
     for (auto global : globals_) {
       buffer.write_u8(WasmOpcodes::LocalTypeCodeFor(global.type));
       buffer.write_u8(global.mutability ? 1 : 0);
-      switch (global.type) {
-        case kAstI32: {
-          static const byte code[] = {WASM_I32V_1(0)};
+      switch (global.init.kind) {
+        case WasmInitExpr::kI32Const: {
+          DCHECK_EQ(kAstI32, global.type);
+          const byte code[] = {WASM_I32V_5(global.init.val.i32_const)};
           buffer.write(code, sizeof(code));
           break;
         }
-        case kAstF32: {
-          static const byte code[] = {WASM_F32(0)};
+        case WasmInitExpr::kI64Const: {
+          DCHECK_EQ(kAstI64, global.type);
+          const byte code[] = {WASM_I64V_10(global.init.val.i64_const)};
           buffer.write(code, sizeof(code));
           break;
         }
-        case kAstI64: {
-          static const byte code[] = {WASM_I64V_1(0)};
+        case WasmInitExpr::kF32Const: {
+          DCHECK_EQ(kAstF32, global.type);
+          const byte code[] = {WASM_F32(global.init.val.f32_const)};
           buffer.write(code, sizeof(code));
           break;
         }
-        case kAstF64: {
-          static const byte code[] = {WASM_F64(0.0)};
+        case WasmInitExpr::kF64Const: {
+          DCHECK_EQ(kAstF64, global.type);
+          const byte code[] = {WASM_F64(global.init.val.f64_const)};
           buffer.write(code, sizeof(code));
           break;
         }
-        default:
-          UNREACHABLE();
+        case WasmInitExpr::kGlobalIndex: {
+          const byte code[] = {kExprGetGlobal,
+                               U32V_5(global.init.val.global_index)};
+          buffer.write(code, sizeof(code));
+          break;
+        }
+        default: {
+          // No initializer, emit a default value.
+          switch (global.type) {
+            case kAstI32: {
+              const byte code[] = {WASM_I32V_1(0)};
+              buffer.write(code, sizeof(code));
+              break;
+            }
+            case kAstI64: {
+              const byte code[] = {WASM_I64V_1(0)};
+              buffer.write(code, sizeof(code));
+              break;
+            }
+            case kAstF32: {
+              const byte code[] = {WASM_F32(0.0)};
+              buffer.write(code, sizeof(code));
+              break;
+            }
+            case kAstF64: {
+              const byte code[] = {WASM_F64(0.0)};
+              buffer.write(code, sizeof(code));
+              break;
+            }
+            default:
+              UNREACHABLE();
+          }
+        }
       }
       buffer.write_u8(kExprEnd);
     }
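The initializer switch above gains real constant and get_global initializers while keeping typed zeros as the default. A compact standalone analogue of the dispatch, printing a textual form instead of encoding bytes (all names are local to the sketch):

```cpp
#include <cstdint>
#include <cstdio>

struct InitExpr {
  enum Kind { kNone, kI32Const, kGlobalIndex } kind = kNone;
  union {
    int32_t i32_const;
    uint32_t global_index;
  } val{};
};

void EmitInit(const InitExpr& init) {
  switch (init.kind) {
    case InitExpr::kI32Const:
      std::printf("i32.const %d\n", init.val.i32_const);
      break;
    case InitExpr::kGlobalIndex:
      std::printf("get_global %u\n", init.val.global_index);
      break;
    default:
      // No initializer given: fall back to a typed zero, as above.
      std::printf("i32.const 0\n");
  }
}

int main() {
  InitExpr init;
  init.kind = InitExpr::kI32Const;
  init.val.i32_const = 42;
  EmitInit(init);  // prints "i32.const 42"
}
```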
@@ -442,7 +509,12 @@
     buffer.write_size(4);
     buffer.write(reinterpret_cast<const byte*>("name"), 4);
     // Emit the names.
-    buffer.write_size(functions_.size());
+    size_t count = functions_.size() + imports_.size();
+    buffer.write_size(count);
+    for (size_t i = 0; i < imports_.size(); i++) {
+      buffer.write_u8(0);  // empty name for import
+      buffer.write_u8(0);  // no local variables
+    }
     for (auto function : functions_) {
       buffer.write_size(function->name_.size());
       if (function->name_.size() > 0) {
@@ -454,6 +526,15 @@
     FixupSection(buffer, start);
   }
 }
+
+void WasmModuleBuilder::WriteAsmJsOffsetTable(ZoneBuffer& buffer) const {
+  // == Emit asm.js offset table ===============================================
+  buffer.write_size(functions_.size());
+  // Emit the offset table per function.
+  for (auto function : functions_) {
+    function->WriteAsmWasmOffsetTable(buffer);
+  }
+}
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
diff --git a/src/wasm/wasm-module-builder.h b/src/wasm/wasm-module-builder.h
index dcaf6c8..d35313e 100644
--- a/src/wasm/wasm-module-builder.h
+++ b/src/wasm/wasm-module-builder.h
@@ -49,6 +49,11 @@
     LEBHelper::write_u32v(&pos_, val);
   }
 
+  void write_i32v(int32_t val) {
+    EnsureSpace(kMaxVarInt32Size);
+    LEBHelper::write_i32v(&pos_, val);
+  }
+
   void write_size(size_t val) {
     EnsureSpace(kMaxVarInt32Size);
     DCHECK_EQ(val, static_cast<uint32_t>(val));
@@ -83,10 +88,10 @@
     }
   }
 
-  size_t offset() { return static_cast<size_t>(pos_ - buffer_); }
-  size_t size() { return static_cast<size_t>(pos_ - buffer_); }
-  const byte* begin() { return buffer_; }
-  const byte* end() { return pos_; }
+  size_t offset() const { return static_cast<size_t>(pos_ - buffer_); }
+  size_t size() const { return static_cast<size_t>(pos_ - buffer_); }
+  const byte* begin() const { return buffer_; }
+  const byte* end() const { return pos_; }
 
   void EnsureSpace(size_t size) {
     if ((pos_ + size) > end_) {
@@ -127,12 +132,15 @@
   void EmitWithU8U8(WasmOpcode opcode, const byte imm1, const byte imm2);
   void EmitWithVarInt(WasmOpcode opcode, uint32_t immediate);
   void EmitDirectCallIndex(uint32_t index);
-  void SetExported();
-  void SetName(const char* name, int name_length);
+  void Export();
+  void ExportAs(Vector<const char> name);
+  void SetName(Vector<const char> name);
+  void AddAsmWasmOffset(int asm_position);
 
   void WriteSignature(ZoneBuffer& buffer) const;
   void WriteExport(ZoneBuffer& buffer) const;
   void WriteBody(ZoneBuffer& buffer) const;
+  void WriteAsmWasmOffsetTable(ZoneBuffer& buffer) const;
 
   bool exported() { return exported_; }
   uint32_t func_index() { return func_index_; }
@@ -155,11 +163,17 @@
   uint32_t func_index_;
   ZoneVector<uint8_t> body_;
   ZoneVector<char> name_;
+  ZoneVector<char> exported_name_;
   ZoneVector<uint32_t> i32_temps_;
   ZoneVector<uint32_t> i64_temps_;
   ZoneVector<uint32_t> f32_temps_;
   ZoneVector<uint32_t> f64_temps_;
   ZoneVector<DirectCallIndex> direct_calls_;
+
+  // Delta-encoded mapping from wasm bytes to asm.js source positions.
+  ZoneBuffer asm_offsets_;
+  uint32_t last_asm_byte_offset_ = 0;
+  uint32_t last_asm_source_position_ = 0;
 };
 
 class WasmTemporary {
@@ -212,7 +226,8 @@
     imports_[index].name_length = name_length;
   }
   WasmFunctionBuilder* AddFunction(FunctionSig* sig = nullptr);
-  uint32_t AddGlobal(LocalType type, bool exported, bool mutability = true);
+  uint32_t AddGlobal(LocalType type, bool exported, bool mutability = true,
+                     const WasmInitExpr& init = WasmInitExpr());
   void AddDataSegment(const byte* data, uint32_t size, uint32_t dest);
   uint32_t AddSignature(FunctionSig* sig);
   void AddIndirectFunction(uint32_t index);
@@ -220,7 +235,10 @@
 
   // Writing methods.
   void WriteTo(ZoneBuffer& buffer) const;
+  void WriteAsmJsOffsetTable(ZoneBuffer& buffer) const;
 
+  // TODO(titzer): use SignatureMap from signature-map.h here.
+  // This signature map is zone-allocated, but the other is heap-allocated.
   struct CompareFunctionSigs {
     bool operator()(FunctionSig* a, FunctionSig* b) const;
   };
@@ -241,6 +259,7 @@
     LocalType type;
     bool exported;
     bool mutability;
+    WasmInitExpr init;
   };
 
   struct WasmDataSegment {
diff --git a/src/wasm/wasm-module.cc b/src/wasm/wasm-module.cc
index f4cf505..79b99fe 100644
--- a/src/wasm/wasm-module.cc
+++ b/src/wasm/wasm-module.cc
@@ -16,17 +16,16 @@
 
 #include "src/wasm/ast-decoder.h"
 #include "src/wasm/module-decoder.h"
-#include "src/wasm/wasm-debug.h"
-#include "src/wasm/wasm-function-name-table.h"
 #include "src/wasm/wasm-js.h"
 #include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
 #include "src/wasm/wasm-result.h"
 
 #include "src/compiler/wasm-compiler.h"
 
-namespace v8 {
-namespace internal {
-namespace wasm {
+using namespace v8::internal;
+using namespace v8::internal::wasm;
+namespace base = v8::base;
 
 #define TRACE(...)                                      \
   do {                                                  \
@@ -40,132 +39,39 @@
 
 namespace {
 
+static const int kInvalidSigIndex = -1;
 static const int kPlaceholderMarker = 1000000000;
 
-enum JSFunctionExportInternalField {
-  kInternalModuleInstance,
-  kInternalArity,
-  kInternalSignature
-};
-
-// Internal constants for the layout of the module object.
-enum WasmInstanceObjectFields {
-  kWasmCompiledModule = 0,
-  kWasmModuleFunctionTable,
-  kWasmModuleCodeTable,
-  kWasmMemArrayBuffer,
-  kWasmGlobalsArrayBuffer,
-  // TODO(clemensh): Remove function name array, extract names from module
-  // bytes.
-  kWasmFunctionNamesArray,
-  kWasmModuleBytesString,
-  kWasmDebugInfo,
-  kWasmNumImportedFunctions,
-  kWasmModuleInternalFieldCount
-};
-
-enum WasmImportData {
-  kModuleName,         // String
-  kFunctionName,       // maybe String
-  kOutputCount,        // Smi. an uint32_t
-  kSignature,          // ByteArray. A copy of the data in FunctionSig
-  kWasmImportDataSize  // Sentinel value.
-};
-
-enum WasmExportData {
-  kExportName,             // String
-  kExportArity,            // Smi, an int
-  kExportedFunctionIndex,  // Smi, an uint32_t
-  kExportedSignature,      // ByteArray. A copy of the data in FunctionSig
-  kWasmExportDataSize      // Sentinel value.
-};
-
-enum WasmSegmentInfo {
-  kDestAddr,            // Smi. an uint32_t
-  kSourceSize,          // Smi. an uint32_t
-  kWasmSegmentInfoSize  // Sentinel value.
-};
-
-enum WasmIndirectFunctionTableData {
-  kSize,                              // Smi. an uint32_t
-  kTable,                             // FixedArray of indirect function table
-  kWasmIndirectFunctionTableDataSize  // Sentinel value.
-};
-
-uint32_t GetMinModuleMemSize(const WasmModule* module) {
-  return WasmModule::kPageSize * module->min_mem_pages;
+byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
+  return static_cast<byte*>(buffer.ToHandleChecked()->backing_store()) + offset;
 }
 
-void LoadDataSegments(Handle<WasmCompiledModule> compiled_module,
-                      Address mem_addr, size_t mem_size) {
-  CHECK(compiled_module->has_data_segments() ==
-        compiled_module->has_data_segments_info());
-
-  // If we have neither, we're done.
-  if (!compiled_module->has_data_segments()) return;
-
-  Handle<ByteArray> data = compiled_module->data_segments();
-  Handle<FixedArray> segments = compiled_module->data_segments_info();
-
-  uint32_t last_extraction_pos = 0;
-  for (int i = 0; i < segments->length(); ++i) {
-    Handle<ByteArray> segment =
-        Handle<ByteArray>(ByteArray::cast(segments->get(i)));
-    uint32_t dest_addr = static_cast<uint32_t>(segment->get_int(kDestAddr));
-    uint32_t source_size = static_cast<uint32_t>(segment->get_int(kSourceSize));
-    CHECK_LT(dest_addr, mem_size);
-    CHECK_LE(source_size, mem_size);
-    CHECK_LE(dest_addr, mem_size - source_size);
-    byte* addr = mem_addr + dest_addr;
-    data->copy_out(last_extraction_pos, addr, source_size);
-    last_extraction_pos += source_size;
-  }
+MaybeHandle<String> ExtractStringFromModuleBytes(
+    Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
+    uint32_t offset, uint32_t size) {
+  // TODO(wasm): cache strings from modules if it's a performance win.
+  Handle<SeqOneByteString> module_bytes = compiled_module->module_bytes();
+  DCHECK_GE(static_cast<size_t>(module_bytes->length()), offset);
+  DCHECK_GE(static_cast<size_t>(module_bytes->length() - offset), size);
+  Address raw = module_bytes->GetCharsAddress() + offset;
+  if (!unibrow::Utf8::Validate(reinterpret_cast<const byte*>(raw), size))
+    return {};  // UTF8 decoding error for name.
+  return isolate->factory()->NewStringFromUtf8SubString(
+      module_bytes, static_cast<int>(offset), static_cast<int>(size));
 }
 
-void SaveDataSegmentInfo(Factory* factory, const WasmModule* module,
-                         Handle<WasmCompiledModule> compiled_module) {
-  Handle<FixedArray> segments = factory->NewFixedArray(
-      static_cast<int>(module->data_segments.size()), TENURED);
-  uint32_t data_size = 0;
-  for (const WasmDataSegment& segment : module->data_segments) {
-    if (segment.source_size == 0) continue;
-    data_size += segment.source_size;
-  }
-  Handle<ByteArray> data = factory->NewByteArray(data_size, TENURED);
-
-  uint32_t last_insertion_pos = 0;
-  for (uint32_t i = 0; i < module->data_segments.size(); ++i) {
-    const WasmDataSegment& segment = module->data_segments[i];
-    if (segment.source_size == 0) continue;
-    Handle<ByteArray> js_segment =
-        factory->NewByteArray(kWasmSegmentInfoSize * sizeof(uint32_t), TENURED);
-    // TODO(titzer): add support for global offsets for dest_addr
-    CHECK_EQ(WasmInitExpr::kI32Const, segment.dest_addr.kind);
-    js_segment->set_int(kDestAddr, segment.dest_addr.val.i32_const);
-    js_segment->set_int(kSourceSize, segment.source_size);
-    segments->set(i, *js_segment);
-    data->copy_in(last_insertion_pos,
-                  module->module_start + segment.source_offset,
-                  segment.source_size);
-    last_insertion_pos += segment.source_size;
-  }
-  compiled_module->set_data_segments_info(segments);
-  compiled_module->set_data_segments(data);
-}
-
-void PatchFunctionTable(Handle<Code> code,
-                        Handle<FixedArray> old_indirect_table,
-                        Handle<FixedArray> new_indirect_table) {
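+// Replaces every embedded-object reference to {old_ref} in the relocation
+// info of {code} with {new_ref}.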
+void ReplaceReferenceInCode(Handle<Code> code, Handle<Object> old_ref,
+                            Handle<Object> new_ref) {
   for (RelocIterator it(*code, 1 << RelocInfo::EMBEDDED_OBJECT); !it.done();
        it.next()) {
-    if (it.rinfo()->target_object() == *old_indirect_table) {
-      it.rinfo()->set_target_object(*new_indirect_table);
+    if (it.rinfo()->target_object() == *old_ref) {
+      it.rinfo()->set_target_object(*new_ref);
     }
   }
 }
 
 Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size) {
-  if (size > (WasmModule::kMaxMemPages * WasmModule::kPageSize)) {
+  if (size > (WasmModule::kV8MaxPages * WasmModule::kPageSize)) {
     // TODO(titzer): lift restriction on maximum memory allocated here.
     return Handle<JSArrayBuffer>::null();
   }
@@ -188,49 +94,30 @@
   return buffer;
 }
 
-void RelocateInstanceCode(Handle<JSObject> instance, Address old_start,
-                          Address start, uint32_t prev_size,
-                          uint32_t new_size) {
-  Handle<FixedArray> functions = Handle<FixedArray>(
-      FixedArray::cast(instance->GetInternalField(kWasmModuleCodeTable)));
-  for (int i = 0; i < functions->length(); ++i) {
-    Handle<Code> function = Handle<Code>(Code::cast(functions->get(i)));
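+// Updates all WASM_MEMORY_REFERENCE and WASM_MEMORY_SIZE_REFERENCE
+// relocations in the code objects of {code_table} after the memory backing
+// store has moved or been resized.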
+void RelocateMemoryReferencesInCode(Handle<FixedArray> code_table,
+                                    Address old_start, Address start,
+                                    uint32_t prev_size, uint32_t new_size) {
+  for (int i = 0; i < code_table->length(); ++i) {
+    DCHECK(code_table->get(i)->IsCode());
+    Handle<Code> code = Handle<Code>(Code::cast(code_table->get(i)));
     AllowDeferredHandleDereference embedding_raw_address;
     int mask = (1 << RelocInfo::WASM_MEMORY_REFERENCE) |
                (1 << RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
-    for (RelocIterator it(*function, mask); !it.done(); it.next()) {
+    for (RelocIterator it(*code, mask); !it.done(); it.next()) {
       it.rinfo()->update_wasm_memory_reference(old_start, start, prev_size,
                                                new_size);
     }
   }
 }
 
-// Allocate memory for a module instance as a new JSArrayBuffer.
-Handle<JSArrayBuffer> AllocateMemory(ErrorThrower* thrower, Isolate* isolate,
-                                     uint32_t min_mem_pages) {
-  if (min_mem_pages > WasmModule::kMaxMemPages) {
-    thrower->Error("Out of memory: wasm memory too large");
-    return Handle<JSArrayBuffer>::null();
-  }
-  Handle<JSArrayBuffer> mem_buffer =
-      NewArrayBuffer(isolate, min_mem_pages * WasmModule::kPageSize);
-
-  if (mem_buffer.is_null()) {
-    thrower->Error("Out of memory: wasm memory");
-  }
-  return mem_buffer;
-}
-
-void RelocateGlobals(Handle<JSObject> instance, Address old_start,
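+// Updates all WASM_GLOBAL_REFERENCE relocations in the code objects of
+// {code_table} after the globals backing store has moved.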
+void RelocateGlobals(Handle<FixedArray> code_table, Address old_start,
                      Address globals_start) {
-  Handle<FixedArray> functions = Handle<FixedArray>(
-      FixedArray::cast(instance->GetInternalField(kWasmModuleCodeTable)));
-  uint32_t function_count = static_cast<uint32_t>(functions->length());
-  for (uint32_t i = 0; i < function_count; ++i) {
-    Handle<Code> function = Handle<Code>(Code::cast(functions->get(i)));
+  for (int i = 0; i < code_table->length(); ++i) {
+    DCHECK(code_table->get(i)->IsCode());
+    Handle<Code> code = Handle<Code>(Code::cast(code_table->get(i)));
     AllowDeferredHandleDereference embedding_raw_address;
     int mask = 1 << RelocInfo::WASM_GLOBAL_REFERENCE;
-    for (RelocIterator it(*function, mask); !it.done(); it.next()) {
+    for (RelocIterator it(*code, mask); !it.done(); it.next()) {
       it.rinfo()->update_wasm_global_reference(old_start, globals_start);
     }
   }
@@ -240,8 +127,8 @@
                                Code::Kind kind) {
   // Create a placeholder code object and encode the corresponding index in
   // the {constant_pool_offset} field of the code object.
-  // TODO(titzer): placeholder code objects are somewhat dangerous.
-  static byte buffer[] = {0, 0, 0, 0, 0, 0, 0, 0};  // fake instructions.
+  // TODO(titzer): instead of placeholders, use a reloc_info mode.
+  static byte buffer[] = {0, 0, 0, 0};  // fake instructions.
   static CodeDesc desc = {
       buffer, arraysize(buffer), arraysize(buffer), 0, 0, nullptr, 0, nullptr};
   Handle<Code> code = factory->NewCode(desc, Code::KindField::encode(kind),
@@ -284,9 +171,9 @@
   return modified;
 }
 
-void FlushICache(Isolate* isolate, Handle<FixedArray> functions) {
-  for (int i = 0; i < functions->length(); ++i) {
-    Handle<Code> code = functions->GetValueChecked<Code>(isolate, i);
+void FlushICache(Isolate* isolate, Handle<FixedArray> code_table) {
+  for (int i = 0; i < code_table->length(); ++i) {
+    Handle<Code> code = code_table->GetValueChecked<Code>(isolate, i);
     Assembler::FlushICache(isolate, code->instruction_start(),
                            code->instruction_size());
   }
@@ -365,190 +252,16 @@
 }
 
 Address GetGlobalStartAddressFromCodeTemplate(Object* undefined,
-                                              JSObject* owner) {
+                                              JSObject* object) {
+  auto instance = WasmInstanceObject::cast(object);
   Address old_address = nullptr;
-  Object* stored_value = owner->GetInternalField(kWasmGlobalsArrayBuffer);
-  if (stored_value != undefined) {
-    old_address = static_cast<Address>(
-        JSArrayBuffer::cast(stored_value)->backing_store());
+  if (instance->has_globals_buffer()) {
+    old_address =
+        static_cast<Address>(instance->get_globals_buffer()->backing_store());
   }
   return old_address;
 }
 
-Handle<FixedArray> GetImportsData(Factory* factory, const WasmModule* module) {
-  Handle<FixedArray> ret = factory->NewFixedArray(
-      static_cast<int>(module->import_table.size()), TENURED);
-  for (size_t i = 0; i < module->import_table.size(); ++i) {
-    const WasmImport& import = module->import_table[i];
-    if (import.kind != kExternalFunction) continue;
-    WasmName module_name = module->GetNameOrNull(import.module_name_offset,
-                                                 import.module_name_length);
-    WasmName function_name = module->GetNameOrNull(import.field_name_offset,
-                                                   import.field_name_length);
-
-    Handle<String> module_name_string =
-        factory->InternalizeUtf8String(module_name);
-    Handle<String> function_name_string =
-        function_name.is_empty()
-            ? Handle<String>::null()
-            : factory->InternalizeUtf8String(function_name);
-    FunctionSig* fsig = module->functions[import.index].sig;
-    Handle<ByteArray> sig = factory->NewByteArray(
-        static_cast<int>(fsig->parameter_count() + fsig->return_count()),
-        TENURED);
-    sig->copy_in(0, reinterpret_cast<const byte*>(fsig->raw_data()),
-                 sig->length());
-    Handle<FixedArray> encoded_import =
-        factory->NewFixedArray(kWasmImportDataSize, TENURED);
-    encoded_import->set(kModuleName, *module_name_string);
-    if (!function_name_string.is_null()) {
-      encoded_import->set(kFunctionName, *function_name_string);
-    }
-    encoded_import->set(kOutputCount,
-                        Smi::FromInt(static_cast<int>(fsig->return_count())));
-    encoded_import->set(kSignature, *sig);
-    ret->set(static_cast<int>(i), *encoded_import);
-  }
-  return ret;
-}
-
-static MaybeHandle<JSFunction> ReportFFIError(
-    ErrorThrower* thrower, const char* error, uint32_t index,
-    Handle<String> module_name, MaybeHandle<String> function_name) {
-  Handle<String> function_name_handle;
-  if (function_name.ToHandle(&function_name_handle)) {
-    thrower->Error("Import #%d module=\"%.*s\" function=\"%.*s\" error: %s",
-                   index, module_name->length(), module_name->ToCString().get(),
-                   function_name_handle->length(),
-                   function_name_handle->ToCString().get(), error);
-  } else {
-    thrower->Error("Import #%d module=\"%.*s\" error: %s", index,
-                   module_name->length(), module_name->ToCString().get(),
-                   error);
-  }
-  thrower->Error("Import ");
-  return MaybeHandle<JSFunction>();
-}
-
-static MaybeHandle<JSReceiver> LookupFunction(
-    ErrorThrower* thrower, Factory* factory, Handle<JSReceiver> ffi,
-    uint32_t index, Handle<String> module_name,
-    MaybeHandle<String> function_name) {
-  if (ffi.is_null()) {
-    return ReportFFIError(thrower, "FFI is not an object", index, module_name,
-                          function_name);
-  }
-
-  // Look up the module first.
-  MaybeHandle<Object> result = Object::GetProperty(ffi, module_name);
-  if (result.is_null()) {
-    return ReportFFIError(thrower, "module not found", index, module_name,
-                          function_name);
-  }
-
-  Handle<Object> module = result.ToHandleChecked();
-
-  if (!module->IsJSReceiver()) {
-    return ReportFFIError(thrower, "module is not an object or function", index,
-                          module_name, function_name);
-  }
-
-  Handle<Object> function;
-  if (!function_name.is_null()) {
-    // Look up the function in the module.
-    MaybeHandle<Object> result =
-        Object::GetProperty(module, function_name.ToHandleChecked());
-    if (result.is_null()) {
-      return ReportFFIError(thrower, "function not found", index, module_name,
-                            function_name);
-    }
-    function = result.ToHandleChecked();
-  } else {
-    // No function specified. Use the "default export".
-    function = module;
-  }
-
-  if (!function->IsCallable()) {
-    return ReportFFIError(thrower, "not a callable", index, module_name,
-                          function_name);
-  }
-
-  return Handle<JSReceiver>::cast(function);
-}
-
-Handle<Code> CompileImportWrapper(Isolate* isolate,
-                                  const Handle<JSReceiver> ffi, int index,
-                                  Handle<FixedArray> import_data,
-                                  ErrorThrower* thrower) {
-  Handle<FixedArray> data =
-      import_data->GetValueChecked<FixedArray>(isolate, index);
-  Handle<String> module_name =
-      data->GetValueChecked<String>(isolate, kModuleName);
-  MaybeHandle<String> function_name =
-      data->GetValue<String>(isolate, kFunctionName);
-
-  // TODO(mtrofin): this is an uint32_t, actually. We should rationalize
-  // it when we rationalize signed/unsigned stuff.
-  int ret_count = Smi::cast(data->get(kOutputCount))->value();
-  CHECK_GE(ret_count, 0);
-  Handle<ByteArray> sig_data =
-      data->GetValueChecked<ByteArray>(isolate, kSignature);
-  int sig_data_size = sig_data->length();
-  int param_count = sig_data_size - ret_count;
-  CHECK(param_count >= 0);
-
-  MaybeHandle<JSReceiver> function = LookupFunction(
-      thrower, isolate->factory(), ffi, index, module_name, function_name);
-  if (function.is_null()) return Handle<Code>::null();
-  Handle<Code> code;
-  Handle<JSReceiver> target = function.ToHandleChecked();
-  bool isMatch = false;
-  Handle<Code> export_wrapper_code;
-  if (target->IsJSFunction()) {
-    Handle<JSFunction> func = Handle<JSFunction>::cast(target);
-    export_wrapper_code = handle(func->code());
-    if (export_wrapper_code->kind() == Code::JS_TO_WASM_FUNCTION) {
-      int exported_param_count =
-          Smi::cast(func->GetInternalField(kInternalArity))->value();
-      Handle<ByteArray> exportedSig = Handle<ByteArray>(
-          ByteArray::cast(func->GetInternalField(kInternalSignature)));
-      if (exported_param_count == param_count &&
-          exportedSig->length() == sig_data->length() &&
-          memcmp(exportedSig->data(), sig_data->data(),
-                 exportedSig->length()) == 0) {
-        isMatch = true;
-      }
-    }
-  }
-  if (isMatch) {
-    int wasm_count = 0;
-    int const mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
-    for (RelocIterator it(*export_wrapper_code, mask); !it.done(); it.next()) {
-      RelocInfo* rinfo = it.rinfo();
-      Address target_address = rinfo->target_address();
-      Code* target = Code::GetCodeFromTargetAddress(target_address);
-      if (target->kind() == Code::WASM_FUNCTION) {
-        ++wasm_count;
-        code = handle(target);
-      }
-    }
-    DCHECK(wasm_count == 1);
-    return code;
-  } else {
-    // Copy the signature to avoid a raw pointer into a heap object when
-    // GC can happen.
-    Zone zone(isolate->allocator());
-    MachineRepresentation* reps =
-        zone.NewArray<MachineRepresentation>(sig_data_size);
-    memcpy(reps, sig_data->data(),
-           sizeof(MachineRepresentation) * sig_data_size);
-    FunctionSig sig(ret_count, param_count, reps);
-
-    return compiler::CompileWasmToJSWrapper(isolate, target, &sig, index,
-                                            module_name, function_name);
-  }
-}
-
 void InitializeParallelCompilation(
     Isolate* isolate, const std::vector<WasmFunction>& functions,
     std::vector<compiler::WasmCompilationUnit*>& compilation_units,
@@ -590,7 +303,8 @@
   for (size_t i = 0; i < num_tasks; ++i) {
     // If the task has not started yet, then we abort it. Otherwise we wait for
     // it to finish.
-    if (!isolate->cancelable_task_manager()->TryAbort(task_ids[i])) {
+    if (isolate->cancelable_task_manager()->TryAbort(task_ids[i]) !=
+        CancelableTaskManager::kTaskAborted) {
       pending_tasks->Wait();
     }
   }
@@ -695,8 +409,8 @@
     code = compiler::WasmCompilationUnit::CompileWasmFunction(
         thrower, isolate, module_env, &func);
     if (code.is_null()) {
-      thrower->Error("Compilation of #%d:%.*s failed.", i, str.length(),
-                     str.start());
+      thrower->CompileError("Compilation of #%d:%.*s failed.", i, str.length(),
+                            str.start());
       break;
     }
       // Install the code into the linker table.
@@ -736,19 +450,26 @@
   }
 }
 
-static void ResetCompiledModule(Isolate* isolate, JSObject* owner,
+static void ResetCompiledModule(Isolate* isolate, WasmInstanceObject* owner,
                                 WasmCompiledModule* compiled_module) {
   TRACE("Resetting %d\n", compiled_module->instance_id());
   Object* undefined = *isolate->factory()->undefined_value();
-  uint32_t old_mem_size = compiled_module->has_heap()
-                              ? compiled_module->mem_size()
-                              : compiled_module->default_mem_size();
+  uint32_t old_mem_size = compiled_module->mem_size();
   uint32_t default_mem_size = compiled_module->default_mem_size();
-  Object* mem_start = compiled_module->ptr_to_heap();
+  Object* mem_start = compiled_module->ptr_to_memory();
   Address old_mem_address = nullptr;
   Address globals_start =
       GetGlobalStartAddressFromCodeTemplate(undefined, owner);
 
+  // Reset function tables.
+  FixedArray* function_tables = nullptr;
+  FixedArray* empty_function_tables = nullptr;
+  if (compiled_module->has_function_tables()) {
+    function_tables = compiled_module->ptr_to_function_tables();
+    empty_function_tables = compiled_module->ptr_to_empty_function_tables();
+    compiled_module->set_ptr_to_function_tables(empty_function_tables);
+  }
+
   if (old_mem_size > 0) {
     CHECK_NE(mem_start, undefined);
     old_mem_address =
@@ -756,11 +477,14 @@
   }
   int mode_mask = RelocInfo::ModeMask(RelocInfo::WASM_MEMORY_REFERENCE) |
                   RelocInfo::ModeMask(RelocInfo::WASM_MEMORY_SIZE_REFERENCE) |
-                  RelocInfo::ModeMask(RelocInfo::WASM_GLOBAL_REFERENCE);
+                  RelocInfo::ModeMask(RelocInfo::WASM_GLOBAL_REFERENCE) |
+                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
 
+  // Patch code to update memory references, global references, and function
+  // table references.
   Object* fct_obj = compiled_module->ptr_to_code_table();
   if (fct_obj != nullptr && fct_obj != undefined &&
-      (old_mem_size > 0 || globals_start != nullptr)) {
+      (old_mem_size > 0 || globals_start != nullptr || function_tables)) {
     FixedArray* functions = FixedArray::cast(fct_obj);
     for (int i = 0; i < functions->length(); ++i) {
       Code* code = Code::cast(functions->get(i));
@@ -772,10 +496,17 @@
           it.rinfo()->update_wasm_memory_reference(
               old_mem_address, nullptr, old_mem_size, default_mem_size);
           changed = true;
-        } else {
-          CHECK(RelocInfo::IsWasmGlobalReference(mode));
+        } else if (RelocInfo::IsWasmGlobalReference(mode)) {
           it.rinfo()->update_wasm_global_reference(globals_start, nullptr);
           changed = true;
+        } else if (RelocInfo::IsEmbeddedObject(mode) && function_tables) {
+          Object* old = it.rinfo()->target_object();
+          for (int j = 0; j < function_tables->length(); ++j) {
+            if (function_tables->get(j) == old) {
+              it.rinfo()->set_target_object(empty_function_tables->get(j));
+              changed = true;
+            }
+          }
         }
       }
       if (changed) {
@@ -784,26 +515,25 @@
       }
     }
   }
-  compiled_module->reset_heap();
+  compiled_module->reset_memory();
 }
 
 static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
   JSObject** p = reinterpret_cast<JSObject**>(data.GetParameter());
-  JSObject* owner = *p;
-  WasmCompiledModule* compiled_module =
-      WasmCompiledModule::cast(owner->GetInternalField(kWasmCompiledModule));
+  WasmInstanceObject* owner = reinterpret_cast<WasmInstanceObject*>(*p);
+  WasmCompiledModule* compiled_module = owner->get_compiled_module();
   TRACE("Finalizing %d {\n", compiled_module->instance_id());
   Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
-  DCHECK(compiled_module->has_weak_module_object());
-  WeakCell* weak_module_obj = compiled_module->ptr_to_weak_module_object();
+  DCHECK(compiled_module->has_weak_wasm_module());
+  WeakCell* weak_wasm_module = compiled_module->ptr_to_weak_wasm_module();
 
-  // weak_module_obj may have been cleared, meaning the module object
+  // weak_wasm_module may have been cleared, meaning the module object
   // was GC-ed. In that case, there won't be any new instances created,
   // and we don't need to maintain the links between instances.
-  if (!weak_module_obj->cleared()) {
-    JSObject* module_obj = JSObject::cast(weak_module_obj->value());
+  if (!weak_wasm_module->cleared()) {
+    JSObject* wasm_module = JSObject::cast(weak_wasm_module->value());
     WasmCompiledModule* current_template =
-        WasmCompiledModule::cast(module_obj->GetInternalField(0));
+        WasmCompiledModule::cast(wasm_module->GetInternalField(0));
 
     TRACE("chain before {\n");
     TRACE_CHAIN(current_template);
@@ -818,7 +548,7 @@
         ResetCompiledModule(isolate, owner, compiled_module);
       } else {
         DCHECK(next->value()->IsFixedArray());
-        module_obj->SetInternalField(0, next->value());
+        wasm_module->SetInternalField(0, next->value());
         DCHECK_NULL(prev);
         WasmCompiledModule::cast(next->value())->reset_weak_prev_instance();
       }
@@ -847,7 +577,7 @@
       }
     }
     TRACE("chain after {\n");
-    TRACE_CHAIN(WasmCompiledModule::cast(module_obj->GetInternalField(0)));
+    TRACE_CHAIN(WasmCompiledModule::cast(wasm_module->GetInternalField(0)));
     TRACE("}\n");
   }
   compiled_module->reset_weak_owning_instance();
@@ -855,39 +585,21 @@
   TRACE("}\n");
 }
 
-Handle<FixedArray> SetupIndirectFunctionTable(
-    Isolate* isolate, Handle<FixedArray> wasm_functions,
-    Handle<FixedArray> indirect_table_template,
-    Handle<FixedArray> tables_to_replace) {
-  Factory* factory = isolate->factory();
-  Handle<FixedArray> cloned_indirect_tables =
-      factory->CopyFixedArray(indirect_table_template);
-  for (int i = 0; i < cloned_indirect_tables->length(); ++i) {
-    Handle<FixedArray> orig_metadata =
-        cloned_indirect_tables->GetValueChecked<FixedArray>(isolate, i);
-    Handle<FixedArray> cloned_metadata = factory->CopyFixedArray(orig_metadata);
-    cloned_indirect_tables->set(i, *cloned_metadata);
-
-    Handle<FixedArray> orig_table =
-        cloned_metadata->GetValueChecked<FixedArray>(isolate, kTable);
-    Handle<FixedArray> cloned_table = factory->CopyFixedArray(orig_table);
-    cloned_metadata->set(kTable, *cloned_table);
-    // Patch the cloned code to refer to the cloned kTable.
-    Handle<FixedArray> table_to_replace =
-        tables_to_replace->GetValueChecked<FixedArray>(isolate, i)
-            ->GetValueChecked<FixedArray>(isolate, kTable);
-    for (int fct_index = 0; fct_index < wasm_functions->length(); ++fct_index) {
-      Handle<Code> wasm_function =
-          wasm_functions->GetValueChecked<Code>(isolate, fct_index);
-      PatchFunctionTable(wasm_function, table_to_replace, cloned_table);
-    }
+std::pair<int, int> GetFunctionOffsetAndLength(
+    Handle<WasmCompiledModule> compiled_module, int func_index) {
+  WasmModule* module = compiled_module->module();
+  if (func_index < 0 ||
+      static_cast<size_t>(func_index) >= module->functions.size()) {
+    return {0, 0};
   }
-  return cloned_indirect_tables;
+  WasmFunction& func = module->functions[func_index];
+  return {static_cast<int>(func.code_start_offset),
+          static_cast<int>(func.code_end_offset - func.code_start_offset)};
 }
 
 }  // namespace
 
-const char* SectionName(WasmSectionCode code) {
+const char* wasm::SectionName(WasmSectionCode code) {
   switch (code) {
     case kUnknownSectionCode:
       return "Unknown";
@@ -920,7 +632,7 @@
   }
 }
 
-std::ostream& operator<<(std::ostream& os, const WasmModule& module) {
+std::ostream& wasm::operator<<(std::ostream& os, const WasmModule& module) {
   os << "WASM module with ";
   os << (module.min_mem_pages * module.kPageSize) << " min mem";
   os << (module.max_mem_pages * module.kPageSize) << " max mem";
@@ -930,7 +642,7 @@
   return os;
 }
 
-std::ostream& operator<<(std::ostream& os, const WasmFunction& function) {
+std::ostream& wasm::operator<<(std::ostream& os, const WasmFunction& function) {
   os << "WASM function with signature " << *function.sig;
 
   os << " code bytes: "
@@ -938,7 +650,7 @@
   return os;
 }
 
-std::ostream& operator<<(std::ostream& os, const WasmFunctionName& pair) {
+std::ostream& wasm::operator<<(std::ostream& os, const WasmFunctionName& pair) {
   os << "#" << pair.function_->func_index << ":";
   if (pair.function_->name_offset > 0) {
     if (pair.module_) {
@@ -954,29 +666,7 @@
   return os;
 }
 
-Handle<JSFunction> WrapExportCodeAsJSFunction(
-    Isolate* isolate, Handle<Code> export_code, Handle<String> name, int arity,
-    MaybeHandle<ByteArray> maybe_signature, Handle<JSObject> module_instance) {
-  Handle<SharedFunctionInfo> shared =
-      isolate->factory()->NewSharedFunctionInfo(name, export_code, false);
-  shared->set_length(arity);
-  shared->set_internal_formal_parameter_count(arity);
-  Handle<JSFunction> function = isolate->factory()->NewFunction(
-      isolate->wasm_function_map(), name, export_code);
-  function->set_shared(*shared);
-
-  function->SetInternalField(kInternalModuleInstance, *module_instance);
-  // add another Internal Field as the function arity
-  function->SetInternalField(kInternalArity, Smi::FromInt(arity));
-  // add another Internal Field as the signature of the foreign function
-  Handle<ByteArray> signature;
-  if (maybe_signature.ToHandle(&signature)) {
-    function->SetInternalField(kInternalSignature, *signature);
-  }
-  return function;
-}
-
-Object* GetOwningWasmInstance(Code* code) {
+Object* wasm::GetOwningWasmInstance(Code* code) {
   DCHECK(code->kind() == Code::WASM_FUNCTION);
   DisallowHeapAllocation no_gc;
   FixedArray* deopt_data = code->deoptimization_data();
@@ -988,52 +678,65 @@
   return cell->value();
 }
 
-uint32_t GetNumImportedFunctions(Handle<JSObject> wasm_object) {
-  return static_cast<uint32_t>(
-      Smi::cast(wasm_object->GetInternalField(kWasmNumImportedFunctions))
-          ->value());
+int wasm::GetFunctionCodeOffset(Handle<WasmCompiledModule> compiled_module,
+                                int func_index) {
+  return GetFunctionOffsetAndLength(compiled_module, func_index).first;
 }
 
-WasmModule::WasmModule(byte* module_start)
-    : module_start(module_start),
-      module_end(nullptr),
-      min_mem_pages(0),
-      max_mem_pages(0),
-      mem_export(false),
-      start_function_index(-1),
-      origin(kWasmOrigin),
-      globals_size(0),
-      num_imported_functions(0),
-      num_declared_functions(0),
-      num_exported_functions(0),
+bool wasm::GetPositionInfo(Handle<WasmCompiledModule> compiled_module,
+                           uint32_t position, Script::PositionInfo* info) {
+  std::vector<WasmFunction>& functions = compiled_module->module()->functions;
+
+  // Binary search for a function containing the given position.
+  int left = 0;                                    // inclusive
+  int right = static_cast<int>(functions.size());  // exclusive
+  if (right == 0) return false;
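+  // {functions} are sorted by code_start_offset, so the loop below narrows
+  // {left} down to the last function starting at or before {position}, if
+  // any such function exists.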
+  while (right - left > 1) {
+    int mid = left + (right - left) / 2;
+    if (functions[mid].code_start_offset <= position) {
+      left = mid;
+    } else {
+      right = mid;
+    }
+  }
+  // If the found entry does not contain the given position, return false.
+  WasmFunction& func = functions[left];
+  if (position < func.code_start_offset || position >= func.code_end_offset) {
+    return false;
+  }
+
+  info->line = left;
+  info->column = position - func.code_start_offset;
+  info->line_start = func.code_start_offset;
+  info->line_end = func.code_end_offset;
+  return true;
+}
+
+WasmModule::WasmModule(Zone* owned, const byte* module_start)
+    : owned_zone(owned),
+      module_start(module_start),
       pending_tasks(new base::Semaphore(0)) {}
 
 MaybeHandle<WasmCompiledModule> WasmModule::CompileFunctions(
-    Isolate* isolate, ErrorThrower* thrower) const {
+    Isolate* isolate, Handle<WasmModuleWrapper> module_wrapper,
+    ErrorThrower* thrower) const {
   Factory* factory = isolate->factory();
 
   MaybeHandle<WasmCompiledModule> nothing;
 
-  WasmModuleInstance temp_instance(this);
+  WasmInstance temp_instance(this);
   temp_instance.context = isolate->native_context();
-  temp_instance.mem_size = GetMinModuleMemSize(this);
+  temp_instance.mem_size = WasmModule::kPageSize * this->min_mem_pages;
   temp_instance.mem_start = nullptr;
   temp_instance.globals_start = nullptr;
 
-  MaybeHandle<FixedArray> indirect_table =
-      function_tables.size()
-          ? factory->NewFixedArray(static_cast<int>(function_tables.size()),
-                                   TENURED)
-          : MaybeHandle<FixedArray>();
-  for (uint32_t i = 0; i < function_tables.size(); ++i) {
-    Handle<FixedArray> values = wasm::BuildFunctionTable(isolate, i, this);
-    temp_instance.function_tables[i] = values;
-
-    Handle<FixedArray> metadata = isolate->factory()->NewFixedArray(
-        kWasmIndirectFunctionTableDataSize, TENURED);
-    metadata->set(kSize, Smi::FromInt(function_tables[i].size));
-    metadata->set(kTable, *values);
-    indirect_table.ToHandleChecked()->set(i, *metadata);
+  // Initialize the indirect tables with placeholders.
+  int function_table_count = static_cast<int>(this->function_tables.size());
+  Handle<FixedArray> function_tables =
+      factory->NewFixedArray(function_table_count);
+  for (int i = 0; i < function_table_count; ++i) {
+    temp_instance.function_tables[i] = factory->NewFixedArray(0);
+    function_tables->set(i, *temp_instance.function_tables[i]);
   }
 
   HistogramTimerScope wasm_compile_module_time_scope(
@@ -1052,7 +755,7 @@
       factory->NewFixedArray(static_cast<int>(code_table_size), TENURED);
 
   // Initialize the code table with placeholders.
-  for (uint32_t i = 0; i < functions.size(); i++) {
+  for (uint32_t i = 0; i < functions.size(); ++i) {
     Code::Kind kind = Code::WASM_FUNCTION;
     if (i < num_imported_functions) kind = Code::WASM_TO_JS_FUNCTION;
     Handle<Code> placeholder = CreatePlaceholder(factory, i, kind);
@@ -1066,12 +769,12 @@
     // Avoid a race condition by collecting results into a second vector.
     std::vector<Handle<Code>> results;
     results.reserve(temp_instance.function_code.size());
-    for (size_t i = 0; i < temp_instance.function_code.size(); i++) {
+    for (size_t i = 0; i < temp_instance.function_code.size(); ++i) {
       results.push_back(temp_instance.function_code[i]);
     }
     CompileInParallel(isolate, this, results, thrower, &module_env);
 
-    for (size_t i = 0; i < results.size(); i++) {
+    for (size_t i = 0; i < results.size(); ++i) {
       temp_instance.function_code[i] = results[i];
     }
   } else {
@@ -1103,68 +806,32 @@
   // and information needed at instantiation time. This object needs to be
   // serializable. Instantiation may occur off a deserialized version of this
   // object.
-  Handle<WasmCompiledModule> ret = WasmCompiledModule::New(
-      isolate, min_mem_pages, globals_size, mem_export, origin);
+  Handle<WasmCompiledModule> ret =
+      WasmCompiledModule::New(isolate, module_wrapper);
   ret->set_code_table(code_table);
-  if (!indirect_table.is_null()) {
-    ret->set_indirect_function_tables(indirect_table.ToHandleChecked());
-  }
-  Handle<FixedArray> import_data = GetImportsData(factory, this);
-  ret->set_import_data(import_data);
-
-  // Compile exported function wrappers.
-  int export_size = static_cast<int>(num_exported_functions);
-  if (export_size > 0) {
-    Handle<FixedArray> exports = factory->NewFixedArray(export_size, TENURED);
-    int index = -1;
-
-    for (const WasmExport& exp : export_table) {
-      if (exp.kind != kExternalFunction)
-        continue;  // skip non-function exports.
-      index++;
-      Handle<FixedArray> export_data =
-          factory->NewFixedArray(kWasmExportDataSize, TENURED);
-      FunctionSig* funcSig = functions[exp.index].sig;
-      Handle<ByteArray> exportedSig =
-          factory->NewByteArray(static_cast<int>(funcSig->parameter_count() +
-                                                 funcSig->return_count()),
-                                TENURED);
-      exportedSig->copy_in(0,
-                           reinterpret_cast<const byte*>(funcSig->raw_data()),
-                           exportedSig->length());
-      export_data->set(kExportedSignature, *exportedSig);
-      WasmName str = GetName(exp.name_offset, exp.name_length);
-      Handle<String> name = factory->InternalizeUtf8String(str);
-      Handle<Code> code = code_table->GetValueChecked<Code>(isolate, exp.index);
-      Handle<Code> export_code = compiler::CompileJSToWasmWrapper(
-          isolate, &module_env, code, exp.index);
-      if (thrower->error()) return nothing;
-      export_data->set(kExportName, *name);
-      export_data->set(kExportArity,
-                       Smi::FromInt(static_cast<int>(
-                           functions[exp.index].sig->parameter_count())));
-      export_data->set(kExportedFunctionIndex,
-                       Smi::FromInt(static_cast<int>(exp.index)));
-      exports->set(index, *export_data);
-      code_table->set(static_cast<int>(functions.size() + index), *export_code);
-    }
-    ret->set_exports(exports);
+  ret->set_min_mem_pages(min_mem_pages);
+  ret->set_max_mem_pages(max_mem_pages);
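+  // At this point the function tables contain only empty placeholder
+  // arrays, so the same FixedArray can serve both as the current tables and
+  // as the "empty" template used when resetting a compiled module.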
+  if (function_table_count > 0) {
+    ret->set_function_tables(function_tables);
+    ret->set_empty_function_tables(function_tables);
   }
 
-  // Record data for startup function.
-  if (start_function_index >= 0) {
-    HandleScope scope(isolate);
-    Handle<FixedArray> startup_data =
-        factory->NewFixedArray(kWasmExportDataSize, TENURED);
-    startup_data->set(kExportArity, Smi::FromInt(0));
-    startup_data->set(kExportedFunctionIndex,
-                      Smi::FromInt(start_function_index));
-    ret->set_startup_function(startup_data);
+  // Compile JS->WASM wrappers for exported functions.
+  int func_index = 0;
+  for (auto exp : export_table) {
+    if (exp.kind != kExternalFunction) continue;
+    Handle<Code> wasm_code =
+        code_table->GetValueChecked<Code>(isolate, exp.index);
+    Handle<Code> wrapper_code = compiler::CompileJSToWasmWrapper(
+        isolate, &module_env, wasm_code, exp.index);
+    int export_index = static_cast<int>(functions.size() + func_index);
+    code_table->set(export_index, *wrapper_code);
+    func_index++;
   }
 
-  // TODO(wasm): saving the module bytes for debugging is wasteful. We should
-  // consider downloading this on-demand.
   {
+    // TODO(wasm): only save the sections necessary to deserialize a
+    // {WasmModule}. E.g. function bodies could be omitted.
     size_t module_bytes_len = module_end - module_start;
     DCHECK_LE(module_bytes_len, static_cast<size_t>(kMaxInt));
     Vector<const uint8_t> module_bytes_vec(module_start,
@@ -1172,644 +839,1185 @@
     Handle<String> module_bytes_string =
         factory->NewStringFromOneByte(module_bytes_vec, TENURED)
             .ToHandleChecked();
-    ret->set_module_bytes(module_bytes_string);
+    DCHECK(module_bytes_string->IsSeqOneByteString());
+    ret->set_module_bytes(Handle<SeqOneByteString>::cast(module_bytes_string));
   }
 
-  Handle<ByteArray> function_name_table =
-      BuildFunctionNamesTable(isolate, module_env.module);
-  ret->set_function_names(function_name_table);
-  if (data_segments.size() > 0) SaveDataSegmentInfo(factory, this, ret);
-  DCHECK_EQ(ret->default_mem_size(), temp_instance.mem_size);
   return ret;
 }
 
+static WasmFunction* GetWasmFunctionForImportWrapper(Isolate* isolate,
+                                                     Handle<Object> target) {
+  if (target->IsJSFunction()) {
+    Handle<JSFunction> func = Handle<JSFunction>::cast(target);
+    if (func->code()->kind() == Code::JS_TO_WASM_FUNCTION) {
+      auto exported = Handle<WasmExportedFunction>::cast(func);
+      Handle<WasmInstanceObject> other_instance(exported->instance(), isolate);
+      int func_index = exported->function_index();
+      return &other_instance->module()->functions[func_index];
+    }
+  }
+  return nullptr;
+}
+
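+// A JS->WASM export wrapper contains exactly one call to WASM (or WASM->JS)
+// code; scan its relocation info to extract that call target.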
+static Handle<Code> UnwrapImportWrapper(Handle<Object> target) {
+  Handle<JSFunction> func = Handle<JSFunction>::cast(target);
+  Handle<Code> export_wrapper_code = handle(func->code());
+  int found = 0;
+  int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
+  Handle<Code> code;
+  for (RelocIterator it(*export_wrapper_code, mask); !it.done(); it.next()) {
+    RelocInfo* rinfo = it.rinfo();
+    Address target_address = rinfo->target_address();
+    Code* target = Code::GetCodeFromTargetAddress(target_address);
+    if (target->kind() == Code::WASM_FUNCTION ||
+        target->kind() == Code::WASM_TO_JS_FUNCTION) {
+      ++found;
+      code = handle(target);
+    }
+  }
+  DCHECK(found == 1);
+  return code;
+}
+
+static Handle<Code> CompileImportWrapper(Isolate* isolate, int index,
+                                         FunctionSig* sig,
+                                         Handle<JSReceiver> target,
+                                         Handle<String> module_name,
+                                         MaybeHandle<String> import_name) {
+  Handle<Code> code;
+  WasmFunction* other_func = GetWasmFunctionForImportWrapper(isolate, target);
+  if (other_func) {
+    if (sig->Equals(other_func->sig)) {
+      // Signature matched. Unwrap the JS->WASM wrapper and return the raw
+      // WASM function code.
+      return UnwrapImportWrapper(target);
+    } else {
+      // Signature mismatch.
+      return Handle<Code>::null();
+    }
+  } else {
+    // Not an exported WASM function; compile a new WASM->JS wrapper for the
+    // callable.
+    return compiler::CompileWasmToJSWrapper(isolate, target, sig, index,
+                                            module_name, import_name);
+  }
+}
+
+static void UpdateDispatchTablesInternal(Isolate* isolate,
+                                         Handle<FixedArray> dispatch_tables,
+                                         int index, WasmFunction* function,
+                                         Handle<Code> code) {
+  DCHECK_EQ(0, dispatch_tables->length() % 3);
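+  // {dispatch_tables} is a flat list of (instance, table_index, table)
+  // triples; each table stores signature ids in its first half and code
+  // objects in its second half.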
+  for (int i = 0; i < dispatch_tables->length(); i += 3) {
+    int table_index = Smi::cast(dispatch_tables->get(i + 1))->value();
+    Handle<FixedArray> dispatch_table(
+        FixedArray::cast(dispatch_tables->get(i + 2)), isolate);
+    if (function) {
+      // TODO(titzer): the signature might need to be copied to avoid
+      // a dangling pointer in the signature map.
+      Handle<WasmInstanceObject> instance(
+          WasmInstanceObject::cast(dispatch_tables->get(i)), isolate);
+      int sig_index = static_cast<int>(
+          instance->module()->function_tables[table_index].map.FindOrInsert(
+              function->sig));
+      dispatch_table->set(index, Smi::FromInt(sig_index));
+      dispatch_table->set(index + (dispatch_table->length() / 2), *code);
+    } else {
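+      // Clear the entry: an invalid signature id and no code object.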
+      Code* code = nullptr;
+      dispatch_table->set(index, Smi::FromInt(-1));
+      dispatch_table->set(index + (dispatch_table->length() / 2), code);
+    }
+  }
+}
+
+void wasm::UpdateDispatchTables(Isolate* isolate,
+                                Handle<FixedArray> dispatch_tables, int index,
+                                Handle<JSFunction> function) {
+  if (function.is_null()) {
+    UpdateDispatchTablesInternal(isolate, dispatch_tables, index, nullptr,
+                                 Handle<Code>::null());
+  } else {
+    UpdateDispatchTablesInternal(
+        isolate, dispatch_tables, index,
+        GetWasmFunctionForImportWrapper(isolate, function),
+        UnwrapImportWrapper(function));
+  }
+}
+
+// A helper class to simplify instantiating a module from a compiled module.
+// It closes over the {Isolate}, the {ErrorThrower}, the {WasmCompiledModule},
+// etc.
+class WasmInstanceBuilder {
+ public:
+  WasmInstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
+                      Handle<JSObject> module_object, Handle<JSReceiver> ffi,
+                      Handle<JSArrayBuffer> memory)
+      : isolate_(isolate),
+        thrower_(thrower),
+        module_object_(module_object),
+        ffi_(ffi),
+        memory_(memory) {}
+
+  // Build an instance, in all of its glory.
+  MaybeHandle<JSObject> Build() {
+    MaybeHandle<JSObject> nothing;
+    HistogramTimerScope wasm_instantiate_module_time_scope(
+        isolate_->counters()->wasm_instantiate_module_time());
+    Factory* factory = isolate_->factory();
+
+    //--------------------------------------------------------------------------
+    // Reuse the compiled module (if no owner), otherwise clone.
+    //--------------------------------------------------------------------------
+    Handle<FixedArray> code_table;
+    Handle<FixedArray> old_code_table;
+    MaybeHandle<WasmInstanceObject> owner;
+
+    TRACE("Starting new module instantiation\n");
+    {
+      // Root the owner, if any, before doing any allocations, which
+      // may trigger GC.
+      // Both owner and original template need to be in sync. Even
+      // after we lose the original template handle, the code
+      // objects we copied from it have data relative to the
+      // instance - such as globals addresses.
+      Handle<WasmCompiledModule> original;
+      {
+        DisallowHeapAllocation no_gc;
+        original = handle(
+            WasmCompiledModule::cast(module_object_->GetInternalField(0)));
+        if (original->has_weak_owning_instance()) {
+          owner = handle(WasmInstanceObject::cast(
+              original->weak_owning_instance()->value()));
+        }
+      }
+      DCHECK(!original.is_null());
+      // Always make a new copy of the code_table, since the old_code_table
+      // may still have placeholders for imports.
+      old_code_table = original->code_table();
+      code_table = factory->CopyFixedArray(old_code_table);
+
+      if (original->has_weak_owning_instance()) {
+        // Clone, but don't insert the clone into the instances chain yet.
+        // We do that last. Since we are holding on to the owner instance,
+        // the owner + original state used for cloning and patching
+        // won't be mutated by possible finalizer runs.
+        DCHECK(!owner.is_null());
+        TRACE("Cloning from %d\n", original->instance_id());
+        compiled_module_ = WasmCompiledModule::Clone(isolate_, original);
+        // Avoid creating too many handles in the outer scope.
+        HandleScope scope(isolate_);
+
+        // Clone the code for WASM functions and exports.
+        for (int i = 0; i < code_table->length(); ++i) {
+          Handle<Code> orig_code =
+              code_table->GetValueChecked<Code>(isolate_, i);
+          switch (orig_code->kind()) {
+            case Code::WASM_TO_JS_FUNCTION:
+              // Imports will be overwritten with newly compiled wrappers.
+              break;
+            case Code::JS_TO_WASM_FUNCTION:
+            case Code::WASM_FUNCTION: {
+              Handle<Code> code = factory->CopyCode(orig_code);
+              code_table->set(i, *code);
+              break;
+            }
+            default:
+              UNREACHABLE();
+          }
+        }
+        RecordStats(isolate_, code_table);
+      } else {
+        // There was no owner, so we can reuse the original.
+        compiled_module_ = original;
+        TRACE("Reusing existing instance %d\n",
+              compiled_module_->instance_id());
+      }
+      compiled_module_->set_code_table(code_table);
+    }
+    module_ = reinterpret_cast<WasmModuleWrapper*>(
+                  *compiled_module_->module_wrapper())
+                  ->get();
+
+    //--------------------------------------------------------------------------
+    // Allocate the instance object.
+    //--------------------------------------------------------------------------
+    Handle<WasmInstanceObject> instance =
+        WasmInstanceObject::New(isolate_, compiled_module_);
+
+    //--------------------------------------------------------------------------
+    // Set up the globals for the new instance.
+    //--------------------------------------------------------------------------
+    MaybeHandle<JSArrayBuffer> old_globals;
+    uint32_t globals_size = module_->globals_size;
+    if (globals_size > 0) {
+      Handle<JSArrayBuffer> global_buffer =
+          NewArrayBuffer(isolate_, globals_size);
+      globals_ = global_buffer;
+      if (globals_.is_null()) {
+        thrower_->RangeError("Out of memory: wasm globals");
+        return nothing;
+      }
+      Address old_address =
+          owner.is_null() ? nullptr : GetGlobalStartAddressFromCodeTemplate(
+                                          isolate_->heap()->undefined_value(),
+                                          *owner.ToHandleChecked());
+      RelocateGlobals(code_table, old_address,
+                      static_cast<Address>(global_buffer->backing_store()));
+      instance->set_globals_buffer(*global_buffer);
+    }
+
+    //--------------------------------------------------------------------------
+    // Prepare for initialization of function tables.
+    //--------------------------------------------------------------------------
+    int function_table_count =
+        static_cast<int>(module_->function_tables.size());
+    table_instances_.reserve(module_->function_tables.size());
+    for (int index = 0; index < function_table_count; ++index) {
+      table_instances_.push_back({Handle<WasmTableObject>::null(),
+                                  Handle<FixedArray>::null(),
+                                  Handle<FixedArray>::null()});
+    }
+
+    //--------------------------------------------------------------------------
+    // Process the imports for the module.
+    //--------------------------------------------------------------------------
+    int num_imported_functions = ProcessImports(code_table, instance);
+    if (num_imported_functions < 0) return nothing;
+
+    //--------------------------------------------------------------------------
+    // Process the initialization for the module's globals.
+    //--------------------------------------------------------------------------
+    InitGlobals();
+
+    //--------------------------------------------------------------------------
+    // Set up the memory for the new instance.
+    //--------------------------------------------------------------------------
+    MaybeHandle<JSArrayBuffer> old_memory;
+
+    uint32_t min_mem_pages = module_->min_mem_pages;
+    isolate_->counters()->wasm_min_mem_pages_count()->AddSample(min_mem_pages);
+
+    if (!memory_.is_null()) {
+      // Mark the externally passed ArrayBuffer as non-neuterable.
+      memory_->set_is_neuterable(false);
+    } else if (min_mem_pages > 0) {
+      memory_ = AllocateMemory(min_mem_pages);
+      if (memory_.is_null()) return nothing;  // failed to allocate memory
+    }
+
+    if (!memory_.is_null()) {
+      instance->set_memory_buffer(*memory_);
+      Address mem_start = static_cast<Address>(memory_->backing_store());
+      uint32_t mem_size =
+          static_cast<uint32_t>(memory_->byte_length()->Number());
+      LoadDataSegments(mem_start, mem_size);
+
+      uint32_t old_mem_size = compiled_module_->mem_size();
+      Address old_mem_start =
+          compiled_module_->has_memory()
+              ? static_cast<Address>(
+                    compiled_module_->memory()->backing_store())
+              : nullptr;
+      RelocateMemoryReferencesInCode(code_table, old_mem_start, mem_start,
+                                     old_mem_size, mem_size);
+      compiled_module_->set_memory(memory_);
+    } else {
+      LoadDataSegments(nullptr, 0);
+    }
+
+    //--------------------------------------------------------------------------
+    // Set up the runtime support for the new instance.
+    //--------------------------------------------------------------------------
+    Handle<WeakCell> weak_link = factory->NewWeakCell(instance);
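+    // Attach deoptimization data to each WASM function: a weak link to the
+    // owning instance and the function's index, so the instance can later
+    // be recovered from a code object (see GetOwningWasmInstance).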
+
+    for (int i = num_imported_functions + FLAG_skip_compiling_wasm_funcs;
+         i < code_table->length(); ++i) {
+      Handle<Code> code = code_table->GetValueChecked<Code>(isolate_, i);
+      if (code->kind() == Code::WASM_FUNCTION) {
+        Handle<FixedArray> deopt_data = factory->NewFixedArray(2, TENURED);
+        deopt_data->set(0, *weak_link);
+        deopt_data->set(1, Smi::FromInt(static_cast<int>(i)));
+        deopt_data->set_length(2);
+        code->set_deoptimization_data(*deopt_data);
+      }
+    }
+
+    //--------------------------------------------------------------------------
+    // Set up the exports object for the new instance.
+    //--------------------------------------------------------------------------
+    ProcessExports(code_table, instance);
+
+    //--------------------------------------------------------------------------
+    // Set up the indirect function tables for the new instance.
+    //--------------------------------------------------------------------------
+    if (function_table_count > 0) InitializeTables(code_table, instance);
+
+    if (num_imported_functions > 0 || !owner.is_null()) {
+      // If the code was cloned, or new imports were compiled, patch.
+      PatchDirectCalls(old_code_table, code_table, num_imported_functions);
+    }
+
+    FlushICache(isolate_, code_table);
+
+    //--------------------------------------------------------------------------
+    // Set up and link the new instance.
+    //--------------------------------------------------------------------------
+    {
+      Handle<Object> global_handle =
+          isolate_->global_handles()->Create(*instance);
+      Handle<WeakCell> link_to_clone = factory->NewWeakCell(compiled_module_);
+      Handle<WeakCell> link_to_owning_instance = factory->NewWeakCell(instance);
+      MaybeHandle<WeakCell> link_to_original;
+      MaybeHandle<WasmCompiledModule> original;
+      if (!owner.is_null()) {
+        // Prepare the data needed for publishing in a chain, but don't link
+        // just yet, because we want all the publishing to happen free from
+        // GC interruptions, and so we do it in one GC-free scope afterwards.
+        original = handle(owner.ToHandleChecked()->get_compiled_module());
+        link_to_original = factory->NewWeakCell(original.ToHandleChecked());
+      }
+      // Publish the new instance to the instances chain.
+      {
+        DisallowHeapAllocation no_gc;
+        if (!link_to_original.is_null()) {
+          compiled_module_->set_weak_next_instance(
+              link_to_original.ToHandleChecked());
+          original.ToHandleChecked()->set_weak_prev_instance(link_to_clone);
+          compiled_module_->set_weak_wasm_module(
+              original.ToHandleChecked()->weak_wasm_module());
+        }
+        module_object_->SetInternalField(0, *compiled_module_);
+        compiled_module_->set_weak_owning_instance(link_to_owning_instance);
+        GlobalHandles::MakeWeak(global_handle.location(),
+                                global_handle.location(), &InstanceFinalizer,
+                                v8::WeakCallbackType::kFinalizer);
+      }
+    }
+
+    DCHECK(wasm::IsWasmInstance(*instance));
+    if (instance->has_memory_object()) {
+      instance->get_memory_object()->AddInstance(*instance);
+    }
+
+    //--------------------------------------------------------------------------
+    // Run the start function if one was specified.
+    //--------------------------------------------------------------------------
+    if (module_->start_function_index >= 0) {
+      HandleScope scope(isolate_);
+      ModuleEnv module_env;
+      module_env.module = module_;
+      module_env.instance = nullptr;
+      module_env.origin = module_->origin;
+      int start_index = module_->start_function_index;
+      Handle<Code> startup_code =
+          code_table->GetValueChecked<Code>(isolate_, start_index);
+      FunctionSig* sig = module_->functions[start_index].sig;
+      Handle<Code> wrapper_code = compiler::CompileJSToWasmWrapper(
+          isolate_, &module_env, startup_code, start_index);
+      Handle<WasmExportedFunction> startup_fct = WasmExportedFunction::New(
+          isolate_, instance, factory->InternalizeUtf8String("start"),
+          wrapper_code, static_cast<int>(sig->parameter_count()), start_index);
+      RecordStats(isolate_, *startup_code);
+      // Call the JS function.
+      Handle<Object> undefined = factory->undefined_value();
+      MaybeHandle<Object> retval =
+          Execution::Call(isolate_, startup_fct, undefined, 0, nullptr);
+
+      if (retval.is_null()) {
+        DCHECK(isolate_->has_pending_exception());
+        isolate_->OptionalRescheduleException(false);
+        // It's unfortunate that the new instance is already linked in the
+        // chain. However, we need to set up everything before executing the
+        // start function, such that stack trace information can be generated
+        // correctly already in the start function.
+        return nothing;
+      }
+    }
+
+    DCHECK(!isolate_->has_pending_exception());
+    TRACE("Finishing instance %d\n", compiled_module_->instance_id());
+    TRACE_CHAIN(WasmCompiledModule::cast(module_object_->GetInternalField(0)));
+    return instance;
+  }
+
+ private:
+  // Represents the initialized state of a table.
+  struct TableInstance {
+    Handle<WasmTableObject> table_object;    // WebAssembly.Table instance
+    Handle<FixedArray> js_wrappers;          // JSFunctions exported
+    Handle<FixedArray> dispatch_table;       // internal (code, sig) pairs
+  };
+
+  Isolate* isolate_;
+  WasmModule* module_;
+  ErrorThrower* thrower_;
+  Handle<JSObject> module_object_;
+  Handle<JSReceiver> ffi_;
+  Handle<JSArrayBuffer> memory_;
+  Handle<JSArrayBuffer> globals_;
+  Handle<WasmCompiledModule> compiled_module_;
+  std::vector<TableInstance> table_instances_;
+  std::vector<Handle<JSFunction>> js_wrappers_;
+
+  // Helper routine to print out errors with imports (FFI).
+  MaybeHandle<JSFunction> ReportFFIError(const char* error, uint32_t index,
+                                         Handle<String> module_name,
+                                         MaybeHandle<String> function_name) {
+    Handle<String> function_name_handle;
+    if (function_name.ToHandle(&function_name_handle)) {
+      thrower_->TypeError(
+          "Import #%d module=\"%.*s\" function=\"%.*s\" error: %s", index,
+          module_name->length(), module_name->ToCString().get(),
+          function_name_handle->length(),
+          function_name_handle->ToCString().get(), error);
+    } else {
+      thrower_->TypeError("Import #%d module=\"%.*s\" error: %s", index,
+                          module_name->length(), module_name->ToCString().get(),
+                          error);
+    }
+    thrower_->TypeError("Import ");
+    return MaybeHandle<JSFunction>();
+  }
+
+  // Look up an import value in the {ffi_} object.
+  MaybeHandle<Object> LookupImport(uint32_t index, Handle<String> module_name,
+                                   MaybeHandle<String> import_name) {
+    if (ffi_.is_null()) {
+      return ReportFFIError("FFI is not an object", index, module_name,
+                            import_name);
+    }
+
+    // Look up the module first.
+    MaybeHandle<Object> result = Object::GetProperty(ffi_, module_name);
+    if (result.is_null()) {
+      return ReportFFIError("module not found", index, module_name,
+                            import_name);
+    }
+
+    Handle<Object> module = result.ToHandleChecked();
+
+    if (!import_name.is_null()) {
+      // Look up the value in the module.
+      if (!module->IsJSReceiver()) {
+        return ReportFFIError("module is not an object or function", index,
+                              module_name, import_name);
+      }
+
+      result = Object::GetProperty(module, import_name.ToHandleChecked());
+      if (result.is_null()) {
+        return ReportFFIError("import not found", index, module_name,
+                              import_name);
+      }
+    } else {
+      // No function specified. Use the "default export".
+      result = module;
+    }
+
+    return result;
+  }
+
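+  // Evaluates an i32 initializer expression: either an i32 constant or the
+  // current value of a previously initialized global.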
+  uint32_t EvalUint32InitExpr(const WasmInitExpr& expr) {
+    switch (expr.kind) {
+      case WasmInitExpr::kI32Const:
+        return expr.val.i32_const;
+      case WasmInitExpr::kGlobalIndex: {
+        uint32_t offset = module_->globals[expr.val.global_index].offset;
+        return *reinterpret_cast<uint32_t*>(raw_buffer_ptr(globals_, offset));
+      }
+      default:
+        UNREACHABLE();
+        return 0;
+    }
+  }
+
+  // Load data segments into the memory.
+  void LoadDataSegments(Address mem_addr, size_t mem_size) {
+    Handle<SeqOneByteString> module_bytes = compiled_module_->module_bytes();
+    for (const WasmDataSegment& segment : module_->data_segments) {
+      uint32_t source_size = segment.source_size;
+      // Segments of size == 0 are just nops.
+      if (source_size == 0) continue;
+      uint32_t dest_offset = EvalUint32InitExpr(segment.dest_addr);
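+      // Overflow-safe bounds check: reject segments that do not fit into
+      // the instance's memory.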
+      if (dest_offset >= mem_size || source_size >= mem_size ||
+          dest_offset > (mem_size - source_size)) {
+        thrower_->TypeError("data segment (start = %" PRIu32 ", size = %" PRIu32
+                            ") does not fit into memory (size = %" PRIuS ")",
+                            dest_offset, source_size, mem_size);
+        return;
+      }
+      byte* dest = mem_addr + dest_offset;
+      const byte* src = reinterpret_cast<const byte*>(
+          module_bytes->GetCharsAddress() + segment.source_offset);
+      memcpy(dest, src, source_size);
+    }
+  }
+
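+  // Write an imported global's value (a Smi or HeapNumber) into its slot in
+  // the {globals_} buffer, converting to the global's declared wasm type.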
+  void WriteGlobalValue(WasmGlobal& global, Handle<Object> value) {
+    double num = 0;
+    if (value->IsSmi()) {
+      num = Smi::cast(*value)->value();
+    } else if (value->IsHeapNumber()) {
+      num = HeapNumber::cast(*value)->value();
+    } else {
+      UNREACHABLE();
+    }
+    TRACE("init [globals+%u] = %lf, type = %s\n", global.offset, num,
+          WasmOpcodes::TypeName(global.type));
+    switch (global.type) {
+      case kAstI32:
+        *GetRawGlobalPtr<int32_t>(global) = static_cast<int32_t>(num);
+        break;
+      case kAstI64:
+        // TODO(titzer): initialization of imported i64 globals.
+        UNREACHABLE();
+        break;
+      case kAstF32:
+        *GetRawGlobalPtr<float>(global) = static_cast<float>(num);
+        break;
+      case kAstF64:
+        *GetRawGlobalPtr<double>(global) = static_cast<double>(num);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  }
+
+  // Process the imports, including functions, tables, globals, and memory, in
+  // order, loading them from the {ffi_} object. Returns the number of imported
+  // functions.
+  int ProcessImports(Handle<FixedArray> code_table,
+                     Handle<WasmInstanceObject> instance) {
+    int num_imported_functions = 0;
+    int num_imported_tables = 0;
+    for (int index = 0; index < static_cast<int>(module_->import_table.size());
+         ++index) {
+      WasmImport& import = module_->import_table[index];
+      Handle<String> module_name =
+          ExtractStringFromModuleBytes(isolate_, compiled_module_,
+                                       import.module_name_offset,
+                                       import.module_name_length)
+              .ToHandleChecked();
+      Handle<String> function_name = Handle<String>::null();
+      if (import.field_name_length > 0) {
+        function_name = ExtractStringFromModuleBytes(isolate_, compiled_module_,
+                                                     import.field_name_offset,
+                                                     import.field_name_length)
+                            .ToHandleChecked();
+      }
+
+      MaybeHandle<Object> result =
+          LookupImport(index, module_name, function_name);
+      if (thrower_->error()) return -1;
+
+      switch (import.kind) {
+        case kExternalFunction: {
+          // Function imports must be callable.
+          Handle<Object> function = result.ToHandleChecked();
+          if (!function->IsCallable()) {
+            ReportFFIError("function import requires a callable", index,
+                           module_name, function_name);
+            return -1;
+          }
+
+          Handle<Code> import_wrapper = CompileImportWrapper(
+              isolate_, index, module_->functions[import.index].sig,
+              Handle<JSReceiver>::cast(function), module_name, function_name);
+          if (import_wrapper.is_null()) {
+            ReportFFIError("imported function does not match the expected type",
+                           index, module_name, function_name);
+            return -1;
+          }
+          code_table->set(num_imported_functions, *import_wrapper);
+          RecordStats(isolate_, *import_wrapper);
+          num_imported_functions++;
+          break;
+        }
+        case kExternalTable: {
+          Handle<Object> value = result.ToHandleChecked();
+          if (!WasmJs::IsWasmTableObject(isolate_, value)) {
+            ReportFFIError("table import requires a WebAssembly.Table", index,
+                           module_name, function_name);
+            return -1;
+          }
+          WasmIndirectFunctionTable& table =
+              module_->function_tables[num_imported_tables];
+          TableInstance& table_instance = table_instances_[num_imported_tables];
+          table_instance.table_object = Handle<WasmTableObject>::cast(value);
+          table_instance.js_wrappers = Handle<FixedArray>(
+              table_instance.table_object->get_functions(), isolate_);
+
+          // TODO(titzer): import table size must match exactly for now.
+          int table_size = table_instance.js_wrappers->length();
+          if (table_size != static_cast<int>(table.min_size)) {
+            thrower_->TypeError(
+                "table import %d is wrong size (%d), expected %u", index,
+                table_size, table.min_size);
+            return -1;
+          }
+
+          // Allocate a new dispatch table.
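+          // Layout: entries [0, table_size) hold signature indexes, entries
+          // [table_size, 2 * table_size) hold the corresponding code objects.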
+          table_instance.dispatch_table =
+              isolate_->factory()->NewFixedArray(table_size * 2);
+          for (int i = 0; i < table_size * 2; ++i) {
+            table_instance.dispatch_table->set(i,
+                                               Smi::FromInt(kInvalidSigIndex));
+          }
+          // Initialize the dispatch table with the (foreign) JS functions
+          // that are already in the table.
+          for (int i = 0; i < table_size; ++i) {
+            Handle<Object> val(table_instance.js_wrappers->get(i), isolate_);
+            if (!val->IsJSFunction()) continue;
+            WasmFunction* function =
+                GetWasmFunctionForImportWrapper(isolate_, val);
+            if (function == nullptr) {
+              thrower_->TypeError("table import %d[%d] is not a WASM function",
+                                  index, i);
+              return -1;
+            }
+            int sig_index = table.map.FindOrInsert(function->sig);
+            table_instance.dispatch_table->set(i, Smi::FromInt(sig_index));
+            table_instance.dispatch_table->set(i + table_size,
+                                               *UnwrapImportWrapper(val));
+          }
+
+          num_imported_tables++;
+          break;
+        }
+        case kExternalMemory: {
+          Handle<Object> object = result.ToHandleChecked();
+          if (!WasmJs::IsWasmMemoryObject(isolate_, object)) {
+            ReportFFIError("memory import must be a WebAssembly.Memory object",
+                           index, module_name, function_name);
+            return -1;
+          }
+          auto memory = Handle<WasmMemoryObject>::cast(object);
+          instance->set_memory_object(*memory);
+          memory_ = Handle<JSArrayBuffer>(memory->get_buffer(), isolate_);
+          break;
+        }
+        case kExternalGlobal: {
+          // Global imports are converted to numbers and written into the
+          // {globals_} array buffer.
+          Handle<Object> object = result.ToHandleChecked();
+          MaybeHandle<Object> number = Object::ToNumber(object);
+          if (number.is_null()) {
+            ReportFFIError("global import could not be converted to number",
+                           index, module_name, function_name);
+            return -1;
+          }
+          Handle<Object> val = number.ToHandleChecked();
+          WriteGlobalValue(module_->globals[import.index], val);
+          break;
+        }
+        default:
+          UNREACHABLE();
+          break;
+      }
+    }
+    return num_imported_functions;
+  }
+
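+  // Return a typed pointer to the given global's slot in the {globals_}
+  // buffer.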
+  template <typename T>
+  T* GetRawGlobalPtr(WasmGlobal& global) {
+    return reinterpret_cast<T*>(raw_buffer_ptr(globals_, global.offset));
+  }
+
+  // Process initialization of globals.
+  void InitGlobals() {
+    for (auto global : module_->globals) {
+      switch (global.init.kind) {
+        case WasmInitExpr::kI32Const:
+          *GetRawGlobalPtr<int32_t>(global) = global.init.val.i32_const;
+          break;
+        case WasmInitExpr::kI64Const:
+          *GetRawGlobalPtr<int64_t>(global) = global.init.val.i64_const;
+          break;
+        case WasmInitExpr::kF32Const:
+          *GetRawGlobalPtr<float>(global) = global.init.val.f32_const;
+          break;
+        case WasmInitExpr::kF64Const:
+          *GetRawGlobalPtr<double>(global) = global.init.val.f64_const;
+          break;
+        case WasmInitExpr::kGlobalIndex: {
+          // Initialize with another global.
+          uint32_t new_offset = global.offset;
+          uint32_t old_offset =
+              module_->globals[global.init.val.global_index].offset;
+          TRACE("init [globals+%u] = [globals+%d]\n", global.offset,
+                old_offset);
+          size_t size = (global.type == kAstI64 || global.type == kAstF64)
+                            ? sizeof(double)
+                            : sizeof(int32_t);
+          memcpy(raw_buffer_ptr(globals_, new_offset),
+                 raw_buffer_ptr(globals_, old_offset), size);
+          break;
+        }
+        case WasmInitExpr::kNone:
+          // Happens with imported globals.
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+    }
+  }
+
+  // Allocate memory for a module instance as a new JSArrayBuffer.
+  Handle<JSArrayBuffer> AllocateMemory(uint32_t min_mem_pages) {
+    if (min_mem_pages > WasmModule::kV8MaxPages) {
+      thrower_->RangeError("Out of memory: wasm memory too large");
+      return Handle<JSArrayBuffer>::null();
+    }
+    Handle<JSArrayBuffer> mem_buffer =
+        NewArrayBuffer(isolate_, min_mem_pages * WasmModule::kPageSize);
+
+    if (mem_buffer.is_null()) {
+      thrower_->RangeError("Out of memory: wasm memory");
+    }
+    return mem_buffer;
+  }
+
+  // Process the exports, creating wrappers for functions, tables, memories,
+  // and globals.
+  void ProcessExports(Handle<FixedArray> code_table,
+                      Handle<WasmInstanceObject> instance) {
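+    // Wrappers are cached whenever some function is exported directly or is
+    // reachable through an imported or exported table.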
+    bool needs_wrappers = module_->num_exported_functions > 0;
+    for (auto table_instance : table_instances_) {
+      if (!table_instance.js_wrappers.is_null()) {
+        needs_wrappers = true;
+        break;
+      }
+    }
+    for (auto table : module_->function_tables) {
+      if (table.exported) {
+        needs_wrappers = true;
+        break;
+      }
+    }
+    if (needs_wrappers) {
+      // Fill the table to cache the exported JSFunction wrappers.
+      js_wrappers_.insert(js_wrappers_.begin(), module_->functions.size(),
+                          Handle<JSFunction>::null());
+    }
+
+    Handle<JSObject> exports_object = instance;
+    if (module_->export_table.size() > 0 && module_->origin == kWasmOrigin) {
+      // Create the "exports" object.
+      Handle<JSFunction> object_function = Handle<JSFunction>(
+          isolate_->native_context()->object_function(), isolate_);
+      exports_object =
+          isolate_->factory()->NewJSObject(object_function, TENURED);
+      Handle<String> exports_name =
+          isolate_->factory()->InternalizeUtf8String("exports");
+      JSObject::AddProperty(instance, exports_name, exports_object, READ_ONLY);
+    }
+
+    PropertyDescriptor desc;
+    desc.set_writable(false);
+
+    // Process each export in the export table.
+    int export_index = 0;
+    for (auto exp : module_->export_table) {
+      Handle<String> name =
+          ExtractStringFromModuleBytes(isolate_, compiled_module_,
+                                       exp.name_offset, exp.name_length)
+              .ToHandleChecked();
+      switch (exp.kind) {
+        case kExternalFunction: {
+          // Wrap and export the code as a JSFunction.
+          WasmFunction& function = module_->functions[exp.index];
+          int func_index =
+              static_cast<int>(module_->functions.size() + export_index);
+          Handle<JSFunction> js_function = js_wrappers_[exp.index];
+          if (js_function.is_null()) {
+            // Wrap the exported code as a JSFunction.
+            Handle<Code> export_code =
+                code_table->GetValueChecked<Code>(isolate_, func_index);
+            js_function = WasmExportedFunction::New(
+                isolate_, instance, name, export_code,
+                static_cast<int>(function.sig->parameter_count()),
+                function.func_index);
+            js_wrappers_[exp.index] = js_function;
+          }
+          desc.set_value(js_function);
+          export_index++;
+          break;
+        }
+        case kExternalTable: {
+          // Export a table as a WebAssembly.Table object.
+          TableInstance& table_instance = table_instances_[exp.index];
+          WasmIndirectFunctionTable& table =
+              module_->function_tables[exp.index];
+          if (table_instance.table_object.is_null()) {
+            uint32_t maximum =
+                table.has_max ? table.max_size : WasmModule::kV8MaxTableSize;
+            table_instance.table_object = WasmTableObject::New(
+                isolate_, table.min_size, maximum, &table_instance.js_wrappers);
+          }
+          desc.set_value(table_instance.table_object);
+          break;
+        }
+        case kExternalMemory: {
+          // Export the memory as a WebAssembly.Memory object.
+          Handle<WasmMemoryObject> memory_object;
+          if (!instance->has_memory_object()) {
+            // If there was no imported WebAssembly.Memory object, create one.
+            Handle<JSArrayBuffer> buffer(instance->get_memory_buffer(),
+                                         isolate_);
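+            // A module maximum of 0 pages means no maximum was declared;
+            // -1 is passed on to WasmMemoryObject::New in that case.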
+            memory_object = WasmMemoryObject::New(
+                isolate_, buffer,
+                (module_->max_mem_pages != 0) ? module_->max_mem_pages : -1);
+            instance->set_memory_object(*memory_object);
+          } else {
+            memory_object = Handle<WasmMemoryObject>(
+                instance->get_memory_object(), isolate_);
+          }
+
+          desc.set_value(memory_object);
+          break;
+        }
+        case kExternalGlobal: {
+          // Export the value of the global variable as a number.
+          WasmGlobal& global = module_->globals[exp.index];
+          double num = 0;
+          switch (global.type) {
+            case kAstI32:
+              num = *GetRawGlobalPtr<int32_t>(global);
+              break;
+            case kAstF32:
+              num = *GetRawGlobalPtr<float>(global);
+              break;
+            case kAstF64:
+              num = *GetRawGlobalPtr<double>(global);
+              break;
+            default:
+              UNREACHABLE();
+          }
+          desc.set_value(isolate_->factory()->NewNumber(num));
+          break;
+        }
+        default:
+          UNREACHABLE();
+          break;
+      }
+
+      v8::Maybe<bool> status = JSReceiver::DefineOwnProperty(
+          isolate_, exports_object, name, &desc, Object::THROW_ON_ERROR);
+      if (!status.IsJust()) {
+        thrower_->TypeError("export of %.*s failed.", name->length(),
+                            name->ToCString().get());
+        return;
+      }
+    }
+  }
+
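+  // Set up the dispatch tables for the indirect function tables, fill them
+  // from the module's table initializers, and patch code that still refers
+  // to the old tables.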
+  void InitializeTables(Handle<FixedArray> code_table,
+                        Handle<WasmInstanceObject> instance) {
+    Handle<FixedArray> old_function_tables =
+        compiled_module_->function_tables();
+    int function_table_count =
+        static_cast<int>(module_->function_tables.size());
+    Handle<FixedArray> new_function_tables =
+        isolate_->factory()->NewFixedArray(function_table_count);
+    for (int index = 0; index < function_table_count; ++index) {
+      WasmIndirectFunctionTable& table = module_->function_tables[index];
+      TableInstance& table_instance = table_instances_[index];
+      int table_size = static_cast<int>(table.min_size);
+
+      if (table_instance.dispatch_table.is_null()) {
+        // Create a new dispatch table if necessary.
+        table_instance.dispatch_table =
+            isolate_->factory()->NewFixedArray(table_size * 2);
+        for (int i = 0; i < table_size; ++i) {
+          // Fill the table with invalid signature indexes so that
+          // uninitialized entries will always fail the signature check.
+          table_instance.dispatch_table->set(i, Smi::FromInt(kInvalidSigIndex));
+        }
+      }
+
+      new_function_tables->set(static_cast<int>(index),
+                               *table_instance.dispatch_table);
+
+      Handle<FixedArray> all_dispatch_tables;
+      if (!table_instance.table_object.is_null()) {
+        // Get the existing dispatch table(s) from the WebAssembly.Table object.
+        all_dispatch_tables = WasmTableObject::AddDispatchTable(
+            isolate_, table_instance.table_object,
+            Handle<WasmInstanceObject>::null(), index,
+            Handle<FixedArray>::null());
+      }
+
+      // TODO(titzer): this does redundant work if there are multiple tables,
+      // since initializations are not sorted by table index.
+      for (auto table_init : module_->table_inits) {
+        uint32_t base = EvalUint32InitExpr(table_init.offset);
+        if (base > static_cast<uint32_t>(table_size) ||
+            (base + table_init.entries.size() >
+             static_cast<uint32_t>(table_size))) {
+          thrower_->CompileError("table initializer is out of bounds");
+          continue;
+        }
+        for (int i = 0; i < static_cast<int>(table_init.entries.size()); ++i) {
+          uint32_t func_index = table_init.entries[i];
+          WasmFunction* function = &module_->functions[func_index];
+          int table_index = static_cast<int>(i + base);
+          int32_t sig_index = table.map.Find(function->sig);
+          DCHECK_GE(sig_index, 0);
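+          // As in ProcessImports, signature indexes occupy the first half of
+          // the dispatch table and code objects the second half.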
+          table_instance.dispatch_table->set(table_index,
+                                             Smi::FromInt(sig_index));
+          table_instance.dispatch_table->set(table_index + table_size,
+                                             code_table->get(func_index));
+
+          if (!all_dispatch_tables.is_null()) {
+            Handle<Code> wasm_code(Code::cast(code_table->get(func_index)),
+                                   isolate_);
+            if (js_wrappers_[func_index].is_null()) {
+              // No JSFunction entry yet exists for this function. Create one.
+              // TODO(titzer): We compile JS->WASM wrappers for functions that
+              // are not exported but are in an exported table. This should be
+              // done at module compile time and cached instead.
+              WasmInstance temp_instance(module_);
+              temp_instance.context = isolate_->native_context();
+              temp_instance.mem_size = 0;
+              temp_instance.mem_start = nullptr;
+              temp_instance.globals_start = nullptr;
+
+              ModuleEnv module_env;
+              module_env.module = module_;
+              module_env.instance = &temp_instance;
+              module_env.origin = module_->origin;
+
+              Handle<Code> wrapper_code = compiler::CompileJSToWasmWrapper(
+                  isolate_, &module_env, wasm_code, func_index);
+              Handle<WasmExportedFunction> js_function =
+                  WasmExportedFunction::New(
+                      isolate_, instance, isolate_->factory()->empty_string(),
+                      wrapper_code,
+                      static_cast<int>(function->sig->parameter_count()),
+                      func_index);
+              js_wrappers_[func_index] = js_function;
+            }
+            table_instance.js_wrappers->set(table_index,
+                                            *js_wrappers_[func_index]);
+
+            UpdateDispatchTablesInternal(isolate_, all_dispatch_tables,
+                                         table_index, function, wasm_code);
+          }
+        }
+      }
+
+      // TODO(titzer): we add the new dispatch table at the end to avoid
+      // redundant work and also because the new instance is not yet fully
+      // initialized.
+      if (!table_instance.table_object.is_null()) {
+        // Add the new dispatch table to the WebAssembly.Table object.
+        all_dispatch_tables = WasmTableObject::AddDispatchTable(
+            isolate_, table_instance.table_object, instance, index,
+            table_instance.dispatch_table);
+      }
+    }
+    // Patch all code that has references to the old indirect tables.
+    for (int i = 0; i < code_table->length(); ++i) {
+      if (!code_table->get(i)->IsCode()) continue;
+      Handle<Code> code(Code::cast(code_table->get(i)), isolate_);
+      for (int j = 0; j < function_table_count; ++j) {
+        ReplaceReferenceInCode(
+            code, Handle<Object>(old_function_tables->get(j), isolate_),
+            Handle<Object>(new_function_tables->get(j), isolate_));
+      }
+    }
+    compiled_module_->set_function_tables(new_function_tables);
+  }
+};
+
 // Instantiates a WASM module, creating a WebAssembly.Instance from a
 // WebAssembly.Module.
 MaybeHandle<JSObject> WasmModule::Instantiate(Isolate* isolate,
                                               ErrorThrower* thrower,
-                                              Handle<JSObject> module_object,
+                                              Handle<JSObject> wasm_module,
                                               Handle<JSReceiver> ffi,
                                               Handle<JSArrayBuffer> memory) {
-  MaybeHandle<JSObject> nothing;
-  HistogramTimerScope wasm_instantiate_module_time_scope(
-      isolate->counters()->wasm_instantiate_module_time());
-  Factory* factory = isolate->factory();
-
-  //--------------------------------------------------------------------------
-  // Reuse the compiled module (if no owner), otherwise clone.
-  //--------------------------------------------------------------------------
-  Handle<WasmCompiledModule> compiled_module;
-  Handle<FixedArray> code_table;
-  Handle<FixedArray> old_code_table;
-  Handle<JSObject> owner;
-  // If we don't clone, this will be null(). Otherwise, this will
-  // be a weak link to the original. If we lose the original to GC,
-  // this will be a cleared. We'll link the instances chain last.
-  MaybeHandle<WeakCell> link_to_original;
-
-  TRACE("Starting new module instantiation\n");
-  {
-    Handle<WasmCompiledModule> original(
-        WasmCompiledModule::cast(module_object->GetInternalField(0)), isolate);
-    // Always make a new copy of the code_table, since the old_code_table
-    // may still have placeholders for imports.
-    old_code_table = original->code_table();
-    code_table = factory->CopyFixedArray(old_code_table);
-
-    if (original->has_weak_owning_instance()) {
-      WeakCell* tmp = original->ptr_to_weak_owning_instance();
-      DCHECK(!tmp->cleared());
-      // There is already an owner, clone everything.
-      owner = Handle<JSObject>(JSObject::cast(tmp->value()), isolate);
-      // Insert the latest clone in front.
-      TRACE("Cloning from %d\n", original->instance_id());
-      compiled_module = WasmCompiledModule::Clone(isolate, original);
-      // Replace the strong reference to point to the new instance here.
-      // This allows any of the other instances, including the original,
-      // to be collected.
-      module_object->SetInternalField(0, *compiled_module);
-      compiled_module->set_weak_module_object(original->weak_module_object());
-      link_to_original = factory->NewWeakCell(original);
-      // Don't link to original here. We remember the original
-      // as a weak link. If that link isn't clear by the time we finish
-      // instantiating this instance, then we link it at that time.
-      compiled_module->reset_weak_next_instance();
-
-      // Clone the code for WASM functions and exports.
-      for (int i = 0; i < code_table->length(); ++i) {
-        Handle<Code> orig_code = code_table->GetValueChecked<Code>(isolate, i);
-        switch (orig_code->kind()) {
-          case Code::WASM_TO_JS_FUNCTION:
-            // Imports will be overwritten with newly compiled wrappers.
-            break;
-          case Code::JS_TO_WASM_FUNCTION:
-          case Code::WASM_FUNCTION: {
-            Handle<Code> code = factory->CopyCode(orig_code);
-            code_table->set(i, *code);
-            break;
-          }
-          default:
-            UNREACHABLE();
-        }
-      }
-      RecordStats(isolate, code_table);
-    } else {
-      // There was no owner, so we can reuse the original.
-      compiled_module = original;
-      TRACE("Reusing existing instance %d\n", compiled_module->instance_id());
-    }
-    compiled_module->set_code_table(code_table);
-  }
-
-  //--------------------------------------------------------------------------
-  // Allocate the instance object.
-  //--------------------------------------------------------------------------
-  Handle<Map> map = factory->NewMap(
-      JS_OBJECT_TYPE,
-      JSObject::kHeaderSize + kWasmModuleInternalFieldCount * kPointerSize);
-  Handle<JSObject> instance = factory->NewJSObjectFromMap(map, TENURED);
-  instance->SetInternalField(kWasmModuleCodeTable, *code_table);
-
-  //--------------------------------------------------------------------------
-  // Set up the memory for the new instance.
-  //--------------------------------------------------------------------------
-  MaybeHandle<JSArrayBuffer> old_memory;
-  // TODO(titzer): handle imported memory properly.
-
-  uint32_t min_mem_pages = compiled_module->min_memory_pages();
-  isolate->counters()->wasm_min_mem_pages_count()->AddSample(min_mem_pages);
-  // TODO(wasm): re-enable counter for max_mem_pages when we use that field.
-
-  if (memory.is_null() && min_mem_pages > 0) {
-    memory = AllocateMemory(thrower, isolate, min_mem_pages);
-    if (memory.is_null()) return nothing;  // failed to allocate memory
-  }
-
-  if (!memory.is_null()) {
-    instance->SetInternalField(kWasmMemArrayBuffer, *memory);
-    Address mem_start = static_cast<Address>(memory->backing_store());
-    uint32_t mem_size = static_cast<uint32_t>(memory->byte_length()->Number());
-    LoadDataSegments(compiled_module, mem_start, mem_size);
-
-    uint32_t old_mem_size = compiled_module->has_heap()
-                                ? compiled_module->mem_size()
-                                : compiled_module->default_mem_size();
-    Address old_mem_start =
-        compiled_module->has_heap()
-            ? static_cast<Address>(compiled_module->heap()->backing_store())
-            : nullptr;
-    RelocateInstanceCode(instance, old_mem_start, mem_start, old_mem_size,
-                         mem_size);
-    compiled_module->set_heap(memory);
-  }
-
-  //--------------------------------------------------------------------------
-  // Set up the globals for the new instance.
-  //--------------------------------------------------------------------------
-  MaybeHandle<JSArrayBuffer> old_globals;
-  MaybeHandle<JSArrayBuffer> globals;
-  uint32_t globals_size = compiled_module->globals_size();
-  if (globals_size > 0) {
-    Handle<JSArrayBuffer> global_buffer = NewArrayBuffer(isolate, globals_size);
-    globals = global_buffer;
-    if (globals.is_null()) {
-      thrower->Error("Out of memory: wasm globals");
-      return nothing;
-    }
-    Address old_address =
-        owner.is_null() ? nullptr : GetGlobalStartAddressFromCodeTemplate(
-                                        *isolate->factory()->undefined_value(),
-                                        JSObject::cast(*owner));
-    RelocateGlobals(instance, old_address,
-                    static_cast<Address>(global_buffer->backing_store()));
-    instance->SetInternalField(kWasmGlobalsArrayBuffer, *global_buffer);
-  }
-
-  //--------------------------------------------------------------------------
-  // Compile the import wrappers for the new instance.
-  //--------------------------------------------------------------------------
-  // TODO(titzer): handle imported globals and function tables.
-  int num_imported_functions = 0;
-  if (compiled_module->has_import_data()) {
-    Handle<FixedArray> import_data = compiled_module->import_data();
-    num_imported_functions = import_data->length();
-    for (int index = 0; index < num_imported_functions; index++) {
-      Handle<Code> import_wrapper =
-          CompileImportWrapper(isolate, ffi, index, import_data, thrower);
-      if (thrower->error()) return nothing;
-      code_table->set(index, *import_wrapper);
-      RecordStats(isolate, *import_wrapper);
-    }
-  }
-
-  //--------------------------------------------------------------------------
-  // Set up the debug support for the new instance.
-  //--------------------------------------------------------------------------
-  // TODO(wasm): avoid referencing this stuff from the instance, use it off
-  // the compiled module instead. See the following 3 assignments:
-  if (compiled_module->has_module_bytes()) {
-    instance->SetInternalField(kWasmModuleBytesString,
-                               compiled_module->ptr_to_module_bytes());
-  }
-
-  if (compiled_module->has_function_names()) {
-    instance->SetInternalField(kWasmFunctionNamesArray,
-                               compiled_module->ptr_to_function_names());
-  }
-
-  {
-    Handle<Object> handle = factory->NewNumber(num_imported_functions);
-    instance->SetInternalField(kWasmNumImportedFunctions, *handle);
-  }
-
-  //--------------------------------------------------------------------------
-  // Set up the runtime support for the new instance.
-  //--------------------------------------------------------------------------
-  Handle<WeakCell> weak_link = isolate->factory()->NewWeakCell(instance);
-
-  for (int i = num_imported_functions + FLAG_skip_compiling_wasm_funcs;
-       i < code_table->length(); ++i) {
-    Handle<Code> code = code_table->GetValueChecked<Code>(isolate, i);
-    if (code->kind() == Code::WASM_FUNCTION) {
-      Handle<FixedArray> deopt_data =
-          isolate->factory()->NewFixedArray(2, TENURED);
-      deopt_data->set(0, *weak_link);
-      deopt_data->set(1, Smi::FromInt(static_cast<int>(i)));
-      deopt_data->set_length(2);
-      code->set_deoptimization_data(*deopt_data);
-    }
-  }
-
-  //--------------------------------------------------------------------------
-  // Set up the indirect function tables for the new instance.
-  //--------------------------------------------------------------------------
-  {
-    std::vector<Handle<Code>> functions(
-        static_cast<size_t>(code_table->length()));
-    for (int i = 0; i < code_table->length(); ++i) {
-      functions[i] = code_table->GetValueChecked<Code>(isolate, i);
-    }
-
-    if (compiled_module->has_indirect_function_tables()) {
-      Handle<FixedArray> indirect_tables_template =
-          compiled_module->indirect_function_tables();
-      Handle<FixedArray> to_replace =
-          owner.is_null() ? indirect_tables_template
-                          : handle(FixedArray::cast(owner->GetInternalField(
-                                kWasmModuleFunctionTable)));
-      Handle<FixedArray> indirect_tables = SetupIndirectFunctionTable(
-          isolate, code_table, indirect_tables_template, to_replace);
-      for (int i = 0; i < indirect_tables->length(); ++i) {
-        Handle<FixedArray> metadata =
-            indirect_tables->GetValueChecked<FixedArray>(isolate, i);
-        uint32_t size = Smi::cast(metadata->get(kSize))->value();
-        Handle<FixedArray> table =
-            metadata->GetValueChecked<FixedArray>(isolate, kTable);
-        PopulateFunctionTable(table, size, &functions);
-      }
-      instance->SetInternalField(kWasmModuleFunctionTable, *indirect_tables);
-    }
-  }
-
-  //--------------------------------------------------------------------------
-  // Set up the exports object for the new instance.
-  //--------------------------------------------------------------------------
-  bool mem_export = compiled_module->export_memory();
-  ModuleOrigin origin = compiled_module->origin();
-
-  if (compiled_module->has_exports() || mem_export) {
-    PropertyDescriptor desc;
-    desc.set_writable(false);
-
-    Handle<JSObject> exports_object = instance;
-    if (origin == kWasmOrigin) {
-      // Create the "exports" object.
-      Handle<JSFunction> object_function = Handle<JSFunction>(
-          isolate->native_context()->object_function(), isolate);
-      exports_object = factory->NewJSObject(object_function, TENURED);
-      Handle<String> exports_name = factory->InternalizeUtf8String("exports");
-      JSObject::AddProperty(instance, exports_name, exports_object, READ_ONLY);
-    }
-    int first_export = -1;
-    // TODO(wasm): another iteration over the code objects.
-    for (int i = 0; i < code_table->length(); i++) {
-      Handle<Code> code = code_table->GetValueChecked<Code>(isolate, i);
-      if (code->kind() == Code::JS_TO_WASM_FUNCTION) {
-        first_export = i;
-        break;
-      }
-    }
-    if (compiled_module->has_exports()) {
-      Handle<FixedArray> exports = compiled_module->exports();
-      int export_size = exports->length();
-      for (int i = 0; i < export_size; ++i) {
-        Handle<FixedArray> export_data =
-            exports->GetValueChecked<FixedArray>(isolate, i);
-        Handle<String> name =
-            export_data->GetValueChecked<String>(isolate, kExportName);
-        int arity = Smi::cast(export_data->get(kExportArity))->value();
-        MaybeHandle<ByteArray> signature =
-            export_data->GetValue<ByteArray>(isolate, kExportedSignature);
-        Handle<Code> export_code =
-            code_table->GetValueChecked<Code>(isolate, first_export + i);
-        Handle<JSFunction> function = WrapExportCodeAsJSFunction(
-            isolate, export_code, name, arity, signature, instance);
-        desc.set_value(function);
-        Maybe<bool> status = JSReceiver::DefineOwnProperty(
-            isolate, exports_object, name, &desc, Object::THROW_ON_ERROR);
-        if (!status.IsJust()) {
-          thrower->Error("export of %.*s failed.", name->length(),
-                         name->ToCString().get());
-          return nothing;
-        }
-      }
-    }
-    if (mem_export) {
-      // Export the memory as a named property.
-      Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>(
-          JSArrayBuffer::cast(instance->GetInternalField(kWasmMemArrayBuffer)));
-      Handle<Object> memory_object =
-          WasmJs::CreateWasmMemoryObject(isolate, buffer, false, 0);
-      // TODO(titzer): export the memory with the correct name.
-      Handle<String> name = factory->InternalizeUtf8String("memory");
-      JSObject::AddProperty(exports_object, name, memory_object, READ_ONLY);
-    }
-  }
-
-  if (num_imported_functions > 0 || !owner.is_null()) {
-    // If the code was cloned, or new imports were compiled, patch.
-    PatchDirectCalls(old_code_table, code_table, num_imported_functions);
-  }
-
-  FlushICache(isolate, code_table);
-
-  //--------------------------------------------------------------------------
-  // Run the start function if one was specified.
-  //--------------------------------------------------------------------------
-  if (compiled_module->has_startup_function()) {
-    Handle<FixedArray> startup_data = compiled_module->startup_function();
-    HandleScope scope(isolate);
-    int32_t start_index =
-        startup_data->GetValueChecked<Smi>(isolate, kExportedFunctionIndex)
-            ->value();
-    Handle<Code> startup_code =
-        code_table->GetValueChecked<Code>(isolate, start_index);
-    int arity = Smi::cast(startup_data->get(kExportArity))->value();
-    MaybeHandle<ByteArray> startup_signature =
-        startup_data->GetValue<ByteArray>(isolate, kExportedSignature);
-    Handle<JSFunction> startup_fct = WrapExportCodeAsJSFunction(
-        isolate, startup_code, factory->InternalizeUtf8String("start"), arity,
-        startup_signature, instance);
-    RecordStats(isolate, *startup_code);
-    // Call the JS function.
-    Handle<Object> undefined = isolate->factory()->undefined_value();
-    MaybeHandle<Object> retval =
-        Execution::Call(isolate, startup_fct, undefined, 0, nullptr);
-
-    if (retval.is_null()) {
-      thrower->Error("WASM.instantiateModule(): start function failed");
-      return nothing;
-    }
-  }
-
-  DCHECK(wasm::IsWasmObject(*instance));
-
-  {
-    Handle<WeakCell> link_to_owner = factory->NewWeakCell(instance);
-
-    Handle<Object> global_handle = isolate->global_handles()->Create(*instance);
-    Handle<WeakCell> link_to_clone = factory->NewWeakCell(compiled_module);
-    {
-      DisallowHeapAllocation no_gc;
-      compiled_module->set_weak_owning_instance(link_to_owner);
-      Handle<WeakCell> next;
-      if (link_to_original.ToHandle(&next) && !next->cleared()) {
-        WasmCompiledModule* original = WasmCompiledModule::cast(next->value());
-        DCHECK(original->has_weak_owning_instance());
-        DCHECK(!original->weak_owning_instance()->cleared());
-        compiled_module->set_weak_next_instance(next);
-        original->set_weak_prev_instance(link_to_clone);
-      }
-
-      compiled_module->set_weak_owning_instance(link_to_owner);
-      instance->SetInternalField(kWasmCompiledModule, *compiled_module);
-      GlobalHandles::MakeWeak(global_handle.location(),
-                              global_handle.location(), &InstanceFinalizer,
-                              v8::WeakCallbackType::kFinalizer);
-    }
-  }
-  TRACE("Finishing instance %d\n", compiled_module->instance_id());
-  TRACE_CHAIN(WasmCompiledModule::cast(module_object->GetInternalField(0)));
-  return instance;
+  WasmInstanceBuilder builder(isolate, thrower, wasm_module, ffi, memory);
+  return builder.Build();
 }
 
-#if DEBUG
-uint32_t WasmCompiledModule::instance_id_counter_ = 0;
-#endif
-
-Handle<WasmCompiledModule> WasmCompiledModule::New(Isolate* isolate,
-                                                   uint32_t min_memory_pages,
-                                                   uint32_t globals_size,
-                                                   bool export_memory,
-                                                   ModuleOrigin origin) {
-  Handle<FixedArray> ret =
-      isolate->factory()->NewFixedArray(PropertyIndices::Count, TENURED);
-  // Globals size is expected to fit into an int without overflow. This is not
-  // supported by the spec at the moment, however, we don't support array
-  // buffer sizes over 1g, so, for now, we avoid alocating a HeapNumber for
-  // the globals size. The CHECK guards this assumption.
-  CHECK_GE(static_cast<int>(globals_size), 0);
-  ret->set(kID_min_memory_pages,
-           Smi::FromInt(static_cast<int>(min_memory_pages)));
-  ret->set(kID_globals_size, Smi::FromInt(static_cast<int>(globals_size)));
-  ret->set(kID_export_memory, Smi::FromInt(static_cast<int>(export_memory)));
-  ret->set(kID_origin, Smi::FromInt(static_cast<int>(origin)));
-  WasmCompiledModule::cast(*ret)->Init();
-  return handle(WasmCompiledModule::cast(*ret));
-}
-
-void WasmCompiledModule::Init() {
-#if DEBUG
-  set(kID_instance_id, Smi::FromInt(instance_id_counter_++));
-  TRACE("New compiled module id: %d\n", instance_id());
-#endif
-}
-
-void WasmCompiledModule::PrintInstancesChain() {
-#if DEBUG
-  if (!FLAG_trace_wasm_instances) return;
-  for (WasmCompiledModule* current = this; current != nullptr;) {
-    PrintF("->%d", current->instance_id());
-    if (current->ptr_to_weak_next_instance() == nullptr) break;
-    CHECK(!current->ptr_to_weak_next_instance()->cleared());
-    current =
-        WasmCompiledModule::cast(current->ptr_to_weak_next_instance()->value());
-  }
-  PrintF("\n");
-#endif
-}
-
-Handle<Object> GetWasmFunctionNameOrNull(Isolate* isolate, Handle<Object> wasm,
+Handle<String> wasm::GetWasmFunctionName(Isolate* isolate,
+                                         Handle<Object> instance_or_undef,
                                          uint32_t func_index) {
-  if (!wasm->IsUndefined(isolate)) {
-    Handle<ByteArray> func_names_arr_obj(
-        ByteArray::cast(Handle<JSObject>::cast(wasm)->GetInternalField(
-            kWasmFunctionNamesArray)),
-        isolate);
-    // TODO(clemens): Extract this from the module bytes; skip whole function
-    // name table.
-    Handle<Object> name;
-    if (GetWasmFunctionNameFromTable(func_names_arr_obj, func_index)
-            .ToHandle(&name)) {
-      return name;
-    }
-  }
-  return isolate->factory()->null_value();
-}
-
-Handle<String> GetWasmFunctionName(Isolate* isolate, Handle<Object> wasm,
-                                   uint32_t func_index) {
-  Handle<Object> name_or_null =
-      GetWasmFunctionNameOrNull(isolate, wasm, func_index);
-  if (!name_or_null->IsNull(isolate)) {
-    return Handle<String>::cast(name_or_null);
+  if (!instance_or_undef->IsUndefined(isolate)) {
+    Handle<WasmCompiledModule> compiled_module(
+        Handle<WasmInstanceObject>::cast(instance_or_undef)
+            ->get_compiled_module());
+    MaybeHandle<String> maybe_name =
+        WasmCompiledModule::GetFunctionName(compiled_module, func_index);
+    if (!maybe_name.is_null()) return maybe_name.ToHandleChecked();
   }
   return isolate->factory()->NewStringFromStaticChars("<WASM UNNAMED>");
 }
 
-bool IsWasmObject(Object* object) {
-  if (!object->IsJSObject()) return false;
-
-  JSObject* obj = JSObject::cast(object);
-  Isolate* isolate = obj->GetIsolate();
-  if (obj->GetInternalFieldCount() != kWasmModuleInternalFieldCount) {
-    return false;
-  }
-
-  Object* mem = obj->GetInternalField(kWasmMemArrayBuffer);
-  if (obj->GetInternalField(kWasmModuleCodeTable)->IsFixedArray() &&
-      (mem->IsUndefined(isolate) || mem->IsJSArrayBuffer()) &&
-      obj->GetInternalField(kWasmFunctionNamesArray)->IsByteArray()) {
-    Object* debug_bytes = obj->GetInternalField(kWasmModuleBytesString);
-    if (!debug_bytes->IsUndefined(isolate)) {
-      if (!debug_bytes->IsSeqOneByteString()) {
-        return false;
-      }
-      DisallowHeapAllocation no_gc;
-      SeqOneByteString* bytes = SeqOneByteString::cast(debug_bytes);
-      if (bytes->length() < 4) return false;
-      if (memcmp(bytes->GetChars(), "\0asm", 4)) return false;
-      // All checks passed.
-    }
-    return true;
-  }
-  return false;
+bool wasm::IsWasmInstance(Object* object) {
+  return WasmInstanceObject::IsWasmInstanceObject(object);
 }
 
-SeqOneByteString* GetWasmBytes(JSObject* wasm) {
-  return SeqOneByteString::cast(wasm->GetInternalField(kWasmModuleBytesString));
+WasmCompiledModule* wasm::GetCompiledModule(Object* object) {
+  return WasmInstanceObject::cast(object)->get_compiled_module();
 }
 
-Handle<WasmDebugInfo> GetDebugInfo(Handle<JSObject> wasm) {
-  Handle<Object> info(wasm->GetInternalField(kWasmDebugInfo),
-                      wasm->GetIsolate());
-  if (!info->IsUndefined(wasm->GetIsolate()))
-    return Handle<WasmDebugInfo>::cast(info);
-  Handle<WasmDebugInfo> new_info = WasmDebugInfo::New(wasm);
-  wasm->SetInternalField(kWasmDebugInfo, *new_info);
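+// An instance originates from asm.js iff its compiled module carries asm.js
+// offset tables; the DCHECK below ties this to the script type.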
+bool wasm::WasmIsAsmJs(Object* instance, Isolate* isolate) {
+  if (instance->IsUndefined(isolate)) return false;
+  DCHECK(IsWasmInstance(instance));
+  WasmCompiledModule* compiled_module =
+      GetCompiledModule(JSObject::cast(instance));
+  DCHECK_EQ(compiled_module->has_asm_js_offset_tables(),
+            compiled_module->script()->type() == Script::TYPE_NORMAL);
+  return compiled_module->has_asm_js_offset_tables();
+}
+
+Handle<Script> wasm::GetScript(Handle<JSObject> instance) {
+  DCHECK(IsWasmInstance(*instance));
+  WasmCompiledModule* compiled_module = GetCompiledModule(*instance);
+  DCHECK(compiled_module->has_script());
+  return compiled_module->script();
+}
+
+int wasm::GetAsmWasmSourcePosition(Handle<JSObject> instance, int func_index,
+                                   int byte_offset) {
+  return WasmDebugInfo::GetAsmJsSourcePosition(GetDebugInfo(instance),
+                                               func_index, byte_offset);
+}
+
+Handle<SeqOneByteString> wasm::GetWasmBytes(Handle<JSObject> object) {
+  return Handle<WasmInstanceObject>::cast(object)
+      ->get_compiled_module()
+      ->module_bytes();
+}
+
+Handle<WasmDebugInfo> wasm::GetDebugInfo(Handle<JSObject> object) {
+  auto instance = Handle<WasmInstanceObject>::cast(object);
+  if (instance->has_debug_info()) {
+    Handle<WasmDebugInfo> info(instance->get_debug_info(),
+                               instance->GetIsolate());
+    return info;
+  }
+  Handle<WasmDebugInfo> new_info = WasmDebugInfo::New(instance);
+  instance->set_debug_info(*new_info);
   return new_info;
 }
 
-bool UpdateWasmModuleMemory(Handle<JSObject> object, Address old_start,
-                            Address new_start, uint32_t old_size,
-                            uint32_t new_size) {
-  DisallowHeapAllocation no_allocation;
-  if (!IsWasmObject(*object)) {
-    return false;
-  }
-
-  // Get code table associated with the module js_object
-  Object* obj = object->GetInternalField(kWasmModuleCodeTable);
-  Handle<FixedArray> code_table(FixedArray::cast(obj));
-
-  // Iterate through the code objects in the code table and update relocation
-  // information
-  for (int i = 0; i < code_table->length(); i++) {
-    obj = code_table->get(i);
-    Handle<Code> code(Code::cast(obj));
-
-    int mode_mask = RelocInfo::ModeMask(RelocInfo::WASM_MEMORY_REFERENCE) |
-                    RelocInfo::ModeMask(RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
-    for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
-      RelocInfo::Mode mode = it.rinfo()->rmode();
-      if (RelocInfo::IsWasmMemoryReference(mode) ||
-          RelocInfo::IsWasmMemorySizeReference(mode)) {
-        it.rinfo()->update_wasm_memory_reference(old_start, new_start, old_size,
-                                                 new_size);
-      }
-    }
-  }
-  return true;
+int wasm::GetNumberOfFunctions(Handle<JSObject> object) {
+  return static_cast<int>(
+      Handle<WasmInstanceObject>::cast(object)->module()->functions.size());
 }
 
-Handle<FixedArray> BuildFunctionTable(Isolate* isolate, uint32_t index,
-                                      const WasmModule* module) {
-  const WasmIndirectFunctionTable* table = &module->function_tables[index];
-  DCHECK_EQ(table->size, table->values.size());
-  DCHECK_GE(table->max_size, table->size);
-  Handle<FixedArray> values =
-      isolate->factory()->NewFixedArray(2 * table->max_size, TENURED);
-  for (uint32_t i = 0; i < table->size; ++i) {
-    const WasmFunction* function = &module->functions[table->values[i]];
-    values->set(i, Smi::FromInt(function->sig_index));
-    values->set(i + table->max_size, Smi::FromInt(table->values[i]));
-  }
-  // Set the remaining elements to -1 (instead of "undefined"). These
-  // elements are accessed directly as SMIs (without a check). On 64-bit
-  // platforms, it is possible to have the top bits of "undefined" take
-  // small integer values (or zero), which are more likely to be equal to
-  // the signature index we check against.
-  for (uint32_t i = table->size; i < table->max_size; i++) {
-    values->set(i, Smi::FromInt(-1));
-  }
-  return values;
-}
-
-void PopulateFunctionTable(Handle<FixedArray> table, uint32_t table_size,
-                           const std::vector<Handle<Code>>* code_table) {
-  uint32_t max_size = table->length() / 2;
-  for (uint32_t i = max_size; i < max_size + table_size; ++i) {
-    int index = Smi::cast(table->get(static_cast<int>(i)))->value();
-    DCHECK_GE(index, 0);
-    DCHECK_LT(static_cast<size_t>(index), code_table->size());
-    table->set(static_cast<int>(i), *(*code_table)[index]);
-  }
-}
-
-int GetNumberOfFunctions(JSObject* wasm) {
-  Object* func_names_obj = wasm->GetInternalField(kWasmFunctionNamesArray);
-  // TODO(clemensh): this looks inside an array constructed elsewhere. Refactor.
-  return ByteArray::cast(func_names_obj)->get_int(0);
-}
-
-Handle<JSObject> CreateCompiledModuleObject(Isolate* isolate,
-                                            Handle<FixedArray> compiled_module,
-                                            ModuleOrigin origin) {
-  Handle<JSObject> module_obj;
-  if (origin == ModuleOrigin::kWasmOrigin) {
-    Handle<JSFunction> module_cons(
-        isolate->native_context()->wasm_module_constructor());
-    module_obj = isolate->factory()->NewJSObject(module_cons);
-  } else {
-    DCHECK(origin == ModuleOrigin::kAsmJsOrigin);
-    Handle<Map> map = isolate->factory()->NewMap(
-        JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
-    module_obj = isolate->factory()->NewJSObjectFromMap(map, TENURED);
-  }
-  module_obj->SetInternalField(0, *compiled_module);
-  if (origin == ModuleOrigin::kWasmOrigin) {
-    Handle<Symbol> module_sym(isolate->native_context()->wasm_module_sym());
-    Object::SetProperty(module_obj, module_sym, module_obj, STRICT).Check();
-  }
-  Handle<WeakCell> link_to_module = isolate->factory()->NewWeakCell(module_obj);
-  WasmCompiledModule::cast(*compiled_module)
-      ->set_weak_module_object(link_to_module);
-  return module_obj;
-}
-
-MaybeHandle<JSObject> CreateModuleObjectFromBytes(Isolate* isolate,
-                                                  const byte* start,
-                                                  const byte* end,
-                                                  ErrorThrower* thrower,
-                                                  ModuleOrigin origin) {
-  MaybeHandle<JSObject> nothing;
-  Zone zone(isolate->allocator());
-  ModuleResult result =
-      DecodeWasmModule(isolate, &zone, start, end, false, origin);
-  std::unique_ptr<const WasmModule> decoded_module(result.val);
+// TODO(clemensh): origin can be inferred from asm_js_script; remove it.
+MaybeHandle<WasmModuleObject> wasm::CreateModuleObjectFromBytes(
+    Isolate* isolate, const byte* start, const byte* end, ErrorThrower* thrower,
+    ModuleOrigin origin, Handle<Script> asm_js_script,
+    const byte* asm_js_offset_tables_start,
+    const byte* asm_js_offset_tables_end) {
+  MaybeHandle<WasmModuleObject> nothing;
+  ModuleResult result = DecodeWasmModule(isolate, start, end, false, origin);
   if (result.failed()) {
-    thrower->Failed("Wasm decoding failed", result);
+    if (result.val) delete result.val;
+    thrower->CompileFailed("Wasm decoding failed", result);
     return nothing;
   }
-  MaybeHandle<FixedArray> compiled_module =
-      decoded_module->CompileFunctions(isolate, thrower);
-  if (compiled_module.is_null()) return nothing;
+  // The {module_wrapper} will take ownership of the {WasmModule} object,
+  // and it will be destroyed when the GC reclaims the wrapper object.
+  Handle<WasmModuleWrapper> module_wrapper =
+      WasmModuleWrapper::New(isolate, const_cast<WasmModule*>(result.val));
 
-  return CreateCompiledModuleObject(isolate, compiled_module.ToHandleChecked(),
-                                    origin);
-}
+  // Compile the functions of the module, producing a compiled module.
+  MaybeHandle<WasmCompiledModule> maybe_compiled_module =
+      result.val->CompileFunctions(isolate, module_wrapper, thrower);
 
-bool ValidateModuleBytes(Isolate* isolate, const byte* start, const byte* end,
-                         ErrorThrower* thrower, ModuleOrigin origin) {
-  Zone zone(isolate->allocator());
-  ModuleResult result =
-      DecodeWasmModule(isolate, &zone, start, end, false, origin);
-  if (result.ok()) {
-    DCHECK_NOT_NULL(result.val);
-    delete result.val;
-    return true;
+  if (maybe_compiled_module.is_null()) return nothing;
+
+  Handle<WasmCompiledModule> compiled_module =
+      maybe_compiled_module.ToHandleChecked();
+
+  DCHECK_EQ(origin == kAsmJsOrigin, !asm_js_script.is_null());
+  DCHECK(!compiled_module->has_script());
+  DCHECK(!compiled_module->has_asm_js_offset_tables());
+  if (origin == kAsmJsOrigin) {
+    // Set script for the asm.js source, and the offset table mapping wasm byte
+    // offsets to source positions.
+    compiled_module->set_script(asm_js_script);
+    size_t offset_tables_len =
+        asm_js_offset_tables_end - asm_js_offset_tables_start;
+    DCHECK_GE(static_cast<size_t>(kMaxInt), offset_tables_len);
+    Handle<ByteArray> offset_tables =
+        isolate->factory()->NewByteArray(static_cast<int>(offset_tables_len));
+    memcpy(offset_tables->GetDataStartAddress(), asm_js_offset_tables_start,
+           offset_tables_len);
+    compiled_module->set_asm_js_offset_tables(offset_tables);
+  } else {
+    // Create a new Script object representing this wasm module, store it in the
+    // compiled wasm module, and register it with the debugger.
+    Handle<Script> script =
+        isolate->factory()->NewScript(isolate->factory()->empty_string());
+    script->set_type(Script::TYPE_WASM);
+
+    DCHECK_GE(kMaxInt, end - start);
+    int hash = StringHasher::HashSequentialString(
+        reinterpret_cast<const char*>(start), static_cast<int>(end - start),
+        kZeroHashSeed);
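+    // Derive a stable script URL ("wasm://wasm/%08x") and script name
+    // ("wasm-%08x") from a hash of the module bytes.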
+
+    const int kBufferSize = 50;
+    char buffer[kBufferSize];
+    int url_chars = SNPrintF(ArrayVector(buffer), "wasm://wasm/%08x", hash);
+    DCHECK(url_chars >= 0 && url_chars < kBufferSize);
+    MaybeHandle<String> url_str = isolate->factory()->NewStringFromOneByte(
+        Vector<const uint8_t>(reinterpret_cast<uint8_t*>(buffer), url_chars),
+        TENURED);
+    script->set_source_url(*url_str.ToHandleChecked());
+
+    int name_chars = SNPrintF(ArrayVector(buffer), "wasm-%08x", hash);
+    DCHECK(name_chars >= 0 && name_chars < kBufferSize);
+    MaybeHandle<String> name_str = isolate->factory()->NewStringFromOneByte(
+        Vector<const uint8_t>(reinterpret_cast<uint8_t*>(buffer), name_chars),
+        TENURED);
+    script->set_name(*name_str.ToHandleChecked());
+
+    script->set_wasm_compiled_module(*compiled_module);
+    compiled_module->set_script(script);
+    isolate->debug()->OnAfterCompile(script);
   }
-  return false;
+
+  return WasmModuleObject::New(isolate, compiled_module);
 }
 
-MaybeHandle<JSArrayBuffer> GetInstanceMemory(Isolate* isolate,
-                                             Handle<JSObject> instance) {
-  Object* mem = instance->GetInternalField(kWasmMemArrayBuffer);
-  DCHECK(IsWasmObject(*instance));
-  if (mem->IsUndefined(isolate)) return MaybeHandle<JSArrayBuffer>();
-  return Handle<JSArrayBuffer>(JSArrayBuffer::cast(mem));
+bool wasm::ValidateModuleBytes(Isolate* isolate, const byte* start,
+                               const byte* end, ErrorThrower* thrower,
+                               ModuleOrigin origin) {
+  ModuleResult result = DecodeWasmModule(isolate, start, end, true, origin);
+  if (result.val) {
+    delete result.val;
+  } else {
+    DCHECK(!result.ok());
+  }
+  return result.ok();
 }
 
-void SetInstanceMemory(Handle<JSObject> instance, JSArrayBuffer* buffer) {
+MaybeHandle<JSArrayBuffer> wasm::GetInstanceMemory(Isolate* isolate,
+                                                   Handle<JSObject> object) {
+  auto instance = Handle<WasmInstanceObject>::cast(object);
+  if (instance->has_memory_buffer()) {
+    return Handle<JSArrayBuffer>(instance->get_memory_buffer(), isolate);
+  }
+  return MaybeHandle<JSArrayBuffer>();
+}
+
+void SetInstanceMemory(Handle<JSObject> object, JSArrayBuffer* buffer) {
   DisallowHeapAllocation no_gc;
-  DCHECK(IsWasmObject(*instance));
-  instance->SetInternalField(kWasmMemArrayBuffer, buffer);
-  WasmCompiledModule* module =
-      WasmCompiledModule::cast(instance->GetInternalField(kWasmCompiledModule));
-  module->set_ptr_to_heap(buffer);
+  auto instance = Handle<WasmInstanceObject>::cast(object);
+  instance->set_memory_buffer(buffer);
+  instance->get_compiled_module()->set_ptr_to_memory(buffer);
 }
 
-int32_t GetInstanceMemorySize(Isolate* isolate, Handle<JSObject> instance) {
+int32_t wasm::GetInstanceMemorySize(Isolate* isolate,
+                                    Handle<JSObject> instance) {
   MaybeHandle<JSArrayBuffer> maybe_mem_buffer =
       GetInstanceMemory(isolate, instance);
   Handle<JSArrayBuffer> buffer;
@@ -1820,35 +2028,55 @@
   }
 }
 
-int32_t GrowInstanceMemory(Isolate* isolate, Handle<JSObject> instance,
-                           uint32_t pages) {
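+// Determine the effective maximum for an instance's memory: the maximum of an
+// attached WebAssembly.Memory object if set, otherwise the module's compiled
+// maximum, otherwise V8's hard limit.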
+uint32_t GetMaxInstanceMemorySize(Isolate* isolate,
+                                  Handle<WasmInstanceObject> instance) {
+  if (instance->has_memory_object()) {
+    Handle<WasmMemoryObject> memory_object(instance->get_memory_object(),
+                                           isolate);
+
+    int maximum = memory_object->maximum_pages();
+    if (maximum > 0) return static_cast<uint32_t>(maximum);
+  }
+  uint32_t compiled_max_pages =
+      instance->get_compiled_module()->max_mem_pages();
+  isolate->counters()->wasm_max_mem_pages_count()->AddSample(
+      compiled_max_pages);
+  if (compiled_max_pages != 0) return compiled_max_pages;
+  return WasmModule::kV8MaxPages;
+}
+
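+// Grows the instance's memory by the given number of pages. Returns the
+// previous size in pages on success, or -1 on failure.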
+int32_t wasm::GrowInstanceMemory(Isolate* isolate, Handle<JSObject> object,
+                                 uint32_t pages) {
+  if (!IsWasmInstance(*object)) return -1;
+  auto instance = Handle<WasmInstanceObject>::cast(object);
+  if (pages == 0) return GetInstanceMemorySize(isolate, instance);
+  uint32_t max_pages = GetMaxInstanceMemorySize(isolate, instance);
+
   Address old_mem_start = nullptr;
   uint32_t old_size = 0, new_size = 0;
 
   MaybeHandle<JSArrayBuffer> maybe_mem_buffer =
       GetInstanceMemory(isolate, instance);
   Handle<JSArrayBuffer> old_buffer;
-  if (!maybe_mem_buffer.ToHandle(&old_buffer)) {
+  if (!maybe_mem_buffer.ToHandle(&old_buffer) ||
+      old_buffer->backing_store() == nullptr) {
     // If the module object does not have linear memory associated with it,
     // allocate a new array buffer of the given size.
-    // TODO(gdeepti): Fix bounds check to take into account size of memtype.
     new_size = pages * WasmModule::kPageSize;
-    // The code generated in the wasm compiler guarantees this precondition.
-    DCHECK(pages <= WasmModule::kMaxMemPages);
+    if (max_pages < pages) return -1;
   } else {
     old_mem_start = static_cast<Address>(old_buffer->backing_store());
     old_size = old_buffer->byte_length()->Number();
     // If the old memory was zero-sized, we should have been in the
     // "undefined" case above.
     DCHECK_NOT_NULL(old_mem_start);
-    DCHECK_NE(0, old_size);
     DCHECK(old_size + pages * WasmModule::kPageSize <=
            std::numeric_limits<uint32_t>::max());
     new_size = old_size + pages * WasmModule::kPageSize;
   }
 
-  if (new_size <= old_size ||
-      WasmModule::kMaxMemPages * WasmModule::kPageSize <= new_size) {
+  if (new_size <= old_size || max_pages * WasmModule::kPageSize < new_size ||
+      WasmModule::kV8MaxPages * WasmModule::kPageSize < new_size) {
     return -1;
   }
   Handle<JSArrayBuffer> buffer = NewArrayBuffer(isolate, new_size);
@@ -1858,35 +2086,36 @@
     memcpy(new_mem_start, old_mem_start, old_size);
   }
   SetInstanceMemory(instance, *buffer);
-  if (!UpdateWasmModuleMemory(instance, old_mem_start, new_mem_start, old_size,
-                              new_size)) {
-    return -1;
+  Handle<FixedArray> code_table = instance->get_compiled_module()->code_table();
+  RelocateMemoryReferencesInCode(code_table, old_mem_start, new_mem_start,
+                                 old_size, new_size);
+  if (instance->has_memory_object()) {
+    instance->get_memory_object()->set_buffer(*buffer);
   }
+
   DCHECK(old_size % WasmModule::kPageSize == 0);
   return (old_size / WasmModule::kPageSize);
 }
 
-namespace testing {
-
-void ValidateInstancesChain(Isolate* isolate, Handle<JSObject> module_obj,
-                            int instance_count) {
+void testing::ValidateInstancesChain(Isolate* isolate,
+                                     Handle<JSObject> wasm_module,
+                                     int instance_count) {
   CHECK_GE(instance_count, 0);
   DisallowHeapAllocation no_gc;
   WasmCompiledModule* compiled_module =
-      WasmCompiledModule::cast(module_obj->GetInternalField(0));
-  CHECK_EQ(
-      JSObject::cast(compiled_module->ptr_to_weak_module_object()->value()),
-      *module_obj);
+      WasmCompiledModule::cast(wasm_module->GetInternalField(0));
+  CHECK_EQ(JSObject::cast(compiled_module->ptr_to_weak_wasm_module()->value()),
+           *wasm_module);
   Object* prev = nullptr;
   int found_instances = compiled_module->has_weak_owning_instance() ? 1 : 0;
   WasmCompiledModule* current_instance = compiled_module;
   while (current_instance->has_weak_next_instance()) {
     CHECK((prev == nullptr && !current_instance->has_weak_prev_instance()) ||
           current_instance->ptr_to_weak_prev_instance()->value() == prev);
-    CHECK_EQ(current_instance->ptr_to_weak_module_object()->value(),
-             *module_obj);
-    CHECK(
-        IsWasmObject(current_instance->ptr_to_weak_owning_instance()->value()));
+    CHECK_EQ(current_instance->ptr_to_weak_wasm_module()->value(),
+             *wasm_module);
+    CHECK(IsWasmInstance(
+        current_instance->ptr_to_weak_owning_instance()->value()));
     prev = current_instance;
     current_instance = WasmCompiledModule::cast(
         current_instance->ptr_to_weak_next_instance()->value());
@@ -1896,27 +2125,64 @@
   CHECK_EQ(found_instances, instance_count);
 }
 
-void ValidateModuleState(Isolate* isolate, Handle<JSObject> module_obj) {
+void testing::ValidateModuleState(Isolate* isolate,
+                                  Handle<JSObject> wasm_module) {
   DisallowHeapAllocation no_gc;
   WasmCompiledModule* compiled_module =
-      WasmCompiledModule::cast(module_obj->GetInternalField(0));
-  CHECK(compiled_module->has_weak_module_object());
-  CHECK_EQ(compiled_module->ptr_to_weak_module_object()->value(), *module_obj);
+      WasmCompiledModule::cast(wasm_module->GetInternalField(0));
+  CHECK(compiled_module->has_weak_wasm_module());
+  CHECK_EQ(compiled_module->ptr_to_weak_wasm_module()->value(), *wasm_module);
   CHECK(!compiled_module->has_weak_prev_instance());
   CHECK(!compiled_module->has_weak_next_instance());
   CHECK(!compiled_module->has_weak_owning_instance());
 }
 
-void ValidateOrphanedInstance(Isolate* isolate, Handle<JSObject> instance) {
+void testing::ValidateOrphanedInstance(Isolate* isolate,
+                                       Handle<JSObject> object) {
   DisallowHeapAllocation no_gc;
-  CHECK(IsWasmObject(*instance));
-  WasmCompiledModule* compiled_module =
-      WasmCompiledModule::cast(instance->GetInternalField(kWasmCompiledModule));
-  CHECK(compiled_module->has_weak_module_object());
-  CHECK(compiled_module->ptr_to_weak_module_object()->cleared());
+  WasmInstanceObject* instance = WasmInstanceObject::cast(*object);
+  WasmCompiledModule* compiled_module = instance->get_compiled_module();
+  CHECK(compiled_module->has_weak_wasm_module());
+  CHECK(compiled_module->ptr_to_weak_wasm_module()->cleared());
 }
 
-}  // namespace testing
-}  // namespace wasm
-}  // namespace internal
-}  // namespace v8
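+// Re-creates the Managed<WasmModule> wrapper by decoding the module again
+// from the stored module bytes (needed e.g. after deserialization, since the
+// wrapper itself is not part of the serialized state).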
+void WasmCompiledModule::RecreateModuleWrapper(Isolate* isolate,
+                                               Handle<FixedArray> array) {
+  Handle<WasmCompiledModule> compiled_module(
+      reinterpret_cast<WasmCompiledModule*>(*array), isolate);
+
+  WasmModule* module = nullptr;
+  {
+    Handle<SeqOneByteString> module_bytes = compiled_module->module_bytes();
+    // We parse the module again directly from the module bytes, so
+    // the underlying storage must not be moved in the meantime.
+    DisallowHeapAllocation no_allocation;
+    const byte* start =
+        reinterpret_cast<const byte*>(module_bytes->GetCharsAddress());
+    const byte* end = start + module_bytes->length();
+    // TODO(titzer): remember the module origin in the compiled_module
+    // For now, we assume serialized modules did not originate from asm.js.
+    ModuleResult result =
+        DecodeWasmModule(isolate, start, end, false, kWasmOrigin);
+    CHECK(result.ok());
+    CHECK_NOT_NULL(result.val);
+    module = const_cast<WasmModule*>(result.val);
+  }
+
+  Handle<WasmModuleWrapper> module_wrapper =
+      WasmModuleWrapper::New(isolate, module);
+
+  compiled_module->set_module_wrapper(module_wrapper);
+  DCHECK(WasmCompiledModule::IsWasmCompiledModule(*compiled_module));
+}
+
+MaybeHandle<String> WasmCompiledModule::GetFunctionName(
+    Handle<WasmCompiledModule> compiled_module, uint32_t func_index) {
+  DCHECK_LT(func_index, compiled_module->module()->functions.size());
+  WasmFunction& function = compiled_module->module()->functions[func_index];
+  Isolate* isolate = compiled_module->GetIsolate();
+  MaybeHandle<String> string = ExtractStringFromModuleBytes(
+      isolate, compiled_module, function.name_offset, function.name_length);
+  if (!string.is_null()) return string.ToHandleChecked();
+  return {};
+}
diff --git a/src/wasm/wasm-module.h b/src/wasm/wasm-module.h
index ac75042..2ad46e2 100644
--- a/src/wasm/wasm-module.h
+++ b/src/wasm/wasm-module.h
@@ -8,29 +8,37 @@
 #include <memory>
 
 #include "src/api.h"
+#include "src/globals.h"
 #include "src/handles.h"
 #include "src/parsing/preparse-data.h"
 
+#include "src/wasm/managed.h"
+#include "src/wasm/signature-map.h"
 #include "src/wasm/wasm-opcodes.h"
-#include "src/wasm/wasm-result.h"
 
 namespace v8 {
 namespace internal {
 
+class WasmCompiledModule;
+class WasmDebugInfo;
+class WasmModuleObject;
+
 namespace compiler {
 class CallDescriptor;
 class WasmCompilationUnit;
 }
 
 namespace wasm {
+class ErrorThrower;
+
 const size_t kMaxModuleSize = 1024 * 1024 * 1024;
 const size_t kMaxFunctionSize = 128 * 1024;
 const size_t kMaxStringSize = 256;
 const uint32_t kWasmMagic = 0x6d736100;
-const uint32_t kWasmVersion = 0x0c;
+const uint32_t kWasmVersion = 0x0d;
 
-const uint8_t kWasmFunctionTypeForm = 0x40;
-const uint8_t kWasmAnyFunctionTypeForm = 0x20;
+const uint8_t kWasmFunctionTypeForm = 0x60;
+const uint8_t kWasmAnyFunctionTypeForm = 0x70;
 
 enum WasmSectionCode {
   kUnknownSectionCode = 0,   // code for unknown sections
@@ -54,8 +62,6 @@
 
 const char* SectionName(WasmSectionCode code);
 
-class WasmDebugInfo;
-
 // Constants for fixed-size elements within a module.
 static const uint32_t kMaxReturnCount = 1;
 static const uint8_t kResizableMaximumFlag = 1;
@@ -86,12 +92,16 @@
     double f64_const;
     uint32_t global_index;
   } val;
-};
 
-#define NO_INIT                 \
-  {                             \
-    WasmInitExpr::kNone, { 0u } \
+  WasmInitExpr() : kind(kNone) {}
+  explicit WasmInitExpr(int32_t v) : kind(kI32Const) { val.i32_const = v; }
+  explicit WasmInitExpr(int64_t v) : kind(kI64Const) { val.i64_const = v; }
+  explicit WasmInitExpr(float v) : kind(kF32Const) { val.f32_const = v; }
+  explicit WasmInitExpr(double v) : kind(kF64Const) { val.f64_const = v; }
+  WasmInitExpr(WasmInitKind kind, uint32_t global_index) : kind(kGlobalIndex) {
+    val.global_index = global_index;
   }
+};
 
 // Static representation of a WASM function.
 struct WasmFunction {
@@ -125,11 +135,14 @@
 
 // Static representation of a wasm indirect call table.
 struct WasmIndirectFunctionTable {
-  uint32_t size;                // initial table size.
+  uint32_t min_size;            // minimum table size.
   uint32_t max_size;            // maximum table size.
+  bool has_max;                 // true if there is a maximum size.
+  // TODO(titzer): Move this to WasmInstance. Needed by interpreter only.
   std::vector<int32_t> values;  // function table, -1 indicating invalid.
   bool imported;                // true if imported.
   bool exported;                // true if exported.
+  SignatureMap map;             // canonicalizing map for sig indexes.
 };
 
 // Static representation of how to initialize a table.
@@ -159,31 +172,32 @@
 
 enum ModuleOrigin { kWasmOrigin, kAsmJsOrigin };
 
-class WasmCompiledModule;
-
 // Static representation of a module.
-struct WasmModule {
+struct V8_EXPORT_PRIVATE WasmModule {
   static const uint32_t kPageSize = 0x10000;    // Page size, 64kb.
-  static const uint32_t kMaxLegalPages = 65536;  // Maximum legal pages
   static const uint32_t kMinMemPages = 1;       // Minimum memory size = 64kb
-  static const uint32_t kMaxMemPages = 16384;   // Maximum memory size =  1gb
+  static const size_t kV8MaxPages = 16384;      // Maximum memory size = 1gb
+  static const size_t kSpecMaxPages = 65536;    // Maximum according to the spec
+  static const size_t kV8MaxTableSize = 16 * 1024 * 1024;
 
-  const byte* module_start;   // starting address for the module bytes.
-  const byte* module_end;     // end address for the module bytes.
-  uint32_t min_mem_pages;     // minimum size of the memory in 64k pages.
-  uint32_t max_mem_pages;     // maximum size of the memory in 64k pages.
-  bool mem_export;            // true if the memory is exported.
+  Zone* owned_zone;
+  const byte* module_start = nullptr;  // starting address for the module bytes
+  const byte* module_end = nullptr;    // end address for the module bytes
+  uint32_t min_mem_pages = 0;  // minimum size of the memory in 64k pages
+  uint32_t max_mem_pages = 0;  // maximum size of the memory in 64k pages
+  bool has_memory = false;     // true if the memory was defined or imported
+  bool mem_export = false;     // true if the memory is exported
   // TODO(wasm): reconcile start function index being an int with
   // the fact that we index on uint32_t, so we may technically not be
   // able to represent some start_function_index -es.
-  int start_function_index;   // start function, if any.
-  ModuleOrigin origin;        // origin of the module
+  int start_function_index = -1;      // start function, if any
+  ModuleOrigin origin = kWasmOrigin;  // origin of the module
 
   std::vector<WasmGlobal> globals;             // globals in this module.
-  uint32_t globals_size;                       // size of globals table.
-  uint32_t num_imported_functions;             // number of imported functions.
-  uint32_t num_declared_functions;             // number of declared functions.
-  uint32_t num_exported_functions;             // number of exported functions.
+  uint32_t globals_size = 0;                   // size of globals table.
+  uint32_t num_imported_functions = 0;         // number of imported functions.
+  uint32_t num_declared_functions = 0;         // number of declared functions.
+  uint32_t num_exported_functions = 0;         // number of exported functions.
   std::vector<FunctionSig*> signatures;        // signatures in this module.
   std::vector<WasmFunction> functions;         // functions in this module.
   std::vector<WasmDataSegment> data_segments;  // data segments in this module.
@@ -200,8 +214,11 @@
   // switch to libc-2.21 or higher.
   std::unique_ptr<base::Semaphore> pending_tasks;
 
-  WasmModule() : WasmModule(nullptr) {}
-  explicit WasmModule(byte* module_start);
+  WasmModule() : WasmModule(nullptr, nullptr) {}
+  WasmModule(Zone* owned_zone, const byte* module_start);
+  ~WasmModule() {
+    if (owned_zone) delete owned_zone;
+  }
 
   // Get a string stored in the module bytes representing a name.
   WasmName GetName(uint32_t offset, uint32_t length) const {
@@ -238,19 +255,21 @@
   }
 
   // Creates a new instantiation of the module in the given isolate.
-  V8_EXPORT_PRIVATE static MaybeHandle<JSObject> Instantiate(
-      Isolate* isolate, ErrorThrower* thrower, Handle<JSObject> module_object,
-      Handle<JSReceiver> ffi, Handle<JSArrayBuffer> memory);
+  static MaybeHandle<JSObject> Instantiate(Isolate* isolate,
+                                           ErrorThrower* thrower,
+                                           Handle<JSObject> wasm_module,
+                                           Handle<JSReceiver> ffi,
+                                           Handle<JSArrayBuffer> memory);
 
-  MaybeHandle<WasmCompiledModule> CompileFunctions(Isolate* isolate,
-                                                   ErrorThrower* thrower) const;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(WasmModule);
+  MaybeHandle<WasmCompiledModule> CompileFunctions(
+      Isolate* isolate, Handle<Managed<WasmModule>> module_wrapper,
+      ErrorThrower* thrower) const;
 };
 
+typedef Managed<WasmModule> WasmModuleWrapper;
+
 // An instantiated WASM module, including memory, function table, etc.
-struct WasmModuleInstance {
+struct WasmInstance {
   const WasmModule* module;  // static representation of the module.
   // -- Heap allocated --------------------------------------------------------
   Handle<JSObject> js_object;            // JavaScript module object.
@@ -260,25 +279,22 @@
   std::vector<Handle<FixedArray>> function_tables;  // indirect function tables.
   std::vector<Handle<Code>> function_code;  // code objects for each function.
   // -- raw memory ------------------------------------------------------------
-  byte* mem_start;  // start of linear memory.
-  uint32_t mem_size;  // size of the linear memory.
+  byte* mem_start = nullptr;  // start of linear memory.
+  uint32_t mem_size = 0;      // size of the linear memory.
   // -- raw globals -----------------------------------------------------------
-  byte* globals_start;  // start of the globals area.
+  byte* globals_start = nullptr;  // start of the globals area.
 
-  explicit WasmModuleInstance(const WasmModule* m)
+  explicit WasmInstance(const WasmModule* m)
       : module(m),
         function_tables(m->function_tables.size()),
-        function_code(m->functions.size()),
-        mem_start(nullptr),
-        mem_size(0),
-        globals_start(nullptr) {}
+        function_code(m->functions.size()) {}
 };
 
 // Interface provided to the decoder/graph builder which contains only
 // minimal information about the globals, functions, and function tables.
-struct ModuleEnv {
+struct V8_EXPORT_PRIVATE ModuleEnv {
   const WasmModule* module;
-  WasmModuleInstance* instance;
+  WasmInstance* instance;
   ModuleOrigin origin;
 
   bool IsValidGlobal(uint32_t index) const {
@@ -321,6 +337,8 @@
                                                          FunctionSig* sig);
   static compiler::CallDescriptor* GetI32WasmCallDescriptor(
       Zone* zone, compiler::CallDescriptor* descriptor);
+  static compiler::CallDescriptor* GetI32WasmCallDescriptorForSimd(
+      Zone* zone, compiler::CallDescriptor* descriptor);
 };
 
 // A helper for printing out the names of functions.
@@ -335,215 +353,93 @@
 std::ostream& operator<<(std::ostream& os, const WasmFunction& function);
 std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name);
 
-typedef Result<const WasmModule*> ModuleResult;
-typedef Result<WasmFunction*> FunctionResult;
-typedef std::vector<std::pair<int, int>> FunctionOffsets;
-typedef Result<FunctionOffsets> FunctionOffsetsResult;
-
-class WasmCompiledModule : public FixedArray {
- public:
-  static WasmCompiledModule* cast(Object* fixed_array) {
-    return reinterpret_cast<WasmCompiledModule*>(fixed_array);
-  }
-
-#define WCM_OBJECT_OR_WEAK(TYPE, NAME, ID)                           \
-  Handle<TYPE> NAME() const { return handle(ptr_to_##NAME()); }      \
-                                                                     \
-  MaybeHandle<TYPE> maybe_##NAME() const {                           \
-    if (has_##NAME()) return NAME();                                 \
-    return MaybeHandle<TYPE>();                                      \
-  }                                                                  \
-                                                                     \
-  TYPE* ptr_to_##NAME() const {                                      \
-    Object* obj = get(ID);                                           \
-    if (!obj->Is##TYPE()) return nullptr;                            \
-    return TYPE::cast(obj);                                          \
-  }                                                                  \
-                                                                     \
-  void set_##NAME(Handle<TYPE> value) { set_ptr_to_##NAME(*value); } \
-                                                                     \
-  void set_ptr_to_##NAME(TYPE* value) { set(ID, value); }            \
-                                                                     \
-  bool has_##NAME() const { return get(ID)->Is##TYPE(); }            \
-                                                                     \
-  void reset_##NAME() { set_undefined(ID); }
-
-#define WCM_OBJECT(TYPE, NAME) WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME)
-
-#define WCM_SMALL_NUMBER(TYPE, NAME)                               \
-  TYPE NAME() const {                                              \
-    return static_cast<TYPE>(Smi::cast(get(kID_##NAME))->value()); \
-  }
-
-#define WCM_WEAK_LINK(TYPE, NAME)                        \
-  WCM_OBJECT_OR_WEAK(WeakCell, weak_##NAME, kID_##NAME); \
-                                                         \
-  Handle<TYPE> NAME() const {                            \
-    return handle(TYPE::cast(weak_##NAME()->value()));   \
-  }
-
-#define CORE_WCM_PROPERTY_TABLE(MACRO)                \
-  MACRO(OBJECT, FixedArray, code_table)               \
-  MACRO(OBJECT, FixedArray, import_data)              \
-  MACRO(OBJECT, FixedArray, exports)                  \
-  MACRO(OBJECT, FixedArray, startup_function)         \
-  MACRO(OBJECT, FixedArray, indirect_function_tables) \
-  MACRO(OBJECT, String, module_bytes)                 \
-  MACRO(OBJECT, ByteArray, function_names)            \
-  MACRO(SMALL_NUMBER, uint32_t, min_memory_pages)     \
-  MACRO(OBJECT, FixedArray, data_segments_info)       \
-  MACRO(OBJECT, ByteArray, data_segments)             \
-  MACRO(SMALL_NUMBER, uint32_t, globals_size)         \
-  MACRO(OBJECT, JSArrayBuffer, heap)                  \
-  MACRO(SMALL_NUMBER, bool, export_memory)            \
-  MACRO(SMALL_NUMBER, ModuleOrigin, origin)           \
-  MACRO(WEAK_LINK, WasmCompiledModule, next_instance) \
-  MACRO(WEAK_LINK, WasmCompiledModule, prev_instance) \
-  MACRO(WEAK_LINK, JSObject, owning_instance)         \
-  MACRO(WEAK_LINK, JSObject, module_object)
-
-#if DEBUG
-#define DEBUG_ONLY_TABLE(MACRO) MACRO(SMALL_NUMBER, uint32_t, instance_id)
-#else
-#define DEBUG_ONLY_TABLE(IGNORE)
-  uint32_t instance_id() const { return -1; }
-#endif
-
-#define WCM_PROPERTY_TABLE(MACRO) \
-  CORE_WCM_PROPERTY_TABLE(MACRO)  \
-  DEBUG_ONLY_TABLE(MACRO)
-
- private:
-  enum PropertyIndices {
-#define INDICES(IGNORE1, IGNORE2, NAME) kID_##NAME,
-    WCM_PROPERTY_TABLE(INDICES) Count
-#undef INDICES
-  };
-
- public:
-  static Handle<WasmCompiledModule> New(Isolate* isolate,
-                                        uint32_t min_memory_pages,
-                                        uint32_t globals_size,
-                                        bool export_memory,
-                                        ModuleOrigin origin);
-
-  static Handle<WasmCompiledModule> Clone(Isolate* isolate,
-                                          Handle<WasmCompiledModule> module) {
-    Handle<WasmCompiledModule> ret = Handle<WasmCompiledModule>::cast(
-        isolate->factory()->CopyFixedArray(module));
-    ret->Init();
-    ret->reset_weak_owning_instance();
-    ret->reset_weak_next_instance();
-    ret->reset_weak_prev_instance();
-    return ret;
-  }
-
-  uint32_t mem_size() const {
-    DCHECK(has_heap());
-    return heap()->byte_length()->Number();
-  }
-
-  uint32_t default_mem_size() const {
-    return min_memory_pages() * WasmModule::kPageSize;
-  }
-
-#define DECLARATION(KIND, TYPE, NAME) WCM_##KIND(TYPE, NAME)
-  WCM_PROPERTY_TABLE(DECLARATION)
-#undef DECLARATION
-
-  void PrintInstancesChain();
-
- private:
-#if DEBUG
-  static uint32_t instance_id_counter_;
-#endif
-  void Init();
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(WasmCompiledModule);
-};
-
-// Extract a function name from the given wasm object.
-// Returns "<WASM UNNAMED>" if the function is unnamed or the name is not a
-// valid UTF-8 string.
-Handle<String> GetWasmFunctionName(Isolate* isolate, Handle<Object> wasm,
+// Extract a function name from the given wasm instance.
+// Returns "<WASM UNNAMED>" if no instance is passed, the function is unnamed or
+// the name is not a valid UTF-8 string.
+// TODO(5620): Refactor once we always get a wasm instance.
+Handle<String> GetWasmFunctionName(Isolate* isolate, Handle<Object> instance,
                                    uint32_t func_index);
 
-// Extract a function name from the given wasm object.
-// Returns a null handle if the function is unnamed or the name is not a valid
-// UTF-8 string.
-Handle<Object> GetWasmFunctionNameOrNull(Isolate* isolate, Handle<Object> wasm,
-                                         uint32_t func_index);
-
 // Return the binary source bytes of a wasm module.
-SeqOneByteString* GetWasmBytes(JSObject* wasm);
+Handle<SeqOneByteString> GetWasmBytes(Handle<JSObject> wasm);
 
 // Get the debug info associated with the given wasm object.
 // If no debug info exists yet, it is created automatically.
 Handle<WasmDebugInfo> GetDebugInfo(Handle<JSObject> wasm);
 
 // Return the number of functions in the given wasm object.
-int GetNumberOfFunctions(JSObject* wasm);
+int GetNumberOfFunctions(Handle<JSObject> wasm);
 
 // Create and export JSFunction
 Handle<JSFunction> WrapExportCodeAsJSFunction(Isolate* isolate,
                                               Handle<Code> export_code,
-                                              Handle<String> name, int arity,
-                                              MaybeHandle<ByteArray> signature,
-                                              Handle<JSObject> module_instance);
+                                              Handle<String> name,
+                                              FunctionSig* sig, int func_index,
+                                              Handle<JSObject> instance);
 
-// Check whether the given object is a wasm object.
+// Check whether the given object represents a WebAssembly.Instance instance.
 // This checks the number and type of internal fields, so it's not 100 percent
 // secure. If it turns out that we need more complete checks, we could add a
 // special marker as internal field, which will definitely never occur anywhere
 // else.
-bool IsWasmObject(Object* object);
+bool IsWasmInstance(Object* instance);
 
-// Update memory references of code objects associated with the module
-bool UpdateWasmModuleMemory(Handle<JSObject> object, Address old_start,
-                            Address new_start, uint32_t old_size,
-                            uint32_t new_size);
+// Return the compiled module object for this WASM instance.
+WasmCompiledModule* GetCompiledModule(Object* wasm_instance);
 
-// Constructs a single function table as a FixedArray of double size,
-// populating it with function signature indices and function indices.
-Handle<FixedArray> BuildFunctionTable(Isolate* isolate, uint32_t index,
-                                      const WasmModule* module);
+// Check whether the wasm module was generated from asm.js code.
+bool WasmIsAsmJs(Object* instance, Isolate* isolate);
 
-// Populates a function table by replacing function indices with handles to
-// the compiled code.
-void PopulateFunctionTable(Handle<FixedArray> table, uint32_t table_size,
-                           const std::vector<Handle<Code>>* code_table);
+// Get the script of the wasm module. If the origin of the module is asm.js, the
+// returned Script will be a JavaScript Script of Script::TYPE_NORMAL, otherwise
+// it's of type TYPE_WASM.
+Handle<Script> GetScript(Handle<JSObject> instance);
 
-Handle<JSObject> CreateCompiledModuleObject(Isolate* isolate,
-                                            Handle<FixedArray> compiled_module,
-                                            ModuleOrigin origin);
+// Get the asm.js source position for the given byte offset in the given
+// function.
+int GetAsmWasmSourcePosition(Handle<JSObject> instance, int func_index,
+                             int byte_offset);
 
-V8_EXPORT_PRIVATE MaybeHandle<JSObject> CreateModuleObjectFromBytes(
+V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> CreateModuleObjectFromBytes(
     Isolate* isolate, const byte* start, const byte* end, ErrorThrower* thrower,
-    ModuleOrigin origin);
+    ModuleOrigin origin, Handle<Script> asm_js_script,
+    const byte* asm_offset_tables_start, const byte* asm_offset_tables_end);
 
 V8_EXPORT_PRIVATE bool ValidateModuleBytes(Isolate* isolate, const byte* start,
                                            const byte* end,
                                            ErrorThrower* thrower,
                                            ModuleOrigin origin);
 
-// Get the number of imported functions for a WASM instance.
-uint32_t GetNumImportedFunctions(Handle<JSObject> wasm_object);
+// Get the offset of the code of a function within a module.
+int GetFunctionCodeOffset(Handle<WasmCompiledModule> compiled_module,
+                          int func_index);
+
+// Translate from byte offset in the module to function number and byte offset
+// within that function, encoded as line and column in the position info.
+bool GetPositionInfo(Handle<WasmCompiledModule> compiled_module,
+                     uint32_t position, Script::PositionInfo* info);
 
 // Assumed to be called with a code object associated to a wasm module instance.
 // Intended to be called from runtime functions.
 // Returns nullptr on failing to get owning instance.
 Object* GetOwningWasmInstance(Code* code);
 
+MaybeHandle<JSArrayBuffer> GetInstanceMemory(Isolate* isolate,
+                                             Handle<JSObject> instance);
+
 int32_t GetInstanceMemorySize(Isolate* isolate, Handle<JSObject> instance);
 
 int32_t GrowInstanceMemory(Isolate* isolate, Handle<JSObject> instance,
                            uint32_t pages);
 
+void UpdateDispatchTables(Isolate* isolate, Handle<FixedArray> dispatch_tables,
+                          int index, Handle<JSFunction> js_function);
+
 namespace testing {
 
-void ValidateInstancesChain(Isolate* isolate, Handle<JSObject> module_obj,
+void ValidateInstancesChain(Isolate* isolate, Handle<JSObject> wasm_module,
                             int instance_count);
-void ValidateModuleState(Isolate* isolate, Handle<JSObject> module_obj);
+void ValidateModuleState(Isolate* isolate, Handle<JSObject> wasm_module);
 void ValidateOrphanedInstance(Isolate* isolate, Handle<JSObject> instance);
 
 }  // namespace testing
diff --git a/src/wasm/wasm-objects.cc b/src/wasm/wasm-objects.cc
new file mode 100644
index 0000000..68f66d2
--- /dev/null
+++ b/src/wasm/wasm-objects.cc
@@ -0,0 +1,359 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-module.h"
+
+#define TRACE(...)                                      \
+  do {                                                  \
+    if (FLAG_trace_wasm_instances) PrintF(__VA_ARGS__); \
+  } while (false)
+
+#define TRACE_CHAIN(instance)        \
+  do {                               \
+    instance->PrintInstancesChain(); \
+  } while (false)
+
+using namespace v8::internal;
+using namespace v8::internal::wasm;
+
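+// Helper macros defining typed getter/setter pairs over a JSObject internal
+// field; the OPTIONAL variant also provides has_##name(), which checks the
+// field against undefined.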
+#define DEFINE_ACCESSORS(Container, name, field, type) \
+  type* Container::get_##name() {                      \
+    return type::cast(GetInternalField(field));        \
+  }                                                    \
+  void Container::set_##name(type* value) {            \
+    return SetInternalField(field, value);             \
+  }
+
+#define DEFINE_OPTIONAL_ACCESSORS(Container, name, field, type) \
+  bool Container::has_##name() {                                \
+    return !GetInternalField(field)->IsUndefined(GetIsolate()); \
+  }                                                             \
+  type* Container::get_##name() {                               \
+    return type::cast(GetInternalField(field));                 \
+  }                                                             \
+  void Container::set_##name(type* value) {                     \
+    return SetInternalField(field, value);                      \
+  }
+
+#define DEFINE_GETTER(Container, name, field, type) \
+  type* Container::get_##name() { return type::cast(GetInternalField(field)); }
+
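+// Read a number that may be stored as either a Smi or a HeapNumber,
+// CHECK-failing if the value is outside the target range.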
+static uint32_t SafeUint32(Object* value) {
+  if (value->IsSmi()) {
+    int32_t val = Smi::cast(value)->value();
+    CHECK_GE(val, 0);
+    return static_cast<uint32_t>(val);
+  }
+  DCHECK(value->IsHeapNumber());
+  HeapNumber* num = HeapNumber::cast(value);
+  CHECK_GE(num->value(), 0.0);
+  CHECK_LE(num->value(), static_cast<double>(kMaxUInt32));
+  return static_cast<uint32_t>(num->value());
+}
+
+static int32_t SafeInt32(Object* value) {
+  if (value->IsSmi()) {
+    return Smi::cast(value)->value();
+  }
+  DCHECK(value->IsHeapNumber());
+  HeapNumber* num = HeapNumber::cast(value);
+  CHECK_GE(num->value(), static_cast<double>(Smi::kMinValue));
+  CHECK_LE(num->value(), static_cast<double>(Smi::kMaxValue));
+  return static_cast<int32_t>(num->value());
+}
+
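+// Creates the JS-level module object. For wasm origin this is a branded
+// WebAssembly.Module instance; for asm.js origin it is a plain JSObject with
+// the same internal-field layout.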
+Handle<WasmModuleObject> WasmModuleObject::New(
+    Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
+  ModuleOrigin origin = compiled_module->module()->origin;
+
+  Handle<JSObject> module_object;
+  if (origin == ModuleOrigin::kWasmOrigin) {
+    Handle<JSFunction> module_cons(
+        isolate->native_context()->wasm_module_constructor());
+    module_object = isolate->factory()->NewJSObject(module_cons);
+    Handle<Symbol> module_sym(isolate->native_context()->wasm_module_sym());
+    Object::SetProperty(module_object, module_sym, module_object, STRICT)
+        .Check();
+  } else {
+    DCHECK(origin == ModuleOrigin::kAsmJsOrigin);
+    Handle<Map> map = isolate->factory()->NewMap(
+        JS_OBJECT_TYPE,
+        JSObject::kHeaderSize + WasmModuleObject::kFieldCount * kPointerSize);
+    module_object = isolate->factory()->NewJSObjectFromMap(map, TENURED);
+  }
+  module_object->SetInternalField(WasmModuleObject::kCompiledModule,
+                                  *compiled_module);
+  Handle<WeakCell> link_to_module =
+      isolate->factory()->NewWeakCell(module_object);
+  compiled_module->set_weak_wasm_module(link_to_module);
+  return Handle<WasmModuleObject>::cast(module_object);
+}
+
+WasmModuleObject* WasmModuleObject::cast(Object* object) {
+  DCHECK(object->IsJSObject());
+  // TODO(titzer): brand check for WasmModuleObject.
+  return reinterpret_cast<WasmModuleObject*>(object);
+}
+
+Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate, uint32_t initial,
+                                             uint32_t maximum,
+                                             Handle<FixedArray>* js_functions) {
+  Handle<JSFunction> table_ctor(
+      isolate->native_context()->wasm_table_constructor());
+  Handle<JSObject> table_obj = isolate->factory()->NewJSObject(table_ctor);
+  *js_functions = isolate->factory()->NewFixedArray(initial);
+  Object* null = isolate->heap()->null_value();
+  for (int i = 0; i < static_cast<int>(initial); ++i) {
+    (*js_functions)->set(i, null);
+  }
+  table_obj->SetInternalField(kFunctions, *(*js_functions));
+  table_obj->SetInternalField(kMaximum,
+                              static_cast<Object*>(Smi::FromInt(maximum)));
+
+  Handle<FixedArray> dispatch_tables = isolate->factory()->NewFixedArray(0);
+  table_obj->SetInternalField(kDispatchTables, *dispatch_tables);
+  Handle<Symbol> table_sym(isolate->native_context()->wasm_table_sym());
+  Object::SetProperty(table_obj, table_sym, table_obj, STRICT).Check();
+  return Handle<WasmTableObject>::cast(table_obj);
+}
+
+DEFINE_GETTER(WasmTableObject, dispatch_tables, kDispatchTables, FixedArray)
+
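+// The dispatch-tables array stores one (instance, table_index, dispatch_table)
+// triple per instance using this table, flattened into consecutive slots.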
+Handle<FixedArray> WasmTableObject::AddDispatchTable(
+    Isolate* isolate, Handle<WasmTableObject> table_obj,
+    Handle<WasmInstanceObject> instance, int table_index,
+    Handle<FixedArray> dispatch_table) {
+  Handle<FixedArray> dispatch_tables(
+      FixedArray::cast(table_obj->GetInternalField(kDispatchTables)), isolate);
+  DCHECK_EQ(0, dispatch_tables->length() % 3);
+
+  if (instance.is_null()) return dispatch_tables;
+  // TODO(titzer): use weak cells here to avoid leaking instances.
+
+  // Grow the dispatch table and add a new triple at the end.
+  Handle<FixedArray> new_dispatch_tables =
+      isolate->factory()->CopyFixedArrayAndGrow(dispatch_tables, 3);
+
+  new_dispatch_tables->set(dispatch_tables->length() + 0, *instance);
+  new_dispatch_tables->set(dispatch_tables->length() + 1,
+                           Smi::FromInt(table_index));
+  new_dispatch_tables->set(dispatch_tables->length() + 2, *dispatch_table);
+
+  table_obj->SetInternalField(WasmTableObject::kDispatchTables,
+                              *new_dispatch_tables);
+
+  return new_dispatch_tables;
+}
+
+DEFINE_ACCESSORS(WasmTableObject, functions, kFunctions, FixedArray)
+
+uint32_t WasmTableObject::current_length() { return get_functions()->length(); }
+
+uint32_t WasmTableObject::maximum_length() {
+  return SafeUint32(GetInternalField(kMaximum));
+}
+
+WasmTableObject* WasmTableObject::cast(Object* object) {
+  DCHECK(object && object->IsJSObject());
+  // TODO(titzer): brand check for WasmTableObject.
+  return reinterpret_cast<WasmTableObject*>(object);
+}
+
+Handle<WasmMemoryObject> WasmMemoryObject::New(Isolate* isolate,
+                                               Handle<JSArrayBuffer> buffer,
+                                               int maximum) {
+  Handle<JSFunction> memory_ctor(
+      isolate->native_context()->wasm_memory_constructor());
+  Handle<JSObject> memory_obj = isolate->factory()->NewJSObject(memory_ctor);
+  memory_obj->SetInternalField(kArrayBuffer, *buffer);
+  memory_obj->SetInternalField(kMaximum,
+                               static_cast<Object*>(Smi::FromInt(maximum)));
+  Handle<Symbol> memory_sym(isolate->native_context()->wasm_memory_sym());
+  Object::SetProperty(memory_obj, memory_sym, memory_obj, STRICT).Check();
+  return Handle<WasmMemoryObject>::cast(memory_obj);
+}
+
+DEFINE_ACCESSORS(WasmMemoryObject, buffer, kArrayBuffer, JSArrayBuffer)
+
+uint32_t WasmMemoryObject::current_pages() {
+  return SafeUint32(get_buffer()->byte_length()) / wasm::WasmModule::kPageSize;
+}
+
+int32_t WasmMemoryObject::maximum_pages() {
+  return SafeInt32(GetInternalField(kMaximum));
+}
+
+WasmMemoryObject* WasmMemoryObject::cast(Object* object) {
+  DCHECK(object && object->IsJSObject());
+  // TODO(titzer): brand check for WasmMemoryObject.
+  return reinterpret_cast<WasmMemoryObject*>(object);
+}
+
+void WasmMemoryObject::AddInstance(WasmInstanceObject* instance) {
+  // TODO(gdeepti): This should be a weak list of instance objects
+  // for instances that share memory.
+  SetInternalField(kInstance, instance);
+}
+
+DEFINE_ACCESSORS(WasmInstanceObject, compiled_module, kCompiledModule,
+                 WasmCompiledModule)
+DEFINE_OPTIONAL_ACCESSORS(WasmInstanceObject, globals_buffer,
+                          kGlobalsArrayBuffer, JSArrayBuffer)
+DEFINE_OPTIONAL_ACCESSORS(WasmInstanceObject, memory_buffer, kMemoryArrayBuffer,
+                          JSArrayBuffer)
+DEFINE_OPTIONAL_ACCESSORS(WasmInstanceObject, memory_object, kMemoryObject,
+                          WasmMemoryObject)
+DEFINE_OPTIONAL_ACCESSORS(WasmInstanceObject, debug_info, kDebugInfo,
+                          WasmDebugInfo)
+
+WasmModuleObject* WasmInstanceObject::module_object() {
+  return WasmModuleObject::cast(*get_compiled_module()->wasm_module());
+}
+
+WasmModule* WasmInstanceObject::module() {
+  return reinterpret_cast<WasmModuleWrapper*>(
+             *get_compiled_module()->module_wrapper())
+      ->get();
+}
+
+WasmInstanceObject* WasmInstanceObject::cast(Object* object) {
+  DCHECK(IsWasmInstanceObject(object));
+  return reinterpret_cast<WasmInstanceObject*>(object);
+}
+
+bool WasmInstanceObject::IsWasmInstanceObject(Object* object) {
+  if (!object->IsObject()) return false;
+  if (!object->IsJSObject()) return false;
+
+  JSObject* obj = JSObject::cast(object);
+  Isolate* isolate = obj->GetIsolate();
+  if (obj->GetInternalFieldCount() != kFieldCount) {
+    return false;
+  }
+
+  Object* mem = obj->GetInternalField(kMemoryArrayBuffer);
+  if (!(mem->IsUndefined(isolate) || mem->IsJSArrayBuffer()) ||
+      !WasmCompiledModule::IsWasmCompiledModule(
+          obj->GetInternalField(kCompiledModule))) {
+    return false;
+  }
+
+  // All checks passed.
+  return true;
+}
+
+Handle<WasmInstanceObject> WasmInstanceObject::New(
+    Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
+  Handle<Map> map = isolate->factory()->NewMap(
+      JS_OBJECT_TYPE, JSObject::kHeaderSize + kFieldCount * kPointerSize);
+  Handle<WasmInstanceObject> instance(
+      reinterpret_cast<WasmInstanceObject*>(
+          *isolate->factory()->NewJSObjectFromMap(map, TENURED)),
+      isolate);
+
+  instance->SetInternalField(kCompiledModule, *compiled_module);
+  instance->SetInternalField(kMemoryObject, isolate->heap()->undefined_value());
+  return instance;
+}
+
+WasmInstanceObject* WasmExportedFunction::instance() {
+  return WasmInstanceObject::cast(GetInternalField(kInstance));
+}
+
+int WasmExportedFunction::function_index() {
+  return SafeInt32(GetInternalField(kIndex));
+}
+
+WasmExportedFunction* WasmExportedFunction::cast(Object* object) {
+  DCHECK(object && object->IsJSFunction());
+  DCHECK_EQ(Code::JS_TO_WASM_FUNCTION,
+            JSFunction::cast(object)->code()->kind());
+  // TODO(titzer): brand check for WasmExportedFunction.
+  return reinterpret_cast<WasmExportedFunction*>(object);
+}
+
+Handle<WasmExportedFunction> WasmExportedFunction::New(
+    Isolate* isolate, Handle<WasmInstanceObject> instance, Handle<String> name,
+    Handle<Code> export_wrapper, int arity, int func_index) {
+  DCHECK_EQ(Code::JS_TO_WASM_FUNCTION, export_wrapper->kind());
+  Handle<SharedFunctionInfo> shared =
+      isolate->factory()->NewSharedFunctionInfo(name, export_wrapper, false);
+  shared->set_length(arity);
+  shared->set_internal_formal_parameter_count(arity);
+  Handle<JSFunction> function = isolate->factory()->NewFunction(
+      isolate->wasm_function_map(), name, export_wrapper);
+  function->set_shared(*shared);
+
+  function->SetInternalField(kInstance, *instance);
+  function->SetInternalField(kIndex, Smi::FromInt(func_index));
+  return Handle<WasmExportedFunction>::cast(function);
+}
+
+Handle<WasmCompiledModule> WasmCompiledModule::New(
+    Isolate* isolate, Handle<WasmModuleWrapper> module_wrapper) {
+  Handle<FixedArray> ret =
+      isolate->factory()->NewFixedArray(PropertyIndices::Count, TENURED);
+  // WasmCompiledModule::cast would fail since module bytes are not set yet.
+  Handle<WasmCompiledModule> compiled_module(
+      reinterpret_cast<WasmCompiledModule*>(*ret), isolate);
+  compiled_module->InitId();
+  compiled_module->set_module_wrapper(module_wrapper);
+  return compiled_module;
+}
+
+wasm::WasmModule* WasmCompiledModule::module() const {
+  return reinterpret_cast<WasmModuleWrapper*>(*module_wrapper())->get();
+}
+
+void WasmCompiledModule::InitId() {
+#if DEBUG
+  static uint32_t instance_id_counter = 0;
+  set(kID_instance_id, Smi::FromInt(instance_id_counter++));
+  TRACE("New compiled module id: %d\n", instance_id());
+#endif
+}
+
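+// Structural check only: verifies the FixedArray has the expected length and
+// that each slot matches the type declared for it in WCM_PROPERTY_TABLE.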
+bool WasmCompiledModule::IsWasmCompiledModule(Object* obj) {
+  if (!obj->IsFixedArray()) return false;
+  FixedArray* arr = FixedArray::cast(obj);
+  if (arr->length() != PropertyIndices::Count) return false;
+  Isolate* isolate = arr->GetIsolate();
+#define WCM_CHECK_SMALL_NUMBER(TYPE, NAME) \
+  if (!arr->get(kID_##NAME)->IsSmi()) return false;
+#define WCM_CHECK_OBJECT_OR_WEAK(TYPE, NAME)         \
+  if (!arr->get(kID_##NAME)->IsUndefined(isolate) && \
+      !arr->get(kID_##NAME)->Is##TYPE())             \
+    return false;
+#define WCM_CHECK_OBJECT(TYPE, NAME) WCM_CHECK_OBJECT_OR_WEAK(TYPE, NAME)
+#define WCM_CHECK_WEAK_LINK(TYPE, NAME) WCM_CHECK_OBJECT_OR_WEAK(WeakCell, NAME)
+#define WCM_CHECK(KIND, TYPE, NAME) WCM_CHECK_##KIND(TYPE, NAME)
+  WCM_PROPERTY_TABLE(WCM_CHECK)
+#undef WCM_CHECK
+
+  // All checks passed.
+  return true;
+}
+
+void WasmCompiledModule::PrintInstancesChain() {
+#if DEBUG
+  if (!FLAG_trace_wasm_instances) return;
+  for (WasmCompiledModule* current = this; current != nullptr;) {
+    PrintF("->%d", current->instance_id());
+    if (current->ptr_to_weak_next_instance() == nullptr) break;
+    CHECK(!current->ptr_to_weak_next_instance()->cleared());
+    current =
+        WasmCompiledModule::cast(current->ptr_to_weak_next_instance()->value());
+  }
+  PrintF("\n");
+#endif
+}
+
+uint32_t WasmCompiledModule::mem_size() const {
+  return has_memory() ? memory()->byte_length()->Number() : default_mem_size();
+}
+
+uint32_t WasmCompiledModule::default_mem_size() const {
+  return min_mem_pages() * WasmModule::kPageSize;
+}
diff --git a/src/wasm/wasm-objects.h b/src/wasm/wasm-objects.h
new file mode 100644
index 0000000..f74661f
--- /dev/null
+++ b/src/wasm/wasm-objects.h
@@ -0,0 +1,308 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_OBJECTS_H_
+#define V8_WASM_OBJECTS_H_
+
+#include "src/objects-inl.h"
+#include "src/wasm/managed.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+struct WasmModule;
+}
+
+class WasmCompiledModule;
+class WasmDebugInfo;
+class WasmInstanceObject;
+
+#define DECLARE_CASTS(name)             \
+  static bool Is##name(Object* object); \
+  static name* cast(Object* object)
+
+#define DECLARE_ACCESSORS(name, type) \
+  type* get_##name();                 \
+  void set_##name(type* value)
+
+#define DECLARE_OPTIONAL_ACCESSORS(name, type) \
+  bool has_##name();                           \
+  type* get_##name();                          \
+  void set_##name(type* value)
+
+// Representation of a WebAssembly.Module JavaScript-level object.
+class WasmModuleObject : public JSObject {
+ public:
+  // TODO(titzer): add the brand as an internal field instead of a property.
+  enum Fields { kCompiledModule, kFieldCount };
+
+  DECLARE_CASTS(WasmModuleObject);
+
+  WasmCompiledModule* compiled_module();
+  wasm::WasmModule* module();
+  int num_functions();
+  bool is_asm_js();
+  int GetAsmWasmSourcePosition(int func_index, int byte_offset);
+  WasmDebugInfo* debug_info();
+  void set_debug_info(WasmDebugInfo* debug_info);
+  MaybeHandle<String> GetFunctionName(Isolate* isolate, int func_index);
+
+  static Handle<WasmModuleObject> New(
+      Isolate* isolate, Handle<WasmCompiledModule> compiled_module);
+};
+
+// Representation of a WebAssembly.Table JavaScript-level object.
+class WasmTableObject : public JSObject {
+ public:
+  // TODO(titzer): add the brand as an internal field instead of a property.
+  enum Fields { kFunctions, kMaximum, kDispatchTables, kFieldCount };
+
+  DECLARE_CASTS(WasmTableObject);
+  DECLARE_ACCESSORS(functions, FixedArray);
+
+  FixedArray* get_dispatch_tables();
+  uint32_t current_length();
+  uint32_t maximum_length();
+
+  static Handle<WasmTableObject> New(Isolate* isolate, uint32_t initial,
+                                     uint32_t maximum,
+                                     Handle<FixedArray>* js_functions);
+  static bool Grow(Handle<WasmTableObject> table, uint32_t count);
+  static Handle<FixedArray> AddDispatchTable(
+      Isolate* isolate, Handle<WasmTableObject> table,
+      Handle<WasmInstanceObject> instance, int table_index,
+      Handle<FixedArray> dispatch_table);
+};
+
+// Representation of a WebAssembly.Memory JavaScript-level object.
+class WasmMemoryObject : public JSObject {
+ public:
+  // TODO(titzer): add the brand as an internal field instead of a property.
+  enum Fields : uint8_t { kArrayBuffer, kMaximum, kInstance, kFieldCount };
+
+  DECLARE_CASTS(WasmMemoryObject);
+  DECLARE_ACCESSORS(buffer, JSArrayBuffer);
+
+  void AddInstance(WasmInstanceObject* object);
+  uint32_t current_pages();
+  int32_t maximum_pages();  // returns < 0 if there is no maximum
+
+  static Handle<WasmMemoryObject> New(Isolate* isolate,
+                                      Handle<JSArrayBuffer> buffer,
+                                      int maximum);
+
+  static bool Grow(Handle<WasmMemoryObject> memory, uint32_t count);
+};
+
+// Representation of a WebAssembly.Instance JavaScript-level object.
+class WasmInstanceObject : public JSObject {
+ public:
+  // TODO(titzer): add the brand as an internal field instead of a property.
+  enum Fields {
+    kCompiledModule,
+    kMemoryObject,
+    kMemoryArrayBuffer,
+    kGlobalsArrayBuffer,
+    kDebugInfo,
+    kFieldCount
+  };
+
+  DECLARE_CASTS(WasmInstanceObject);
+
+  DECLARE_ACCESSORS(compiled_module, WasmCompiledModule);
+  DECLARE_OPTIONAL_ACCESSORS(globals_buffer, JSArrayBuffer);
+  DECLARE_OPTIONAL_ACCESSORS(memory_buffer, JSArrayBuffer);
+  DECLARE_OPTIONAL_ACCESSORS(memory_object, WasmMemoryObject);
+  DECLARE_OPTIONAL_ACCESSORS(debug_info, WasmDebugInfo);
+
+  WasmModuleObject* module_object();
+  wasm::WasmModule* module();
+
+  static Handle<WasmInstanceObject> New(
+      Isolate* isolate, Handle<WasmCompiledModule> compiled_module);
+};
+
+// Representation of an exported WASM function.
+class WasmExportedFunction : public JSFunction {
+ public:
+  enum Fields { kInstance, kIndex, kFieldCount };
+
+  DECLARE_CASTS(WasmExportedFunction);
+
+  WasmInstanceObject* instance();
+  int function_index();
+
+  static Handle<WasmExportedFunction> New(Isolate* isolate,
+                                          Handle<WasmInstanceObject> instance,
+                                          Handle<String> name,
+                                          Handle<Code> export_wrapper,
+                                          int arity, int func_index);
+};
+
+class WasmCompiledModule : public FixedArray {
+ public:
+  enum Fields { kFieldCount };
+
+  static WasmCompiledModule* cast(Object* fixed_array) {
+    SLOW_DCHECK(IsWasmCompiledModule(fixed_array));
+    return reinterpret_cast<WasmCompiledModule*>(fixed_array);
+  }
+
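+// Defines handle, maybe-handle, and raw-pointer accessors for the FixedArray
+// slot at index ID; has_##NAME() reports whether the slot currently holds a
+// TYPE.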
+#define WCM_OBJECT_OR_WEAK(TYPE, NAME, ID)                           \
+  Handle<TYPE> NAME() const { return handle(ptr_to_##NAME()); }      \
+                                                                     \
+  MaybeHandle<TYPE> maybe_##NAME() const {                           \
+    if (has_##NAME()) return NAME();                                 \
+    return MaybeHandle<TYPE>();                                      \
+  }                                                                  \
+                                                                     \
+  TYPE* ptr_to_##NAME() const {                                      \
+    Object* obj = get(ID);                                           \
+    if (!obj->Is##TYPE()) return nullptr;                            \
+    return TYPE::cast(obj);                                          \
+  }                                                                  \
+                                                                     \
+  void set_##NAME(Handle<TYPE> value) { set_ptr_to_##NAME(*value); } \
+                                                                     \
+  void set_ptr_to_##NAME(TYPE* value) { set(ID, value); }            \
+                                                                     \
+  bool has_##NAME() const { return get(ID)->Is##TYPE(); }            \
+                                                                     \
+  void reset_##NAME() { set_undefined(ID); }
+
+#define WCM_OBJECT(TYPE, NAME) WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME)
+
+#define WCM_SMALL_NUMBER(TYPE, NAME)                               \
+  TYPE NAME() const {                                              \
+    return static_cast<TYPE>(Smi::cast(get(kID_##NAME))->value()); \
+  }                                                                \
+  void set_##NAME(TYPE value) { set(kID_##NAME, Smi::FromInt(value)); }
+
+#define WCM_WEAK_LINK(TYPE, NAME)                        \
+  WCM_OBJECT_OR_WEAK(WeakCell, weak_##NAME, kID_##NAME); \
+                                                         \
+  Handle<TYPE> NAME() const {                            \
+    return handle(TYPE::cast(weak_##NAME()->value()));   \
+  }
+
+#define CORE_WCM_PROPERTY_TABLE(MACRO)                \
+  MACRO(OBJECT, FixedArray, code_table)               \
+  MACRO(OBJECT, Foreign, module_wrapper)              \
+  /* For debugging: */                                \
+  MACRO(OBJECT, SeqOneByteString, module_bytes)       \
+  MACRO(OBJECT, Script, script)                       \
+  MACRO(OBJECT, ByteArray, asm_js_offset_tables)      \
+  /* End of debugging stuff */                        \
+  MACRO(OBJECT, FixedArray, function_tables)          \
+  MACRO(OBJECT, FixedArray, empty_function_tables)    \
+  MACRO(OBJECT, JSArrayBuffer, memory)                \
+  MACRO(SMALL_NUMBER, uint32_t, min_mem_pages)        \
+  MACRO(SMALL_NUMBER, uint32_t, max_mem_pages)        \
+  MACRO(WEAK_LINK, WasmCompiledModule, next_instance) \
+  MACRO(WEAK_LINK, WasmCompiledModule, prev_instance) \
+  MACRO(WEAK_LINK, JSObject, owning_instance)         \
+  MACRO(WEAK_LINK, JSObject, wasm_module)
+
+#if DEBUG
+#define DEBUG_ONLY_TABLE(MACRO) MACRO(SMALL_NUMBER, uint32_t, instance_id)
+#else
+#define DEBUG_ONLY_TABLE(IGNORE)
+  uint32_t instance_id() const { return -1; }
+#endif
+
+#define WCM_PROPERTY_TABLE(MACRO) \
+  CORE_WCM_PROPERTY_TABLE(MACRO)  \
+  DEBUG_ONLY_TABLE(MACRO)
+
+ private:
+  enum PropertyIndices {
+#define INDICES(IGNORE1, IGNORE2, NAME) kID_##NAME,
+    WCM_PROPERTY_TABLE(INDICES) Count
+#undef INDICES
+  };
+
+ public:
+  static Handle<WasmCompiledModule> New(
+      Isolate* isolate, Handle<Managed<wasm::WasmModule>> module_wrapper);
+
+  static Handle<WasmCompiledModule> Clone(Isolate* isolate,
+                                          Handle<WasmCompiledModule> module) {
+    Handle<WasmCompiledModule> ret = Handle<WasmCompiledModule>::cast(
+        isolate->factory()->CopyFixedArray(module));
+    ret->InitId();
+    ret->reset_weak_owning_instance();
+    ret->reset_weak_next_instance();
+    ret->reset_weak_prev_instance();
+    return ret;
+  }
+
+  uint32_t mem_size() const;
+  uint32_t default_mem_size() const;
+
+  wasm::WasmModule* module() const;
+
+#define DECLARATION(KIND, TYPE, NAME) WCM_##KIND(TYPE, NAME)
+  WCM_PROPERTY_TABLE(DECLARATION)
+#undef DECLARATION
+
+  static bool IsWasmCompiledModule(Object* obj);
+
+  void PrintInstancesChain();
+
+  static void RecreateModuleWrapper(Isolate* isolate,
+                                    Handle<FixedArray> compiled_module);
+
+  // Extract a function name from the given compiled module.
+  // Returns a null handle if the function is unnamed or the name is not a valid
+  // UTF-8 string.
+  static MaybeHandle<String> GetFunctionName(
+      Handle<WasmCompiledModule> compiled_module, uint32_t func_index);
+
+ private:
+  void InitId();
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(WasmCompiledModule);
+};
+
+class WasmDebugInfo : public FixedArray {
+ public:
+  enum class Fields { kFieldCount };
+
+  static Handle<WasmDebugInfo> New(Handle<JSObject> wasm);
+
+  static bool IsDebugInfo(Object* object);
+  static WasmDebugInfo* cast(Object* object);
+
+  JSObject* wasm_instance();
+
+  bool SetBreakPoint(int byte_offset);
+
+  // Get the Script for the specified function.
+  static Script* GetFunctionScript(Handle<WasmDebugInfo> debug_info,
+                                   int func_index);
+
+  // Disassemble the specified function from this module.
+  static Handle<String> DisassembleFunction(Handle<WasmDebugInfo> debug_info,
+                                            int func_index);
+
+  // Get the offset table for the specified function, mapping from byte offsets
+  // to position in the disassembly.
+  // Returns an array with three entries per instruction: byte offset, line and
+  // column.
+  static Handle<FixedArray> GetFunctionOffsetTable(
+      Handle<WasmDebugInfo> debug_info, int func_index);
+
+  // Get the asm.js source position from a byte offset.
+  // Must only be called if the associated wasm object was created from asm.js.
+  static int GetAsmJsSourcePosition(Handle<WasmDebugInfo> debug_info,
+                                    int func_index, int byte_offset);
+};
+
+#undef DECLARE_CASTS
+#undef DECLARE_ACCESSORS
+#undef DECLARE_OPTIONAL_ACCESSORS
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_WASM_OBJECTS_H_
diff --git a/src/wasm/wasm-opcodes.cc b/src/wasm/wasm-opcodes.cc
index cd2dde4..8f81b81 100644
--- a/src/wasm/wasm-opcodes.cc
+++ b/src/wasm/wasm-opcodes.cc
@@ -88,6 +88,7 @@
 static byte kSimpleExprSigTable[256];
 static byte kSimpleAsmjsExprSigTable[256];
 static byte kSimdExprSigTable[256];
+static byte kAtomicExprSigTable[256];
 
 // Initialize the signature table.
 static void InitSigTables() {
@@ -105,6 +106,12 @@
   kSimdExprSigTable[simd_index] = static_cast<int>(kSigEnum_##sig) + 1;
   FOREACH_SIMD_0_OPERAND_OPCODE(SET_SIG_TABLE)
 #undef SET_SIG_TABLE
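+  // Atomic opcodes share the simple-expression signature enum; entries are
+  // indexed by the low opcode byte and offset by one so that 0 means "none".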
+  byte atomic_index;
+#define SET_ATOMIC_SIG_TABLE(name, opcode, sig) \
+  atomic_index = opcode & 0xff;                 \
+  kAtomicExprSigTable[atomic_index] = static_cast<int>(kSigEnum_##sig) + 1;
+  FOREACH_ATOMIC_OPCODE(SET_ATOMIC_SIG_TABLE)
+#undef SET_ATOMIC_SIG_TABLE
 }
 
 class SigTable {
@@ -125,6 +132,10 @@
     return const_cast<FunctionSig*>(
         kSimdExprSigs[kSimdExprSigTable[static_cast<byte>(opcode & 0xff)]]);
   }
+  FunctionSig* AtomicSignature(WasmOpcode opcode) const {
+    return const_cast<FunctionSig*>(
+        kSimpleExprSigs[kAtomicExprSigTable[static_cast<byte>(opcode & 0xff)]]);
+  }
 };
 
 static base::LazyInstance<SigTable>::type sig_table = LAZY_INSTANCE_INITIALIZER;
@@ -141,6 +152,10 @@
   return sig_table.Get().AsmjsSignature(opcode);
 }
 
+FunctionSig* WasmOpcodes::AtomicSignature(WasmOpcode opcode) {
+  return sig_table.Get().AtomicSignature(opcode);
+}
+
 // TODO(titzer): pull WASM_64 up to a common header.
 #if !V8_TARGET_ARCH_32_BIT || V8_TARGET_ARCH_X64
 #define WASM_64 1
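Note that kAtomicExprSigTable follows the same convention as the other
signature tables: entries store the signature enum plus one, so zero means
"no signature", and the lookup keys on the low byte of the prefixed opcode.
Illustrative lookup (kExprI32AtomicAdd8S is the enum value generated from
FOREACH_ATOMIC_OPCODE in wasm-opcodes.h):

    // 0xe601 = prefix byte 0xe6, sub-opcode 0x01; the & 0xff mask selects
    // index 1 of kAtomicExprSigTable.
    FunctionSig* sig = WasmOpcodes::AtomicSignature(kExprI32AtomicAdd8S);
    // sig is i_ii: one i32 result, two i32 parameters.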
diff --git a/src/wasm/wasm-opcodes.h b/src/wasm/wasm-opcodes.h
index 03827b2..ec22579 100644
--- a/src/wasm/wasm-opcodes.h
+++ b/src/wasm/wasm-opcodes.h
@@ -5,6 +5,7 @@
 #ifndef V8_WASM_OPCODES_H_
 #define V8_WASM_OPCODES_H_
 
+#include "src/globals.h"
 #include "src/machine-type.h"
 #include "src/signature.h"
 
@@ -14,12 +15,12 @@
 
 // Binary encoding of local types.
 enum LocalTypeCode {
-  kLocalVoid = 0,
-  kLocalI32 = 1,
-  kLocalI64 = 2,
-  kLocalF32 = 3,
-  kLocalF64 = 4,
-  kLocalS128 = 5
+  kLocalVoid = 0x40,
+  kLocalI32 = 0x7f,
+  kLocalI64 = 0x7e,
+  kLocalF32 = 0x7d,
+  kLocalF64 = 0x7c,
+  kLocalS128 = 0x7b
 };
 
 // Type code for multi-value block types.
@@ -46,199 +47,198 @@
 const WasmCodePosition kNoCodePosition = -1;
 
 // Control expressions and blocks.
-#define FOREACH_CONTROL_OPCODE(V) \
-  V(Unreachable, 0x00, _)         \
-  V(Block, 0x01, _)               \
-  V(Loop, 0x02, _)                \
-  V(If, 0x03, _)                  \
-  V(Else, 0x04, _)                \
-  V(Select, 0x05, _)              \
-  V(Br, 0x06, _)                  \
-  V(BrIf, 0x07, _)                \
-  V(BrTable, 0x08, _)             \
-  V(Return, 0x09, _)              \
-  V(Nop, 0x0a, _)                 \
-  V(Throw, 0xfa, _)               \
-  V(Try, 0xfb, _)                 \
-  V(Catch, 0xfe, _)               \
-  V(End, 0x0F, _)
+#define FOREACH_CONTROL_OPCODE(V)      \
+  V(Unreachable, 0x00, _)              \
+  V(Nop, 0x01, _)                      \
+  V(Block, 0x02, _)                    \
+  V(Loop, 0x03, _)                     \
+  V(If, 0x04, _)                       \
+  V(Else, 0x05, _)                     \
+  V(Try, 0x06, _ /* eh_prototype */)   \
+  V(Catch, 0x07, _ /* eh_prototype */) \
+  V(Throw, 0x08, _ /* eh_prototype */) \
+  V(End, 0x0b, _)                      \
+  V(Br, 0x0c, _)                       \
+  V(BrIf, 0x0d, _)                     \
+  V(BrTable, 0x0e, _)                  \
+  V(Return, 0x0f, _)
 
 // Constants, locals, globals, and calls.
 #define FOREACH_MISC_OPCODE(V) \
-  V(I32Const, 0x10, _)         \
-  V(I64Const, 0x11, _)         \
-  V(F64Const, 0x12, _)         \
-  V(F32Const, 0x13, _)         \
-  V(GetLocal, 0x14, _)         \
-  V(SetLocal, 0x15, _)         \
-  V(TeeLocal, 0x19, _)         \
-  V(Drop, 0x0b, _)             \
-  V(CallFunction, 0x16, _)     \
-  V(CallIndirect, 0x17, _)     \
-  V(I8Const, 0xcb, _)          \
-  V(GetGlobal, 0xbb, _)        \
-  V(SetGlobal, 0xbc, _)
+  V(CallFunction, 0x10, _)     \
+  V(CallIndirect, 0x11, _)     \
+  V(Drop, 0x1a, _)             \
+  V(Select, 0x1b, _)           \
+  V(GetLocal, 0x20, _)         \
+  V(SetLocal, 0x21, _)         \
+  V(TeeLocal, 0x22, _)         \
+  V(GetGlobal, 0x23, _)        \
+  V(SetGlobal, 0x24, _)        \
+  V(I32Const, 0x41, _)         \
+  V(I64Const, 0x42, _)         \
+  V(F32Const, 0x43, _)         \
+  V(F64Const, 0x44, _)         \
+  V(I8Const, 0xcb, _ /* TODO(titzer): V8 specific, remove */)
 
 // Load memory expressions.
 #define FOREACH_LOAD_MEM_OPCODE(V) \
-  V(I32LoadMem8S, 0x20, i_i)       \
-  V(I32LoadMem8U, 0x21, i_i)       \
-  V(I32LoadMem16S, 0x22, i_i)      \
-  V(I32LoadMem16U, 0x23, i_i)      \
-  V(I64LoadMem8S, 0x24, l_i)       \
-  V(I64LoadMem8U, 0x25, l_i)       \
-  V(I64LoadMem16S, 0x26, l_i)      \
-  V(I64LoadMem16U, 0x27, l_i)      \
-  V(I64LoadMem32S, 0x28, l_i)      \
-  V(I64LoadMem32U, 0x29, l_i)      \
-  V(I32LoadMem, 0x2a, i_i)         \
-  V(I64LoadMem, 0x2b, l_i)         \
-  V(F32LoadMem, 0x2c, f_i)         \
-  V(F64LoadMem, 0x2d, d_i)
+  V(I32LoadMem, 0x28, i_i)         \
+  V(I64LoadMem, 0x29, l_i)         \
+  V(F32LoadMem, 0x2a, f_i)         \
+  V(F64LoadMem, 0x2b, d_i)         \
+  V(I32LoadMem8S, 0x2c, i_i)       \
+  V(I32LoadMem8U, 0x2d, i_i)       \
+  V(I32LoadMem16S, 0x2e, i_i)      \
+  V(I32LoadMem16U, 0x2f, i_i)      \
+  V(I64LoadMem8S, 0x30, l_i)       \
+  V(I64LoadMem8U, 0x31, l_i)       \
+  V(I64LoadMem16S, 0x32, l_i)      \
+  V(I64LoadMem16U, 0x33, l_i)      \
+  V(I64LoadMem32S, 0x34, l_i)      \
+  V(I64LoadMem32U, 0x35, l_i)
 
 // Store memory expressions.
 #define FOREACH_STORE_MEM_OPCODE(V) \
-  V(I32StoreMem8, 0x2e, i_ii)       \
-  V(I32StoreMem16, 0x2f, i_ii)      \
-  V(I64StoreMem8, 0x30, l_il)       \
-  V(I64StoreMem16, 0x31, l_il)      \
-  V(I64StoreMem32, 0x32, l_il)      \
-  V(I32StoreMem, 0x33, i_ii)        \
-  V(I64StoreMem, 0x34, l_il)        \
-  V(F32StoreMem, 0x35, f_if)        \
-  V(F64StoreMem, 0x36, d_id)
+  V(I32StoreMem, 0x36, i_ii)        \
+  V(I64StoreMem, 0x37, l_il)        \
+  V(F32StoreMem, 0x38, f_if)        \
+  V(F64StoreMem, 0x39, d_id)        \
+  V(I32StoreMem8, 0x3a, i_ii)       \
+  V(I32StoreMem16, 0x3b, i_ii)      \
+  V(I64StoreMem8, 0x3c, l_il)       \
+  V(I64StoreMem16, 0x3d, l_il)      \
+  V(I64StoreMem32, 0x3e, l_il)
 
-#define FOREACH_SIMPLE_MEM_OPCODE(V) V(GrowMemory, 0x39, i_i)
-
-// Load memory expressions.
+// Miscellaneous memory expressions.
 #define FOREACH_MISC_MEM_OPCODE(V) \
-  V(MemorySize, 0x3b, i_v)
+  V(MemorySize, 0x3f, i_v)         \
+  V(GrowMemory, 0x40, i_i)
 
 // Expressions with signatures.
 #define FOREACH_SIMPLE_OPCODE(V)  \
-  V(I32Add, 0x40, i_ii)           \
-  V(I32Sub, 0x41, i_ii)           \
-  V(I32Mul, 0x42, i_ii)           \
-  V(I32DivS, 0x43, i_ii)          \
-  V(I32DivU, 0x44, i_ii)          \
-  V(I32RemS, 0x45, i_ii)          \
-  V(I32RemU, 0x46, i_ii)          \
-  V(I32And, 0x47, i_ii)           \
-  V(I32Ior, 0x48, i_ii)           \
-  V(I32Xor, 0x49, i_ii)           \
-  V(I32Shl, 0x4a, i_ii)           \
-  V(I32ShrU, 0x4b, i_ii)          \
-  V(I32ShrS, 0x4c, i_ii)          \
-  V(I32Eq, 0x4d, i_ii)            \
-  V(I32Ne, 0x4e, i_ii)            \
-  V(I32LtS, 0x4f, i_ii)           \
-  V(I32LeS, 0x50, i_ii)           \
-  V(I32LtU, 0x51, i_ii)           \
-  V(I32LeU, 0x52, i_ii)           \
-  V(I32GtS, 0x53, i_ii)           \
-  V(I32GeS, 0x54, i_ii)           \
-  V(I32GtU, 0x55, i_ii)           \
-  V(I32GeU, 0x56, i_ii)           \
-  V(I32Clz, 0x57, i_i)            \
-  V(I32Ctz, 0x58, i_i)            \
-  V(I32Popcnt, 0x59, i_i)         \
-  V(I32Eqz, 0x5a, i_i)            \
-  V(I64Add, 0x5b, l_ll)           \
-  V(I64Sub, 0x5c, l_ll)           \
-  V(I64Mul, 0x5d, l_ll)           \
-  V(I64DivS, 0x5e, l_ll)          \
-  V(I64DivU, 0x5f, l_ll)          \
-  V(I64RemS, 0x60, l_ll)          \
-  V(I64RemU, 0x61, l_ll)          \
-  V(I64And, 0x62, l_ll)           \
-  V(I64Ior, 0x63, l_ll)           \
-  V(I64Xor, 0x64, l_ll)           \
-  V(I64Shl, 0x65, l_ll)           \
-  V(I64ShrU, 0x66, l_ll)          \
-  V(I64ShrS, 0x67, l_ll)          \
-  V(I64Eq, 0x68, i_ll)            \
-  V(I64Ne, 0x69, i_ll)            \
-  V(I64LtS, 0x6a, i_ll)           \
-  V(I64LeS, 0x6b, i_ll)           \
-  V(I64LtU, 0x6c, i_ll)           \
-  V(I64LeU, 0x6d, i_ll)           \
-  V(I64GtS, 0x6e, i_ll)           \
-  V(I64GeS, 0x6f, i_ll)           \
-  V(I64GtU, 0x70, i_ll)           \
-  V(I64GeU, 0x71, i_ll)           \
-  V(I64Clz, 0x72, l_l)            \
-  V(I64Ctz, 0x73, l_l)            \
-  V(I64Popcnt, 0x74, l_l)         \
-  V(I64Eqz, 0xba, i_l)            \
-  V(F32Add, 0x75, f_ff)           \
-  V(F32Sub, 0x76, f_ff)           \
-  V(F32Mul, 0x77, f_ff)           \
-  V(F32Div, 0x78, f_ff)           \
-  V(F32Min, 0x79, f_ff)           \
-  V(F32Max, 0x7a, f_ff)           \
-  V(F32Abs, 0x7b, f_f)            \
-  V(F32Neg, 0x7c, f_f)            \
-  V(F32CopySign, 0x7d, f_ff)      \
-  V(F32Ceil, 0x7e, f_f)           \
-  V(F32Floor, 0x7f, f_f)          \
-  V(F32Trunc, 0x80, f_f)          \
-  V(F32NearestInt, 0x81, f_f)     \
-  V(F32Sqrt, 0x82, f_f)           \
-  V(F32Eq, 0x83, i_ff)            \
-  V(F32Ne, 0x84, i_ff)            \
-  V(F32Lt, 0x85, i_ff)            \
-  V(F32Le, 0x86, i_ff)            \
-  V(F32Gt, 0x87, i_ff)            \
-  V(F32Ge, 0x88, i_ff)            \
-  V(F64Add, 0x89, d_dd)           \
-  V(F64Sub, 0x8a, d_dd)           \
-  V(F64Mul, 0x8b, d_dd)           \
-  V(F64Div, 0x8c, d_dd)           \
-  V(F64Min, 0x8d, d_dd)           \
-  V(F64Max, 0x8e, d_dd)           \
-  V(F64Abs, 0x8f, d_d)            \
-  V(F64Neg, 0x90, d_d)            \
-  V(F64CopySign, 0x91, d_dd)      \
-  V(F64Ceil, 0x92, d_d)           \
-  V(F64Floor, 0x93, d_d)          \
-  V(F64Trunc, 0x94, d_d)          \
-  V(F64NearestInt, 0x95, d_d)     \
-  V(F64Sqrt, 0x96, d_d)           \
-  V(F64Eq, 0x97, i_dd)            \
-  V(F64Ne, 0x98, i_dd)            \
-  V(F64Lt, 0x99, i_dd)            \
-  V(F64Le, 0x9a, i_dd)            \
-  V(F64Gt, 0x9b, i_dd)            \
-  V(F64Ge, 0x9c, i_dd)            \
-  V(I32SConvertF32, 0x9d, i_f)    \
-  V(I32SConvertF64, 0x9e, i_d)    \
-  V(I32UConvertF32, 0x9f, i_f)    \
-  V(I32UConvertF64, 0xa0, i_d)    \
-  V(I32ConvertI64, 0xa1, i_l)     \
-  V(I64SConvertF32, 0xa2, l_f)    \
-  V(I64SConvertF64, 0xa3, l_d)    \
-  V(I64UConvertF32, 0xa4, l_f)    \
-  V(I64UConvertF64, 0xa5, l_d)    \
-  V(I64SConvertI32, 0xa6, l_i)    \
-  V(I64UConvertI32, 0xa7, l_i)    \
-  V(F32SConvertI32, 0xa8, f_i)    \
-  V(F32UConvertI32, 0xa9, f_i)    \
-  V(F32SConvertI64, 0xaa, f_l)    \
-  V(F32UConvertI64, 0xab, f_l)    \
-  V(F32ConvertF64, 0xac, f_d)     \
-  V(F32ReinterpretI32, 0xad, f_i) \
-  V(F64SConvertI32, 0xae, d_i)    \
-  V(F64UConvertI32, 0xaf, d_i)    \
-  V(F64SConvertI64, 0xb0, d_l)    \
-  V(F64UConvertI64, 0xb1, d_l)    \
-  V(F64ConvertF32, 0xb2, d_f)     \
-  V(F64ReinterpretI64, 0xb3, d_l) \
-  V(I32ReinterpretF32, 0xb4, i_f) \
-  V(I64ReinterpretF64, 0xb5, l_d) \
-  V(I32Ror, 0xb6, i_ii)           \
-  V(I32Rol, 0xb7, i_ii)           \
-  V(I64Ror, 0xb8, l_ll)           \
-  V(I64Rol, 0xb9, l_ll)
+  V(I32Eqz, 0x45, i_i)            \
+  V(I32Eq, 0x46, i_ii)            \
+  V(I32Ne, 0x47, i_ii)            \
+  V(I32LtS, 0x48, i_ii)           \
+  V(I32LtU, 0x49, i_ii)           \
+  V(I32GtS, 0x4a, i_ii)           \
+  V(I32GtU, 0x4b, i_ii)           \
+  V(I32LeS, 0x4c, i_ii)           \
+  V(I32LeU, 0x4d, i_ii)           \
+  V(I32GeS, 0x4e, i_ii)           \
+  V(I32GeU, 0x4f, i_ii)           \
+  V(I64Eqz, 0x50, i_l)            \
+  V(I64Eq, 0x51, i_ll)            \
+  V(I64Ne, 0x52, i_ll)            \
+  V(I64LtS, 0x53, i_ll)           \
+  V(I64LtU, 0x54, i_ll)           \
+  V(I64GtS, 0x55, i_ll)           \
+  V(I64GtU, 0x56, i_ll)           \
+  V(I64LeS, 0x57, i_ll)           \
+  V(I64LeU, 0x58, i_ll)           \
+  V(I64GeS, 0x59, i_ll)           \
+  V(I64GeU, 0x5a, i_ll)           \
+  V(F32Eq, 0x5b, i_ff)            \
+  V(F32Ne, 0x5c, i_ff)            \
+  V(F32Lt, 0x5d, i_ff)            \
+  V(F32Gt, 0x5e, i_ff)            \
+  V(F32Le, 0x5f, i_ff)            \
+  V(F32Ge, 0x60, i_ff)            \
+  V(F64Eq, 0x61, i_dd)            \
+  V(F64Ne, 0x62, i_dd)            \
+  V(F64Lt, 0x63, i_dd)            \
+  V(F64Gt, 0x64, i_dd)            \
+  V(F64Le, 0x65, i_dd)            \
+  V(F64Ge, 0x66, i_dd)            \
+  V(I32Clz, 0x67, i_i)            \
+  V(I32Ctz, 0x68, i_i)            \
+  V(I32Popcnt, 0x69, i_i)         \
+  V(I32Add, 0x6a, i_ii)           \
+  V(I32Sub, 0x6b, i_ii)           \
+  V(I32Mul, 0x6c, i_ii)           \
+  V(I32DivS, 0x6d, i_ii)          \
+  V(I32DivU, 0x6e, i_ii)          \
+  V(I32RemS, 0x6f, i_ii)          \
+  V(I32RemU, 0x70, i_ii)          \
+  V(I32And, 0x71, i_ii)           \
+  V(I32Ior, 0x72, i_ii)           \
+  V(I32Xor, 0x73, i_ii)           \
+  V(I32Shl, 0x74, i_ii)           \
+  V(I32ShrS, 0x75, i_ii)          \
+  V(I32ShrU, 0x76, i_ii)          \
+  V(I32Rol, 0x77, i_ii)           \
+  V(I32Ror, 0x78, i_ii)           \
+  V(I64Clz, 0x79, l_l)            \
+  V(I64Ctz, 0x7a, l_l)            \
+  V(I64Popcnt, 0x7b, l_l)         \
+  V(I64Add, 0x7c, l_ll)           \
+  V(I64Sub, 0x7d, l_ll)           \
+  V(I64Mul, 0x7e, l_ll)           \
+  V(I64DivS, 0x7f, l_ll)          \
+  V(I64DivU, 0x80, l_ll)          \
+  V(I64RemS, 0x81, l_ll)          \
+  V(I64RemU, 0x82, l_ll)          \
+  V(I64And, 0x83, l_ll)           \
+  V(I64Ior, 0x84, l_ll)           \
+  V(I64Xor, 0x85, l_ll)           \
+  V(I64Shl, 0x86, l_ll)           \
+  V(I64ShrS, 0x87, l_ll)          \
+  V(I64ShrU, 0x88, l_ll)          \
+  V(I64Rol, 0x89, l_ll)           \
+  V(I64Ror, 0x8a, l_ll)           \
+  V(F32Abs, 0x8b, f_f)            \
+  V(F32Neg, 0x8c, f_f)            \
+  V(F32Ceil, 0x8d, f_f)           \
+  V(F32Floor, 0x8e, f_f)          \
+  V(F32Trunc, 0x8f, f_f)          \
+  V(F32NearestInt, 0x90, f_f)     \
+  V(F32Sqrt, 0x91, f_f)           \
+  V(F32Add, 0x92, f_ff)           \
+  V(F32Sub, 0x93, f_ff)           \
+  V(F32Mul, 0x94, f_ff)           \
+  V(F32Div, 0x95, f_ff)           \
+  V(F32Min, 0x96, f_ff)           \
+  V(F32Max, 0x97, f_ff)           \
+  V(F32CopySign, 0x98, f_ff)      \
+  V(F64Abs, 0x99, d_d)            \
+  V(F64Neg, 0x9a, d_d)            \
+  V(F64Ceil, 0x9b, d_d)           \
+  V(F64Floor, 0x9c, d_d)          \
+  V(F64Trunc, 0x9d, d_d)          \
+  V(F64NearestInt, 0x9e, d_d)     \
+  V(F64Sqrt, 0x9f, d_d)           \
+  V(F64Add, 0xa0, d_dd)           \
+  V(F64Sub, 0xa1, d_dd)           \
+  V(F64Mul, 0xa2, d_dd)           \
+  V(F64Div, 0xa3, d_dd)           \
+  V(F64Min, 0xa4, d_dd)           \
+  V(F64Max, 0xa5, d_dd)           \
+  V(F64CopySign, 0xa6, d_dd)      \
+  V(I32ConvertI64, 0xa7, i_l)     \
+  V(I32SConvertF32, 0xa8, i_f)    \
+  V(I32UConvertF32, 0xa9, i_f)    \
+  V(I32SConvertF64, 0xaa, i_d)    \
+  V(I32UConvertF64, 0xab, i_d)    \
+  V(I64SConvertI32, 0xac, l_i)    \
+  V(I64UConvertI32, 0xad, l_i)    \
+  V(I64SConvertF32, 0xae, l_f)    \
+  V(I64UConvertF32, 0xaf, l_f)    \
+  V(I64SConvertF64, 0xb0, l_d)    \
+  V(I64UConvertF64, 0xb1, l_d)    \
+  V(F32SConvertI32, 0xb2, f_i)    \
+  V(F32UConvertI32, 0xb3, f_i)    \
+  V(F32SConvertI64, 0xb4, f_l)    \
+  V(F32UConvertI64, 0xb5, f_l)    \
+  V(F32ConvertF64, 0xb6, f_d)     \
+  V(F64SConvertI32, 0xb7, d_i)    \
+  V(F64UConvertI32, 0xb8, d_i)    \
+  V(F64SConvertI64, 0xb9, d_l)    \
+  V(F64UConvertI64, 0xba, d_l)    \
+  V(F64ConvertF32, 0xbb, d_f)     \
+  V(I32ReinterpretF32, 0xbc, i_f) \
+  V(I64ReinterpretF64, 0xbd, l_d) \
+  V(F32ReinterpretI32, 0xbe, f_i) \
+  V(F64ReinterpretI64, 0xbf, d_l)
 
 // For compatibility with Asm.js.
 #define FOREACH_ASMJS_COMPAT_OPCODE(V) \
@@ -400,18 +400,55 @@
   V(I16x8ExtractLane, 0xe539, _)         \
   V(I8x16ExtractLane, 0xe558, _)
 
+#define FOREACH_ATOMIC_OPCODE(V)               \
+  V(I32AtomicAdd8S, 0xe601, i_ii)              \
+  V(I32AtomicAdd8U, 0xe602, i_ii)              \
+  V(I32AtomicAdd16S, 0xe603, i_ii)             \
+  V(I32AtomicAdd16U, 0xe604, i_ii)             \
+  V(I32AtomicAdd32, 0xe605, i_ii)              \
+  V(I32AtomicAnd8S, 0xe606, i_ii)              \
+  V(I32AtomicAnd8U, 0xe607, i_ii)              \
+  V(I32AtomicAnd16S, 0xe608, i_ii)             \
+  V(I32AtomicAnd16U, 0xe609, i_ii)             \
+  V(I32AtomicAnd32, 0xe60a, i_ii)              \
+  V(I32AtomicCompareExchange8S, 0xe60b, i_ii)  \
+  V(I32AtomicCompareExchange8U, 0xe60c, i_ii)  \
+  V(I32AtomicCompareExchange16S, 0xe60d, i_ii) \
+  V(I32AtomicCompareExchange16U, 0xe60e, i_ii) \
+  V(I32AtomicCompareExchange32, 0xe60f, i_ii)  \
+  V(I32AtomicExchange8S, 0xe610, i_ii)         \
+  V(I32AtomicExchange8U, 0xe611, i_ii)         \
+  V(I32AtomicExchange16S, 0xe612, i_ii)        \
+  V(I32AtomicExchange16U, 0xe613, i_ii)        \
+  V(I32AtomicExchange32, 0xe614, i_ii)         \
+  V(I32AtomicOr8S, 0xe615, i_ii)               \
+  V(I32AtomicOr8U, 0xe616, i_ii)               \
+  V(I32AtomicOr16S, 0xe617, i_ii)              \
+  V(I32AtomicOr16U, 0xe618, i_ii)              \
+  V(I32AtomicOr32, 0xe619, i_ii)               \
+  V(I32AtomicSub8S, 0xe61a, i_ii)              \
+  V(I32AtomicSub8U, 0xe61b, i_ii)              \
+  V(I32AtomicSub16S, 0xe61c, i_ii)             \
+  V(I32AtomicSub16U, 0xe61d, i_ii)             \
+  V(I32AtomicSub32, 0xe61e, i_ii)              \
+  V(I32AtomicXor8S, 0xe61f, i_ii)              \
+  V(I32AtomicXor8U, 0xe620, i_ii)              \
+  V(I32AtomicXor16S, 0xe621, i_ii)             \
+  V(I32AtomicXor16U, 0xe622, i_ii)             \
+  V(I32AtomicXor32, 0xe623, i_ii)
+
 // All opcodes.
 #define FOREACH_OPCODE(V)          \
   FOREACH_CONTROL_OPCODE(V)        \
   FOREACH_MISC_OPCODE(V)           \
   FOREACH_SIMPLE_OPCODE(V)         \
-  FOREACH_SIMPLE_MEM_OPCODE(V)     \
   FOREACH_STORE_MEM_OPCODE(V)      \
   FOREACH_LOAD_MEM_OPCODE(V)       \
   FOREACH_MISC_MEM_OPCODE(V)       \
   FOREACH_ASMJS_COMPAT_OPCODE(V)   \
   FOREACH_SIMD_0_OPERAND_OPCODE(V) \
-  FOREACH_SIMD_1_OPERAND_OPCODE(V)
+  FOREACH_SIMD_1_OPERAND_OPCODE(V) \
+  FOREACH_ATOMIC_OPCODE(V)
 
 // All signatures.
 #define FOREACH_SIGNATURE(V)         \
@@ -454,7 +491,9 @@
   V(s_sii, kAstS128, kAstS128, kAstI32, kAstI32)   \
   V(s_si, kAstS128, kAstS128, kAstI32)
 
-#define FOREACH_PREFIX(V) V(Simd, 0xe5)
+#define FOREACH_PREFIX(V) \
+  V(Simd, 0xe5)           \
+  V(Atomic, 0xe6)
 
 enum WasmOpcode {
 // Declare expression opcodes.
@@ -486,12 +525,13 @@
 };
 
 // A collection of opcode-related static methods.
-class WasmOpcodes {
+class V8_EXPORT_PRIVATE WasmOpcodes {
  public:
   static const char* OpcodeName(WasmOpcode opcode);
   static const char* ShortOpcodeName(WasmOpcode opcode);
   static FunctionSig* Signature(WasmOpcode opcode);
   static FunctionSig* AsmjsSignature(WasmOpcode opcode);
+  static FunctionSig* AtomicSignature(WasmOpcode opcode);
   static bool IsPrefixOpcode(WasmOpcode opcode);
 
   static int TrapReasonToMessageId(TrapReason reason);
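The renumbering above tracks the finalized WebAssembly binary encoding: type
codes are the one-byte signed-LEB128 encodings of small negative values (i32
is -0x01, which encodes as 0x7f), and opcode values now match the spec's
opcode table (i32.const is 0x41, the loads start at 0x28, and so on).
Compile-time spot checks (illustrative, not part of the patch):

    static_assert(kLocalI32 == 0x7f, "i32 type code is sLEB128 of -0x01");
    static_assert(kExprI32Const == 0x41, "i32.const matches the spec table");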
diff --git a/src/wasm/wasm-result.cc b/src/wasm/wasm-result.cc
index 7d251f0..6d535e3 100644
--- a/src/wasm/wasm-result.cc
+++ b/src/wasm/wasm-result.cc
@@ -46,14 +46,6 @@
   exception_ = isolate_->factory()->NewError(constructor, message);
 }
 
-void ErrorThrower::Error(const char* format, ...) {
-  if (error()) return;
-  va_list arguments;
-  va_start(arguments, format);
-  Format(isolate_->error_function(), format, arguments);
-  va_end(arguments);
-}
-
 void ErrorThrower::TypeError(const char* format, ...) {
   if (error()) return;
   va_list arguments;
@@ -66,11 +58,26 @@
   if (error()) return;
   va_list arguments;
   va_start(arguments, format);
-  CHECK(*isolate_->range_error_function() != *isolate_->type_error_function());
   Format(isolate_->range_error_function(), format, arguments);
   va_end(arguments);
 }
 
+void ErrorThrower::CompileError(const char* format, ...) {
+  if (error()) return;
+  va_list arguments;
+  va_start(arguments, format);
+  Format(isolate_->wasm_compile_error_function(), format, arguments);
+  va_end(arguments);
+}
+
+void ErrorThrower::RuntimeError(const char* format, ...) {
+  if (error()) return;
+  va_list arguments;
+  va_start(arguments, format);
+  Format(isolate_->wasm_runtime_error_function(), format, arguments);
+  va_end(arguments);
+}
+
 ErrorThrower::~ErrorThrower() {
   if (error() && !isolate_->has_pending_exception()) {
     isolate_->ScheduleThrow(*exception_);
diff --git a/src/wasm/wasm-result.h b/src/wasm/wasm-result.h
index ecc54e5..53c6b8d 100644
--- a/src/wasm/wasm-result.h
+++ b/src/wasm/wasm-result.h
@@ -82,7 +82,8 @@
   return os;
 }
 
-std::ostream& operator<<(std::ostream& os, const ErrorCode& error_code);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+                                           const ErrorCode& error_code);
 
 // A helper for generating error messages that bubble up to JS exceptions.
 class V8_EXPORT_PRIVATE ErrorThrower {
@@ -91,15 +92,16 @@
       : isolate_(isolate), context_(context) {}
   ~ErrorThrower();
 
-  PRINTF_FORMAT(2, 3) void Error(const char* fmt, ...);
   PRINTF_FORMAT(2, 3) void TypeError(const char* fmt, ...);
   PRINTF_FORMAT(2, 3) void RangeError(const char* fmt, ...);
+  PRINTF_FORMAT(2, 3) void CompileError(const char* fmt, ...);
+  PRINTF_FORMAT(2, 3) void RuntimeError(const char* fmt, ...);
 
   template <typename T>
-  void Failed(const char* error, Result<T>& result) {
+  void CompileFailed(const char* error, Result<T>& result) {
     std::ostringstream str;
     str << error << result;
-    Error("%s", str.str().c_str());
+    CompileError("%s", str.str().c_str());
   }
 
   i::Handle<i::Object> Reify() {
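With Error() removed, module decode and compile failures are funneled through
the typed throwers, surfacing as WebAssembly.CompileError instead of a plain
Error. Caller-side sketch (the decode call is a placeholder; only the
ErrorThrower API is from this patch):

    ErrorThrower thrower(isolate, "WasmExample");
    ModuleResult result = DecodeModule(bytes, length);  // placeholder decode
    if (result.failed()) {
      // Formats the message and schedules a WebAssembly.CompileError.
      thrower.CompileFailed("Wasm decoding failed: ", result);
      return;
    }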
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index d202aad..5402a8c 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -402,8 +402,9 @@
 
   // Some internal data structures overflow for very large buffers,
   // they must ensure that kMaximalBufferSize is not too large.
-  if ((desc.buffer_size > kMaximalBufferSize) ||
-      (desc.buffer_size > isolate()->heap()->MaxOldGenerationSize())) {
+  if (desc.buffer_size > kMaximalBufferSize ||
+      static_cast<size_t>(desc.buffer_size) >
+          isolate()->heap()->MaxOldGenerationSize()) {
     V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
   }
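The new static_cast matters because desc.buffer_size is a plain int while the
heap limit accessor returns an unsigned size_t (the cast added here implies
exactly that mismatch); comparing the two directly converts the signed
operand to unsigned. Standalone illustration of the hazard (not V8 code):

    int n = -1;
    size_t limit = 100;
    // Implicitly or explicitly, n is converted to size_t and wraps to
    // SIZE_MAX, so the comparison is true; the patch spells the conversion
    // out where the value is known to be non-negative.
    bool surprising = static_cast<size_t>(n) > limit;  // true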
 
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 5de891c..e8ee9e4 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -1993,7 +1993,8 @@
 
   // Record a deoptimization reason that can be used by a log or cpu profiler.
   // Use --trace-deopt to enable.
-  void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
+  void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
+                         int id);
 
   void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
                                           ConstantPoolEntry::Access access,
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 2a962b3..d62aafe 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -693,13 +693,10 @@
   __ leal(rdx, Operand(rax, rax, times_1, 2));
 
   // rdx: Number of capture registers
-  // Check that the fourth object is a JSObject.
-  __ movp(r15, args.GetArgumentOperand(LAST_MATCH_INFO_ARGUMENT_INDEX));
-  __ JumpIfSmi(r15, &runtime);
-  __ CmpObjectType(r15, JS_OBJECT_TYPE, kScratchRegister);
-  __ j(not_equal, &runtime);
+  // Check that the last match info is a FixedArray.
+  __ movp(rbx, args.GetArgumentOperand(LAST_MATCH_INFO_ARGUMENT_INDEX));
+  __ JumpIfSmi(rbx, &runtime);
   // Check that the object has fast elements.
-  __ movp(rbx, FieldOperand(r15, JSArray::kElementsOffset));
   __ movp(rax, FieldOperand(rbx, HeapObject::kMapOffset));
   __ CompareRoot(rax, Heap::kFixedArrayMapRootIndex);
   __ j(not_equal, &runtime);
@@ -707,43 +704,37 @@
   // additional information. Ensure no overflow in add.
   STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
   __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
-  __ subl(rax, Immediate(RegExpImpl::kLastMatchOverhead));
+  __ subl(rax, Immediate(RegExpMatchInfo::kLastMatchOverhead));
   __ cmpl(rdx, rax);
   __ j(greater, &runtime);
 
-  // rbx: last_match_info backing store (FixedArray)
+  // rbx: last_match_info (FixedArray)
   // rdx: number of capture registers
   // Store the capture count.
   __ Integer32ToSmi(kScratchRegister, rdx);
-  __ movp(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
+  __ movp(FieldOperand(rbx, RegExpMatchInfo::kNumberOfCapturesOffset),
           kScratchRegister);
   // Store last subject and last input.
   __ movp(rax, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
-  __ movp(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
+  __ movp(FieldOperand(rbx, RegExpMatchInfo::kLastSubjectOffset), rax);
   __ movp(rcx, rax);
-  __ RecordWriteField(rbx,
-                      RegExpImpl::kLastSubjectOffset,
-                      rax,
-                      rdi,
+  __ RecordWriteField(rbx, RegExpMatchInfo::kLastSubjectOffset, rax, rdi,
                       kDontSaveFPRegs);
   __ movp(rax, rcx);
-  __ movp(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
-  __ RecordWriteField(rbx,
-                      RegExpImpl::kLastInputOffset,
-                      rax,
-                      rdi,
+  __ movp(FieldOperand(rbx, RegExpMatchInfo::kLastInputOffset), rax);
+  __ RecordWriteField(rbx, RegExpMatchInfo::kLastInputOffset, rax, rdi,
                       kDontSaveFPRegs);
 
   // Get the static offsets vector filled by the native regexp code.
   __ LoadAddress(
       rcx, ExternalReference::address_of_static_offsets_vector(isolate()));
 
-  // rbx: last_match_info backing store (FixedArray)
+  // rbx: last_match_info (FixedArray)
   // rcx: offsets vector
   // rdx: number of capture registers
   Label next_capture, done;
   // Capture register counter starts from number of capture registers and
-  // counts down until wraping after zero.
+  // counts down until wrapping after zero.
   __ bind(&next_capture);
   __ subp(rdx, Immediate(1));
   __ j(negative, &done, Label::kNear);
@@ -751,16 +742,14 @@
   __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
   __ Integer32ToSmi(rdi, rdi);
   // Store the smi value in the last match info.
-  __ movp(FieldOperand(rbx,
-                       rdx,
-                       times_pointer_size,
-                       RegExpImpl::kFirstCaptureOffset),
+  __ movp(FieldOperand(rbx, rdx, times_pointer_size,
+                       RegExpMatchInfo::kFirstCaptureOffset),
           rdi);
   __ jmp(&next_capture);
   __ bind(&done);
 
   // Return last match info.
-  __ movp(rax, r15);
+  __ movp(rax, rbx);
   __ ret(REG_EXP_EXEC_ARGUMENT_COUNT * kPointerSize);
 
   __ bind(&exception);
@@ -1324,29 +1313,25 @@
   __ cmpp(rdi, r8);
   __ j(not_equal, miss);
 
-  __ movp(rax, Immediate(arg_count()));
-
   // Increment the call count for monomorphic function calls.
   IncrementCallCount(masm, rbx, rdx);
 
   __ movp(rbx, rcx);
   __ movp(rdx, rdi);
-  ArrayConstructorStub stub(masm->isolate(), arg_count());
+  ArrayConstructorStub stub(masm->isolate());
   __ TailCallStub(&stub);
 }
 
 
 void CallICStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
+  // -- rax - number of arguments
   // -- rdi - function
   // -- rdx - slot id
   // -- rbx - vector
   // -----------------------------------
   Isolate* isolate = masm->isolate();
   Label extra_checks_or_miss, call, call_function, call_count_incremented;
-  int argc = arg_count();
-  StackArgumentsAccessor args(rsp, argc);
-  ParameterCount actual(argc);
 
   // The checks. First, does rdi match the recorded monomorphic target?
   __ SmiToInteger32(rdx, rdx);
@@ -1378,7 +1363,6 @@
   // Increment the call count for monomorphic function calls.
   IncrementCallCount(masm, rbx, rdx);
 
-  __ Set(rax, argc);
   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
                                                     tail_call_mode()),
           RelocInfo::CODE_TARGET);
@@ -1422,7 +1406,6 @@
   IncrementCallCount(masm, rbx, rdx);
 
   __ bind(&call_count_incremented);
-  __ Set(rax, argc);
   __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
           RelocInfo::CODE_TARGET);
 
@@ -1455,7 +1438,9 @@
     FrameScope scope(masm, StackFrame::INTERNAL);
     CreateWeakCellStub create_stub(isolate);
 
+    __ Integer32ToSmi(rax, rax);
     __ Integer32ToSmi(rdx, rdx);
+    __ Push(rax);
     __ Push(rbx);
     __ Push(rdx);
     __ Push(rdi);
@@ -1465,7 +1450,9 @@
     __ Pop(rdi);
     __ Pop(rdx);
     __ Pop(rbx);
+    __ Pop(rax);
     __ SmiToInteger32(rdx, rdx);
+    __ SmiToInteger32(rax, rax);
   }
 
   __ jmp(&call_function);
@@ -1484,6 +1471,10 @@
 void CallICStub::GenerateMiss(MacroAssembler* masm) {
   FrameScope scope(masm, StackFrame::INTERNAL);
 
+  // Preserve the number of arguments.
+  __ Integer32ToSmi(rax, rax);
+  __ Push(rax);
+
   // Push the receiver and the function and feedback info.
   __ Integer32ToSmi(rdx, rdx);
   __ Push(rdi);
@@ -1495,6 +1486,10 @@
 
   // Move result to rdi and exit the internal frame.
   __ movp(rdi, rax);
+
+  // Restore number of arguments.
+  __ Pop(rax);
+  __ SmiToInteger32(rax, rax);
 }
 
 bool CEntryStub::NeedsImmovableCode() {
@@ -2954,21 +2949,6 @@
   __ jmp(rcx);  // Return to IC Miss stub, continuation still on stack.
 }
 
-
-void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  LoadICStub stub(isolate());
-  stub.GenerateForTrampoline(masm);
-}
-
-
-void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  KeyedLoadICStub stub(isolate());
-  stub.GenerateForTrampoline(masm);
-}
-
-
 static void HandleArrayCases(MacroAssembler* masm, Register feedback,
                              Register receiver_map, Register scratch1,
                              Register scratch2, Register scratch3,
@@ -3040,190 +3020,12 @@
   __ jmp(handler);
 }
 
-
-void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-
-void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-
-void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // rdx
-  Register name = LoadWithVectorDescriptor::NameRegister();          // rcx
-  Register vector = LoadWithVectorDescriptor::VectorRegister();      // rbx
-  Register slot = LoadWithVectorDescriptor::SlotRegister();          // rax
-  Register feedback = rdi;
-  Register integer_slot = r8;
-  Register receiver_map = r9;
-
-  __ SmiToInteger32(integer_slot, slot);
-  __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
-                                 FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector,
-                        integer_slot, &compare_map, &load_smi_map, &try_array);
-
-  // Is it a fixed array?
-  __ bind(&try_array);
-  __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &not_array);
-  HandleArrayCases(masm, feedback, receiver_map, integer_slot, r11, r15, true,
-                   &miss);
-
-  __ bind(&not_array);
-  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
-  __ j(not_equal, &miss);
-  masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, name,
-                                                    feedback, no_reg);
-
-  __ bind(&miss);
-  LoadIC::GenerateMiss(masm);
-
-  __ bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ jmp(&compare_map);
-}
-
-
-void KeyedLoadICStub::Generate(MacroAssembler* masm) {
-  GenerateImpl(masm, false);
-}
-
-
-void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-
-void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // rdx
-  Register key = LoadWithVectorDescriptor::NameRegister();           // rcx
-  Register vector = LoadWithVectorDescriptor::VectorRegister();      // rbx
-  Register slot = LoadWithVectorDescriptor::SlotRegister();          // rax
-  Register feedback = rdi;
-  Register integer_slot = r8;
-  Register receiver_map = r9;
-
-  __ SmiToInteger32(integer_slot, slot);
-  __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
-                                 FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector,
-                        integer_slot, &compare_map, &load_smi_map, &try_array);
-
-  __ bind(&try_array);
-  // Is it a fixed array?
-  __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &not_array);
-
-  // We have a polymorphic element handler.
-  Label polymorphic, try_poly_name;
-  __ bind(&polymorphic);
-  HandleArrayCases(masm, feedback, receiver_map, integer_slot, r11, r15, true,
-                   &miss);
-
-  __ bind(&not_array);
-  // Is it generic?
-  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
-  __ j(not_equal, &try_poly_name);
-  Handle<Code> megamorphic_stub =
-      KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
-  __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
-
-  __ bind(&try_poly_name);
-  // We might have a name in feedback, and a fixed array in the next slot.
-  __ cmpp(key, feedback);
-  __ j(not_equal, &miss);
-  // If the name comparison succeeded, we know we have a fixed array with
-  // at least one map/handler pair.
-  __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
-                                 FixedArray::kHeaderSize + kPointerSize));
-  HandleArrayCases(masm, feedback, receiver_map, integer_slot, r11, r15, false,
-                   &miss);
-
-  __ bind(&miss);
-  KeyedLoadIC::GenerateMiss(masm);
-
-  __ bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ jmp(&compare_map);
-}
-
-void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
-  StoreICStub stub(isolate(), state());
-  stub.GenerateForTrampoline(masm);
-}
-
 void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
   KeyedStoreICStub stub(isolate(), state());
   stub.GenerateForTrampoline(masm);
 }
 
-void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // rdx
-  Register key = StoreWithVectorDescriptor::NameRegister();           // rcx
-  Register vector = StoreWithVectorDescriptor::VectorRegister();      // rbx
-  Register slot = StoreWithVectorDescriptor::SlotRegister();          // rdi
-  DCHECK(StoreWithVectorDescriptor::ValueRegister().is(rax));         // rax
-  Register feedback = r8;
-  Register integer_slot = r9;
-  Register receiver_map = r11;
-  DCHECK(!AreAliased(feedback, integer_slot, vector, slot, receiver_map));
-
-  __ SmiToInteger32(integer_slot, slot);
-  __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
-                                 FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector,
-                        integer_slot, &compare_map, &load_smi_map, &try_array);
-
-  // Is it a fixed array?
-  __ bind(&try_array);
-  __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &not_array);
-  HandleArrayCases(masm, feedback, receiver_map, integer_slot, r14, r15, true,
-                   &miss);
-
-  __ bind(&not_array);
-  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
-  __ j(not_equal, &miss);
-
-  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key,
-                                                     feedback, no_reg);
-
-  __ bind(&miss);
-  StoreIC::GenerateMiss(masm);
-
-  __ bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ jmp(&compare_map);
-}
-
 void KeyedStoreICStub::Generate(MacroAssembler* masm) {
   GenerateImpl(masm, false);
 }
@@ -3539,37 +3341,23 @@
   }
 }
 
-
 void ArrayConstructorStub::GenerateDispatchToArrayStub(
-    MacroAssembler* masm,
-    AllocationSiteOverrideMode mode) {
-  if (argument_count() == ANY) {
-    Label not_zero_case, not_one_case;
-    __ testp(rax, rax);
-    __ j(not_zero, &not_zero_case);
-    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+    MacroAssembler* masm, AllocationSiteOverrideMode mode) {
+  Label not_zero_case, not_one_case;
+  __ testp(rax, rax);
+  __ j(not_zero, &not_zero_case);
+  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
 
-    __ bind(&not_zero_case);
-    __ cmpl(rax, Immediate(1));
-    __ j(greater, &not_one_case);
-    CreateArrayDispatchOneArgument(masm, mode);
+  __ bind(&not_zero_case);
+  __ cmpl(rax, Immediate(1));
+  __ j(greater, &not_one_case);
+  CreateArrayDispatchOneArgument(masm, mode);
 
-    __ bind(&not_one_case);
-    ArrayNArgumentsConstructorStub stub(masm->isolate());
-    __ TailCallStub(&stub);
-  } else if (argument_count() == NONE) {
-    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-  } else if (argument_count() == ONE) {
-    CreateArrayDispatchOneArgument(masm, mode);
-  } else if (argument_count() == MORE_THAN_ONE) {
-    ArrayNArgumentsConstructorStub stub(masm->isolate());
-    __ TailCallStub(&stub);
-  } else {
-    UNREACHABLE();
-  }
+  __ bind(&not_one_case);
+  ArrayNArgumentsConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
 }
 
-
 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax    : argc
@@ -3621,27 +3409,9 @@
 
   // Subclassing
   __ bind(&subclassing);
-  switch (argument_count()) {
-    case ANY:
-    case MORE_THAN_ONE: {
-      StackArgumentsAccessor args(rsp, rax);
-      __ movp(args.GetReceiverOperand(), rdi);
-      __ addp(rax, Immediate(3));
-      break;
-    }
-    case NONE: {
-      StackArgumentsAccessor args(rsp, 0);
-      __ movp(args.GetReceiverOperand(), rdi);
-      __ Set(rax, 3);
-      break;
-    }
-    case ONE: {
-      StackArgumentsAccessor args(rsp, 1);
-      __ movp(args.GetReceiverOperand(), rdi);
-      __ Set(rax, 4);
-      break;
-    }
-  }
+  StackArgumentsAccessor args(rsp, rax);
+  __ movp(args.GetReceiverOperand(), rdi);
+  __ addp(rax, Immediate(3));
   __ PopReturnAddressTo(rcx);
   __ Push(rdx);
   __ Push(rbx);
@@ -4388,129 +4158,6 @@
 }
 
 
-void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
-  Register context_reg = rsi;
-  Register slot_reg = rbx;
-  Register value_reg = rax;
-  Register cell_reg = r8;
-  Register cell_details_reg = rdx;
-  Register cell_value_reg = r9;
-  Label fast_heapobject_case, fast_smi_case, slow_case;
-
-  if (FLAG_debug_code) {
-    __ CompareRoot(value_reg, Heap::kTheHoleValueRootIndex);
-    __ Check(not_equal, kUnexpectedValue);
-  }
-
-  // Go up context chain to the script context.
-  for (int i = 0; i < depth(); ++i) {
-    __ movp(rdi, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
-    context_reg = rdi;
-  }
-
-  // Load the PropertyCell at the specified slot.
-  __ movp(cell_reg, ContextOperand(context_reg, slot_reg));
-
-  // Load PropertyDetails for the cell (actually only the cell_type, kind and
-  // READ_ONLY bit of attributes).
-  __ SmiToInteger32(cell_details_reg,
-                    FieldOperand(cell_reg, PropertyCell::kDetailsOffset));
-  __ andl(cell_details_reg,
-          Immediate(PropertyDetails::PropertyCellTypeField::kMask |
-                    PropertyDetails::KindField::kMask |
-                    PropertyDetails::kAttributesReadOnlyMask));
-
-  // Check if PropertyCell holds mutable data.
-  Label not_mutable_data;
-  __ cmpl(cell_details_reg,
-          Immediate(PropertyDetails::PropertyCellTypeField::encode(
-                        PropertyCellType::kMutable) |
-                    PropertyDetails::KindField::encode(kData)));
-  __ j(not_equal, &not_mutable_data);
-  __ JumpIfSmi(value_reg, &fast_smi_case);
-  __ bind(&fast_heapobject_case);
-  __ movp(FieldOperand(cell_reg, PropertyCell::kValueOffset), value_reg);
-  __ RecordWriteField(cell_reg, PropertyCell::kValueOffset, value_reg,
-                      cell_value_reg, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  // RecordWriteField clobbers the value register, so we need to reload.
-  __ movp(value_reg, FieldOperand(cell_reg, PropertyCell::kValueOffset));
-  __ Ret();
-  __ bind(&not_mutable_data);
-
-  // Check if PropertyCell value matches the new value (relevant for Constant,
-  // ConstantType and Undefined cells).
-  Label not_same_value;
-  __ movp(cell_value_reg, FieldOperand(cell_reg, PropertyCell::kValueOffset));
-  __ cmpp(cell_value_reg, value_reg);
-  __ j(not_equal, &not_same_value,
-       FLAG_debug_code ? Label::kFar : Label::kNear);
-  // Make sure the PropertyCell is not marked READ_ONLY.
-  __ testl(cell_details_reg,
-           Immediate(PropertyDetails::kAttributesReadOnlyMask));
-  __ j(not_zero, &slow_case);
-  if (FLAG_debug_code) {
-    Label done;
-    // This can only be true for Constant, ConstantType and Undefined cells,
-    // because we never store the_hole via this stub.
-    __ cmpl(cell_details_reg,
-            Immediate(PropertyDetails::PropertyCellTypeField::encode(
-                          PropertyCellType::kConstant) |
-                      PropertyDetails::KindField::encode(kData)));
-    __ j(equal, &done);
-    __ cmpl(cell_details_reg,
-            Immediate(PropertyDetails::PropertyCellTypeField::encode(
-                          PropertyCellType::kConstantType) |
-                      PropertyDetails::KindField::encode(kData)));
-    __ j(equal, &done);
-    __ cmpl(cell_details_reg,
-            Immediate(PropertyDetails::PropertyCellTypeField::encode(
-                          PropertyCellType::kUndefined) |
-                      PropertyDetails::KindField::encode(kData)));
-    __ Check(equal, kUnexpectedValue);
-    __ bind(&done);
-  }
-  __ Ret();
-  __ bind(&not_same_value);
-
-  // Check if PropertyCell contains data with constant type (and is not
-  // READ_ONLY).
-  __ cmpl(cell_details_reg,
-          Immediate(PropertyDetails::PropertyCellTypeField::encode(
-                        PropertyCellType::kConstantType) |
-                    PropertyDetails::KindField::encode(kData)));
-  __ j(not_equal, &slow_case, Label::kNear);
-
-  // Now either both old and new values must be SMIs or both must be heap
-  // objects with same map.
-  Label value_is_heap_object;
-  __ JumpIfNotSmi(value_reg, &value_is_heap_object, Label::kNear);
-  __ JumpIfNotSmi(cell_value_reg, &slow_case, Label::kNear);
-  // Old and new values are SMIs, no need for a write barrier here.
-  __ bind(&fast_smi_case);
-  __ movp(FieldOperand(cell_reg, PropertyCell::kValueOffset), value_reg);
-  __ Ret();
-  __ bind(&value_is_heap_object);
-  __ JumpIfSmi(cell_value_reg, &slow_case, Label::kNear);
-  Register cell_value_map_reg = cell_value_reg;
-  __ movp(cell_value_map_reg,
-          FieldOperand(cell_value_reg, HeapObject::kMapOffset));
-  __ cmpp(cell_value_map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
-  __ j(equal, &fast_heapobject_case);
-
-  // Fallback to the runtime.
-  __ bind(&slow_case);
-  __ Integer32ToSmi(slot_reg, slot_reg);
-  __ PopReturnAddressTo(kScratchRegister);
-  __ Push(slot_reg);
-  __ Push(value_reg);
-  __ Push(kScratchRegister);
-  __ TailCallRuntime(is_strict(language_mode())
-                         ? Runtime::kStoreGlobalViaContext_Strict
-                         : Runtime::kStoreGlobalViaContext_Sloppy);
-}
-
-
 static int Offset(ExternalReference ref0, ExternalReference ref1) {
   int64_t offset = (ref0.address() - ref1.address());
   // Check that fits into int.
@@ -4853,7 +4500,7 @@
   __ Push(kScratchRegister);  // return value default
   __ PushAddress(ExternalReference::isolate_address(isolate()));
   __ Push(holder);
-  __ Push(Smi::FromInt(0));  // should_throw_on_error -> false
+  __ Push(Smi::kZero);  // should_throw_on_error -> false
   __ Push(FieldOperand(callback, AccessorInfo::kNameOffset));
   __ PushReturnAddressFrom(scratch);
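The common thread in the CallICStub changes above: rax now arrives holding
the untagged argument count, so every path that can allocate must tag it as
a Smi while it lives on the stack. The shape used in GenerateMiss, shown for
reference:

    __ Integer32ToSmi(rax, rax);  // tag so stack walks see a valid Smi
    __ Push(rax);
    // ... runtime call that may GC ...
    __ Pop(rax);
    __ SmiToInteger32(rax, rax);  // back to the raw count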
 
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index 35da7a2..9fbf69e 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -66,7 +66,7 @@
 #endif
   DeoptimizationInputData* deopt_data =
       DeoptimizationInputData::cast(code->deoptimization_data());
-  deopt_data->SetSharedFunctionInfo(Smi::FromInt(0));
+  deopt_data->SetSharedFunctionInfo(Smi::kZero);
   // For each LLazyBailout instruction insert a call to the corresponding
   // deoptimization entry.
   for (int i = 0; i < deopt_data->DeoptCount(); i++) {
diff --git a/src/x64/interface-descriptors-x64.cc b/src/x64/interface-descriptors-x64.cc
index 9e48644..3ee4412 100644
--- a/src/x64/interface-descriptors-x64.cc
+++ b/src/x64/interface-descriptors-x64.cc
@@ -29,9 +29,9 @@
 const Register LoadDescriptor::NameRegister() { return rcx; }
 const Register LoadDescriptor::SlotRegister() { return rax; }
 
-
 const Register LoadWithVectorDescriptor::VectorRegister() { return rbx; }
 
+const Register LoadICProtoArrayDescriptor::HandlerRegister() { return rdi; }
 
 const Register StoreDescriptor::ReceiverRegister() { return rdx; }
 const Register StoreDescriptor::NameRegister() { return rcx; }
@@ -44,10 +44,6 @@
 const Register StoreTransitionDescriptor::VectorRegister() { return rbx; }
 const Register StoreTransitionDescriptor::MapRegister() { return r11; }
 
-const Register StoreGlobalViaContextDescriptor::SlotRegister() { return rbx; }
-const Register StoreGlobalViaContextDescriptor::ValueRegister() { return rax; }
-
-
 const Register StringCompareDescriptor::LeftRegister() { return rdx; }
 const Register StringCompareDescriptor::RightRegister() { return rax; }
 
@@ -158,7 +154,7 @@
 
 void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {rdi, rdx, rbx};
+  Register registers[] = {rdi, rax, rdx, rbx};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
@@ -206,13 +202,6 @@
 }
 
 
-void RegExpConstructResultDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {rcx, rbx, rax};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
 void TransitionElementsKindDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {rax, rbx};
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 0fd6333..8d70f54 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -676,20 +676,6 @@
   return has_frame_ || !stub->SometimesSetsUpAFrame();
 }
 
-
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
-  // The assert checks that the constants for the maximum number of digits
-  // for an array index cached in the hash field and the number of bits
-  // reserved for it does not conflict.
-  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
-         (1 << String::kArrayIndexValueBits));
-  if (!hash.is(index)) {
-    movl(index, hash);
-  }
-  DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
-}
-
-
 void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                  int num_arguments,
                                  SaveFPRegsMode save_doubles) {
@@ -2371,7 +2357,7 @@
   Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
 #endif
   STATIC_ASSERT(kSmiTag == 0);
-  DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+  DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
   movl(kScratchRegister, Immediate(kSmiTagMask));
   andp(kScratchRegister, src1);
   testl(kScratchRegister, src2);
@@ -3677,20 +3663,6 @@
        Immediate(static_cast<int8_t>(type)));
 }
 
-
-void MacroAssembler::CheckFastElements(Register map,
-                                       Label* fail,
-                                       Label::Distance distance) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  STATIC_ASSERT(FAST_ELEMENTS == 2);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
-  cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Immediate(Map::kMaximumBitField2FastHoleyElementValue));
-  j(above, fail, distance);
-}
-
-
 void MacroAssembler::CheckFastObjectElements(Register map,
                                              Label* fail,
                                              Label::Distance distance) {
@@ -4686,82 +4658,6 @@
 }
 
 
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
-                                            Register scratch,
-                                            Label* miss) {
-  Label same_contexts;
-
-  DCHECK(!holder_reg.is(scratch));
-  DCHECK(!scratch.is(kScratchRegister));
-  // Load current lexical context from the active StandardFrame, which
-  // may require crawling past STUB frames.
-  Label load_context;
-  Label has_context;
-  movp(scratch, rbp);
-  bind(&load_context);
-  DCHECK(SmiValuesAre32Bits());
-  // This is "JumpIfNotSmi" but without loading the value into a register.
-  cmpl(MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset),
-       Immediate(0));
-  j(not_equal, &has_context);
-  movp(scratch, MemOperand(scratch, CommonFrameConstants::kCallerFPOffset));
-  jmp(&load_context);
-  bind(&has_context);
-  movp(scratch,
-       MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
-
-  // When generating debug code, make sure the lexical context is set.
-  if (emit_debug_code()) {
-    cmpp(scratch, Immediate(0));
-    Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
-  }
-  // Load the native context of the current context.
-  movp(scratch, ContextOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
-
-  // Check the context is a native context.
-  if (emit_debug_code()) {
-    Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
-        isolate()->factory()->native_context_map());
-    Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
-  }
-
-  // Check if both contexts are the same.
-  cmpp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
-  j(equal, &same_contexts);
-
-  // Compare security tokens.
-  // Check that the security token in the calling global object is
-  // compatible with the security token in the receiving global
-  // object.
-
-  // Check the context is a native context.
-  if (emit_debug_code()) {
-    // Preserve original value of holder_reg.
-    Push(holder_reg);
-    movp(holder_reg,
-         FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
-    CompareRoot(holder_reg, Heap::kNullValueRootIndex);
-    Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
-
-    // Read the first word and compare to native_context_map(),
-    movp(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
-    CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
-    Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
-    Pop(holder_reg);
-  }
-
-  movp(kScratchRegister,
-       FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
-  int token_offset =
-      Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
-  movp(scratch, FieldOperand(scratch, token_offset));
-  cmpp(scratch, FieldOperand(kScratchRegister, token_offset));
-  j(not_equal, miss);
-
-  bind(&same_contexts);
-}
-
-
 // Compute the hash code from the untagged key.  This must be kept in sync with
 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
 // code-stub-hydrogen.cc
@@ -4800,87 +4696,6 @@
   andl(r0, Immediate(0x3fffffff));
 }
 
-
-
-void MacroAssembler::LoadFromNumberDictionary(Label* miss,
-                                              Register elements,
-                                              Register key,
-                                              Register r0,
-                                              Register r1,
-                                              Register r2,
-                                              Register result) {
-  // Register use:
-  //
-  // elements - holds the slow-case elements of the receiver on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // key      - holds the smi key on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // Scratch registers:
-  //
-  // r0 - holds the untagged key on entry and holds the hash once computed.
-  //
-  // r1 - used to hold the capacity mask of the dictionary
-  //
-  // r2 - used for the index into the dictionary.
-  //
-  // result - holds the result on exit if the load succeeded.
-  //          Allowed to be the same as 'key' or 'result'.
-  //          Unchanged on bailout so 'key' or 'result' can be used
-  //          in further computation.
-
-  Label done;
-
-  GetNumberHash(r0, r1);
-
-  // Compute capacity mask.
-  SmiToInteger32(r1, FieldOperand(elements,
-                                  SeededNumberDictionary::kCapacityOffset));
-  decl(r1);
-
-  // Generate an unrolled loop that performs a few probes before giving up.
-  for (int i = 0; i < kNumberDictionaryProbes; i++) {
-    // Use r2 for index calculations and keep the hash intact in r0.
-    movp(r2, r0);
-    // Compute the masked index: (hash + i + i * i) & mask.
-    if (i > 0) {
-      addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
-    }
-    andp(r2, r1);
-
-    // Scale the index by multiplying by the entry size.
-    DCHECK(SeededNumberDictionary::kEntrySize == 3);
-    leap(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3
-
-    // Check if the key matches.
-    cmpp(key, FieldOperand(elements,
-                           r2,
-                           times_pointer_size,
-                           SeededNumberDictionary::kElementsStartOffset));
-    if (i != (kNumberDictionaryProbes - 1)) {
-      j(equal, &done);
-    } else {
-      j(not_equal, miss);
-    }
-  }
-
-  bind(&done);
-  // Check that the value is a field property.
-  const int kDetailsOffset =
-      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
-  DCHECK_EQ(DATA, 0);
-  Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
-       Smi::FromInt(PropertyDetails::TypeField::kMask));
-  j(not_zero, miss);
-
-  // Get the value at the masked, scaled index.
-  const int kValueOffset =
-      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
-  movp(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
-}
-
-
 void MacroAssembler::LoadAllocationTopHelper(Register result,
                                              Register scratch,
                                              AllocationFlags flags) {
@@ -5283,93 +5098,6 @@
   STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
 }
 
-
-// Copy memory, byte-by-byte, from source to destination.  Not optimized for
-// long or aligned copies.  The contents of scratch and length are destroyed.
-// Destination is incremented by length, source, length and scratch are
-// clobbered.
-// A simpler loop is faster on small copies, but slower on large ones.
-// The cld() instruction must have been emitted, to set the direction flag(),
-// before calling this function.
-void MacroAssembler::CopyBytes(Register destination,
-                               Register source,
-                               Register length,
-                               int min_length,
-                               Register scratch) {
-  DCHECK(min_length >= 0);
-  if (emit_debug_code()) {
-    cmpl(length, Immediate(min_length));
-    Assert(greater_equal, kInvalidMinLength);
-  }
-  Label short_loop, len8, len16, len24, done, short_string;
-
-  const int kLongStringLimit = 4 * kPointerSize;
-  if (min_length <= kLongStringLimit) {
-    cmpl(length, Immediate(kPointerSize));
-    j(below, &short_string, Label::kNear);
-  }
-
-  DCHECK(source.is(rsi));
-  DCHECK(destination.is(rdi));
-  DCHECK(length.is(rcx));
-
-  if (min_length <= kLongStringLimit) {
-    cmpl(length, Immediate(2 * kPointerSize));
-    j(below_equal, &len8, Label::kNear);
-    cmpl(length, Immediate(3 * kPointerSize));
-    j(below_equal, &len16, Label::kNear);
-    cmpl(length, Immediate(4 * kPointerSize));
-    j(below_equal, &len24, Label::kNear);
-  }
-
-  // Because source is 8-byte aligned in our uses of this function,
-  // we keep source aligned for the rep movs operation by copying the odd bytes
-  // at the end of the ranges.
-  movp(scratch, length);
-  shrl(length, Immediate(kPointerSizeLog2));
-  repmovsp();
-  // Move remaining bytes of length.
-  andl(scratch, Immediate(kPointerSize - 1));
-  movp(length, Operand(source, scratch, times_1, -kPointerSize));
-  movp(Operand(destination, scratch, times_1, -kPointerSize), length);
-  addp(destination, scratch);
-
-  if (min_length <= kLongStringLimit) {
-    jmp(&done, Label::kNear);
-    bind(&len24);
-    movp(scratch, Operand(source, 2 * kPointerSize));
-    movp(Operand(destination, 2 * kPointerSize), scratch);
-    bind(&len16);
-    movp(scratch, Operand(source, kPointerSize));
-    movp(Operand(destination, kPointerSize), scratch);
-    bind(&len8);
-    movp(scratch, Operand(source, 0));
-    movp(Operand(destination, 0), scratch);
-    // Move remaining bytes of length.
-    movp(scratch, Operand(source, length, times_1, -kPointerSize));
-    movp(Operand(destination, length, times_1, -kPointerSize), scratch);
-    addp(destination, length);
-    jmp(&done, Label::kNear);
-
-    bind(&short_string);
-    if (min_length == 0) {
-      testl(length, length);
-      j(zero, &done, Label::kNear);
-    }
-
-    bind(&short_loop);
-    movb(scratch, Operand(source, 0));
-    movb(Operand(destination, 0), scratch);
-    incp(source);
-    incp(destination);
-    decl(length);
-    j(not_zero, &short_loop, Label::kNear);
-  }
-
-  bind(&done);
-}
-
-
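
The fast path copies whole words with rep movs and then covers the unaligned
tail with a single overlapping word move. A rough C++ model of the trick,
assuming a word-aligned source and length >= kPointerSize, as the surrounding
checks guarantee:

    #include <cstddef>
    #include <cstring>

    void CopyBytesModel(char* dst, const char* src, size_t length) {
      const size_t kWord = sizeof(void*);
      size_t words = length / kWord;
      std::memcpy(dst, src, words * kWord);  // the repmovsp() above
      size_t tail = length & (kWord - 1);
      // One overlapping word move picks up the last 'tail' bytes, exactly
      // like the movp through 'scratch' at offset -kPointerSize above.
      std::memcpy(dst + words * kWord + tail - kWord,
                  src + words * kWord + tail - kWord, kWord);
    }
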
 void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
                                                 Register end_address,
                                                 Register filler) {
@@ -5503,7 +5231,7 @@
   SmiCompare(index, FieldOperand(string, String::kLengthOffset));
   Check(less, kIndexIsTooLarge);
 
-  SmiCompare(index, Smi::FromInt(0));
+  SmiCompare(index, Smi::kZero);
   Check(greater_equal, kIndexIsNegative);
 
   // Restore the index
@@ -5708,7 +5436,7 @@
 
   // For all objects but the receiver, check that the cache is empty.
   EnumLength(rdx, rbx);
-  Cmp(rdx, Smi::FromInt(0));
+  Cmp(rdx, Smi::kZero);
   j(not_equal, call_runtime);
 
   bind(&start);
@@ -5741,20 +5469,21 @@
   ExternalReference new_space_allocation_top =
       ExternalReference::new_space_allocation_top_address(isolate());
   const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
-  const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+  const int kMementoLastWordOffset =
+      kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
 
   // Bail out if the object is not in new space.
   JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
   // If the object is in new space, we need to check whether it is on the same
   // page as the current top.
-  leap(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+  leap(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
   xorp(scratch_reg, ExternalOperand(new_space_allocation_top));
   testp(scratch_reg, Immediate(~Page::kPageAlignmentMask));
   j(zero, &top_check);
   // The object is on a different page than allocation top. Bail out if the
   // object sits on the page boundary as no memento can follow and we cannot
   // touch the memory following it.
-  leap(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+  leap(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
   xorp(scratch_reg, receiver_reg);
   testp(scratch_reg, Immediate(~Page::kPageAlignmentMask));
   j(not_zero, no_memento_found);
@@ -5763,9 +5492,9 @@
   // If top is on the same page as the current object, we need to check whether
   // we are below top.
   bind(&top_check);
-  leap(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+  leap(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
   cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
-  j(greater, no_memento_found);
+  j(greater_equal, no_memento_found);
   // Memento map check.
   bind(&map_check);
   CompareRoot(MemOperand(receiver_reg, kMementoMapOffset),
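
Because scratch_reg now holds the address of the memento's last word rather
than one past its end, the lea never produces an address beyond the object,
and the final comparison flips from greater to greater_equal: the memento is
only valid when its last word lies strictly below the allocation top. The
xorp/testp pairs are a same-page test; a minimal C++ equivalent, with an
assumed page size:

    #include <cstdint>

    // Assumed page size; V8's Page::kPageAlignmentMask is kPageSize - 1.
    constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << 19) - 1;

    // Two addresses lie on the same page iff their bits above the page
    // offset agree, which is what the xorp/testp sequence computes.
    bool OnSamePage(uintptr_t a, uintptr_t b) {
      return ((a ^ b) & ~kPageAlignmentMask) == 0;
    }
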
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index a8d0c60..f085509 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -1112,12 +1112,6 @@
   // Always use unsigned comparisons: above and below, not less and greater.
   void CmpInstanceType(Register map, InstanceType type);
 
-  // Check if a map for a JSObject indicates that the object has fast elements.
-  // Jump to the specified label if it does not.
-  void CheckFastElements(Register map,
-                         Label* fail,
-                         Label::Distance distance = Label::kFar);
-
   // Check if a map for a JSObject indicates that the object can have both smi
   // and HeapObject elements.  Jump to the specified label if it does not.
   void CheckFastObjectElements(Register map,
@@ -1295,25 +1289,8 @@
   // ---------------------------------------------------------------------------
   // Inline caching support
 
-  // Generate code for checking access rights - used for security checks
-  // on access to global objects across environments. The holder register
-  // is left untouched, but the scratch register and kScratchRegister,
-  // which must be different, are clobbered.
-  void CheckAccessGlobalProxy(Register holder_reg,
-                              Register scratch,
-                              Label* miss);
-
   void GetNumberHash(Register r0, Register scratch);
 
-  void LoadFromNumberDictionary(Label* miss,
-                                Register elements,
-                                Register key,
-                                Register r0,
-                                Register r1,
-                                Register r2,
-                                Register result);
-
-
   // ---------------------------------------------------------------------------
   // Allocation support
 
@@ -1430,12 +1407,6 @@
   // clobbered.
   void TryGetFunctionPrototype(Register function, Register result, Label* miss);
 
-  // Picks out an array index from the hash field.
-  // Register use:
-  //   hash - holds the index's hash. Clobbered.
-  //   index - holds the overwritten index on exit.
-  void IndexFromHash(Register hash, Register index);
-
   // Find the function context up the context chain.
   void LoadContext(Register dst, int context_chain_length);
 
@@ -1549,18 +1520,6 @@
     return code_object_;
   }
 
-  // Copy length bytes from source to destination.
-  // Uses scratch register internally (if you have a low-eight register
-  // free, do use it, otherwise kScratchRegister will be used).
-  // The min_length is a minimum limit on the value that length will have.
-  // The algorithm has some special cases that might be omitted if the string
-  // is known to always be long.
-  void CopyBytes(Register destination,
-                 Register source,
-                 Register length,
-                 int min_length = 0,
-                 Register scratch = kScratchRegister);
-
   // Initialize fields with filler values.  Fields starting at |current_address|
   // not including |end_address| are overwritten with the value in |filler|.  At
   // the end of the loop, |current_address| takes the value of |end_address|.
diff --git a/src/x87/assembler-x87.cc b/src/x87/assembler-x87.cc
index 62b662f..eb8dafa 100644
--- a/src/x87/assembler-x87.cc
+++ b/src/x87/assembler-x87.cc
@@ -2052,8 +2052,9 @@
 
   // Some internal data structures overflow for very large buffers;
   // they must ensure that kMaximalBufferSize is not too large.
-  if ((desc.buffer_size > kMaximalBufferSize) ||
-      (desc.buffer_size > isolate()->heap()->MaxOldGenerationSize())) {
+  if (desc.buffer_size > kMaximalBufferSize ||
+      static_cast<size_t>(desc.buffer_size) >
+          isolate()->heap()->MaxOldGenerationSize()) {
     V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
   }
 
diff --git a/src/x87/assembler-x87.h b/src/x87/assembler-x87.h
index 4111e8d..160145b 100644
--- a/src/x87/assembler-x87.h
+++ b/src/x87/assembler-x87.h
@@ -969,7 +969,8 @@
 
   // Record a deoptimization reason that can be used by a log or cpu profiler.
   // Use --trace-deopt to enable.
-  void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
+  void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
+                         int id);
 
   // Writes a single byte or word of data in the code stream.  Used for
   // inline tables, e.g., jump-tables.
diff --git a/src/x87/code-stubs-x87.cc b/src/x87/code-stubs-x87.cc
index e70cbad..0ea919d 100644
--- a/src/x87/code-stubs-x87.cc
+++ b/src/x87/code-stubs-x87.cc
@@ -631,14 +631,10 @@
   __ add(edx, Immediate(2));  // edx was a smi.
 
   // edx: Number of capture registers
-  // Load last_match_info which is still known to be a fast-elements JSObject.
-  // Check that the fourth object is a JSObject.
-  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
-  __ JumpIfSmi(eax, &runtime);
-  __ CmpObjectType(eax, JS_OBJECT_TYPE, ebx);
-  __ j(not_equal, &runtime);
+  // Check that the last match info is a FixedArray.
+  __ mov(ebx, Operand(esp, kLastMatchInfoOffset));
+  __ JumpIfSmi(ebx, &runtime);
   // Check that the object has fast elements.
-  __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
   __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
   __ cmp(eax, factory->fixed_array_map());
   __ j(not_equal, &runtime);
@@ -646,7 +642,7 @@
   // additional information.
   __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
   __ SmiUntag(eax);
-  __ sub(eax, Immediate(RegExpImpl::kLastMatchOverhead));
+  __ sub(eax, Immediate(RegExpMatchInfo::kLastMatchOverhead));
   __ cmp(edx, eax);
   __ j(greater, &runtime);
 
@@ -654,17 +650,17 @@
   // edx: number of capture registers
   // Store the capture count.
   __ SmiTag(edx);  // Number of capture registers to smi.
-  __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
+  __ mov(FieldOperand(ebx, RegExpMatchInfo::kNumberOfCapturesOffset), edx);
   __ SmiUntag(edx);  // Number of capture registers back from smi.
   // Store last subject and last input.
   __ mov(eax, Operand(esp, kSubjectOffset));
   __ mov(ecx, eax);
-  __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
-  __ RecordWriteField(ebx, RegExpImpl::kLastSubjectOffset, eax, edi,
+  __ mov(FieldOperand(ebx, RegExpMatchInfo::kLastSubjectOffset), eax);
+  __ RecordWriteField(ebx, RegExpMatchInfo::kLastSubjectOffset, eax, edi,
                       kDontSaveFPRegs);
   __ mov(eax, ecx);
-  __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
-  __ RecordWriteField(ebx, RegExpImpl::kLastInputOffset, eax, edi,
+  __ mov(FieldOperand(ebx, RegExpMatchInfo::kLastInputOffset), eax);
+  __ RecordWriteField(ebx, RegExpMatchInfo::kLastInputOffset, eax, edi,
                       kDontSaveFPRegs);
 
   // Get the static offsets vector filled by the native regexp code.
@@ -677,7 +673,7 @@
   // edx: number of capture registers
   Label next_capture, done;
   // Capture register counter starts from number of capture registers and
-  // counts down until wraping after zero.
+  // counts down until wrapping after zero.
   __ bind(&next_capture);
   __ sub(edx, Immediate(1));
   __ j(negative, &done, Label::kNear);
@@ -685,16 +681,14 @@
   __ mov(edi, Operand(ecx, edx, times_int_size, 0));
   __ SmiTag(edi);
   // Store the smi value in the last match info.
-  __ mov(FieldOperand(ebx,
-                      edx,
-                      times_pointer_size,
-                      RegExpImpl::kFirstCaptureOffset),
-                      edi);
+  __ mov(FieldOperand(ebx, edx, times_pointer_size,
+                      RegExpMatchInfo::kFirstCaptureOffset),
+         edi);
   __ jmp(&next_capture);
   __ bind(&done);
 
   // Return last match info.
-  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
+  __ mov(eax, ebx);
   __ ret(4 * kPointerSize);
 
   // Do the runtime call to execute the regexp.
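
The rewritten stub stores match results directly into a RegExpMatchInfo
FixedArray instead of a JSArray's elements, so the JS_OBJECT_TYPE check
disappears and the match info itself (kept in ebx) is returned. A sketch of
the slot layout the offsets above imply; the indices are an assumption based
on the constants' names, not taken from this diff:

    // Assumed slot indices behind the RegExpMatchInfo::k*Offset constants;
    // the capture registers follow the fixed header fields.
    enum RegExpMatchInfoSlot {
      kNumberOfCapturesSlot = 0,
      kLastSubjectSlot = 1,
      kLastInputSlot = 2,
      kFirstCaptureSlot = 3,  // kLastMatchOverhead == number of header slots
    };
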
@@ -877,7 +871,7 @@
     // If either is a Smi (we know that not both are), then they can only
     // be equal if the other is a HeapNumber. If so, use the slow case.
     STATIC_ASSERT(kSmiTag == 0);
-    DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+    DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
     __ mov(ecx, Immediate(kSmiTagMask));
     __ and_(ecx, eax);
     __ test(ecx, edx);
@@ -1258,6 +1252,7 @@
 }
 
 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
+  // eax - number of arguments
   // edi - function
   // edx - slot id
   // ebx - vector
@@ -1265,7 +1260,6 @@
   __ cmp(edi, ecx);
   __ j(not_equal, miss);
 
-  __ mov(eax, arg_count());
   // Reload ecx.
   __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
                            FixedArray::kHeaderSize));
@@ -1275,7 +1269,7 @@
 
   __ mov(ebx, ecx);
   __ mov(edx, edi);
-  ArrayConstructorStub stub(masm->isolate(), arg_count());
+  ArrayConstructorStub stub(masm->isolate());
   __ TailCallStub(&stub);
 
   // Unreachable.
@@ -1283,13 +1277,12 @@
 
 
 void CallICStub::Generate(MacroAssembler* masm) {
+  // eax - number of arguments
   // edi - function
   // edx - slot id
   // ebx - vector
   Isolate* isolate = masm->isolate();
   Label extra_checks_or_miss, call, call_function, call_count_incremented;
-  int argc = arg_count();
-  ParameterCount actual(argc);
 
   // The checks. First, does edi match the recorded monomorphic target?
   __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
@@ -1321,7 +1314,6 @@
   // Increment the call count for monomorphic function calls.
   IncrementCallCount(masm, ebx, edx);
 
-  __ Set(eax, argc);
   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
                                                     tail_call_mode()),
           RelocInfo::CODE_TARGET);
@@ -1367,7 +1359,6 @@
 
   __ bind(&call_count_incremented);
 
-  __ Set(eax, argc);
   __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
           RelocInfo::CODE_TARGET);
 
@@ -1393,12 +1384,15 @@
   __ j(not_equal, &miss);
 
   // Store the function. Use a stub since we need a frame for allocation.
+  // eax - number of arguments
   // ebx - vector
   // edx - slot
   // edi - function
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     CreateWeakCellStub create_stub(isolate);
+    __ SmiTag(eax);
+    __ push(eax);
     __ push(ebx);
     __ push(edx);
     __ push(edi);
@@ -1408,6 +1402,8 @@
     __ pop(edi);
     __ pop(edx);
     __ pop(ebx);
+    __ pop(eax);
+    __ SmiUntag(eax);
   }
 
   __ jmp(&call_function);
@@ -1427,6 +1423,10 @@
 void CallICStub::GenerateMiss(MacroAssembler* masm) {
   FrameScope scope(masm, StackFrame::INTERNAL);
 
+  // Preserve the number of arguments.
+  __ SmiTag(eax);
+  __ push(eax);
+
   // Push the function and feedback info.
   __ push(edi);
   __ push(ebx);
@@ -1437,6 +1437,10 @@
 
   // Move result to edi and exit the internal frame.
   __ mov(edi, eax);
+
+  // Restore number of arguments.
+  __ pop(eax);
+  __ SmiUntag(eax);
 }
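
With the argument count now arriving in eax instead of being materialized
from arg_count(), any runtime call made inside the stub has to preserve eax
across the internal frame; that is what the SmiTag/push and pop/SmiUntag
pairs above do. Tagging first keeps the saved slot a valid smi, so a GC
walking the frame never mistakes the raw count for a pointer. A sketch of
the 32-bit smi encoding assumed here:

    #include <cstdint>

    // Assumed 32-bit encoding: kSmiTag == 0, kSmiTagSize == 1. Valid only
    // for values in smi range (31 payload bits).
    int32_t SmiTag(int32_t value) { return value << 1; }
    int32_t SmiUntag(int32_t smi) { return smi >> 1; }
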
 
 
@@ -2830,254 +2834,12 @@
   __ jmp(ecx);  // Return to IC Miss stub, continuation still on stack.
 }
 
-
-void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  LoadICStub stub(isolate());
-  stub.GenerateForTrampoline(masm);
-}
-
-
-void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  KeyedLoadICStub stub(isolate());
-  stub.GenerateForTrampoline(masm);
-}
-
-
-static void HandleArrayCases(MacroAssembler* masm, Register receiver,
-                             Register key, Register vector, Register slot,
-                             Register feedback, bool is_polymorphic,
-                             Label* miss) {
-  // feedback initially contains the feedback array
-  Label next, next_loop, prepare_next;
-  Label load_smi_map, compare_map;
-  Label start_polymorphic;
-
-  __ push(receiver);
-  __ push(vector);
-
-  Register receiver_map = receiver;
-  Register cached_map = vector;
-
-  // Receiver might not be a heap object.
-  __ JumpIfSmi(receiver, &load_smi_map);
-  __ mov(receiver_map, FieldOperand(receiver, 0));
-  __ bind(&compare_map);
-  __ mov(cached_map, FieldOperand(feedback, FixedArray::OffsetOfElementAt(0)));
-
-  // A named keyed load might have a 2-element array; all other cases can count
-  // on an array with at least 2 {map, handler} pairs, so they can go right
-  // into polymorphic array handling.
-  __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
-  __ j(not_equal, is_polymorphic ? &start_polymorphic : &next);
-
-  // Found. Now call the handler.
-  Register handler = feedback;
-  __ mov(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
-  __ pop(vector);
-  __ pop(receiver);
-  __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
-  __ jmp(handler);
-
-  if (!is_polymorphic) {
-    __ bind(&next);
-    __ cmp(FieldOperand(feedback, FixedArray::kLengthOffset),
-           Immediate(Smi::FromInt(2)));
-    __ j(not_equal, &start_polymorphic);
-    __ pop(vector);
-    __ pop(receiver);
-    __ jmp(miss);
-  }
-
-  // Polymorphic: we have to loop from 2 to N.
-  __ bind(&start_polymorphic);
-  __ push(key);
-  Register counter = key;
-  __ mov(counter, Immediate(Smi::FromInt(2)));
-  __ bind(&next_loop);
-  __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
-                                  FixedArray::kHeaderSize));
-  __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
-  __ j(not_equal, &prepare_next);
-  __ mov(handler, FieldOperand(feedback, counter, times_half_pointer_size,
-                               FixedArray::kHeaderSize + kPointerSize));
-  __ pop(key);
-  __ pop(vector);
-  __ pop(receiver);
-  __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
-  __ jmp(handler);
-
-  __ bind(&prepare_next);
-  __ add(counter, Immediate(Smi::FromInt(2)));
-  __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
-  __ j(less, &next_loop);
-
-  // We exhausted our array of map handler pairs.
-  __ pop(key);
-  __ pop(vector);
-  __ pop(receiver);
-  __ jmp(miss);
-
-  __ bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ jmp(&compare_map);
-}
-
-
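
HandleArrayCases scans a feedback FixedArray of (map, handler) pairs, jumping
to the handler whose weak-cell map matches the receiver's map and falling
through to the miss label once the pairs are exhausted. Its control flow,
modelled with hypothetical C++ types:

    #include <cstddef>

    struct Map;
    struct Handler;
    struct Pair { const Map* map; Handler* handler; };  // one feedback entry

    // Model of the scan from &start_polymorphic onward; returning nullptr
    // corresponds to the final jmp(miss).
    Handler* FindHandler(const Pair* feedback, size_t pairs,
                         const Map* receiver_map) {
      for (size_t i = 0; i < pairs; i++) {
        if (feedback[i].map == receiver_map) return feedback[i].handler;
      }
      return nullptr;
    }
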
-static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
-                                  Register key, Register vector, Register slot,
-                                  Register weak_cell, Label* miss) {
-  // feedback initially contains the feedback array
-  Label compare_smi_map;
-
-  // Move the weak map into the weak_cell register.
-  Register ic_map = weak_cell;
-  __ mov(ic_map, FieldOperand(weak_cell, WeakCell::kValueOffset));
-
-  // Receiver might not be a heap object.
-  __ JumpIfSmi(receiver, &compare_smi_map);
-  __ cmp(ic_map, FieldOperand(receiver, 0));
-  __ j(not_equal, miss);
-  Register handler = weak_cell;
-  __ mov(handler, FieldOperand(vector, slot, times_half_pointer_size,
-                               FixedArray::kHeaderSize + kPointerSize));
-  __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
-  __ jmp(handler);
-
-  // In microbenchmarks, it made sense to unroll this code so that the call to
-  // the handler is duplicated for a HeapObject receiver and a Smi receiver.
-  __ bind(&compare_smi_map);
-  __ CompareRoot(ic_map, Heap::kHeapNumberMapRootIndex);
-  __ j(not_equal, miss);
-  __ mov(handler, FieldOperand(vector, slot, times_half_pointer_size,
-                               FixedArray::kHeaderSize + kPointerSize));
-  __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
-  __ jmp(handler);
-}
-
-
-void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-
-void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-
-void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // edx
-  Register name = LoadWithVectorDescriptor::NameRegister();          // ecx
-  Register vector = LoadWithVectorDescriptor::VectorRegister();      // ebx
-  Register slot = LoadWithVectorDescriptor::SlotRegister();          // eax
-  Register scratch = edi;
-  __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
-                               FixedArray::kHeaderSize));
-
-  // Is it a weak cell?
-  Label try_array;
-  Label not_array, smi_key, key_okay, miss;
-  __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex);
-  __ j(not_equal, &try_array);
-  HandleMonomorphicCase(masm, receiver, name, vector, slot, scratch, &miss);
-
-  // Is it a fixed array?
-  __ bind(&try_array);
-  __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &not_array);
-  HandleArrayCases(masm, receiver, name, vector, slot, scratch, true, &miss);
-
-  __ bind(&not_array);
-  __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
-  __ j(not_equal, &miss);
-  __ push(slot);
-  __ push(vector);
-  masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, name,
-                                                    vector, scratch);
-  __ pop(vector);
-  __ pop(slot);
-
-  __ bind(&miss);
-  LoadIC::GenerateMiss(masm);
-}
-
-
-void KeyedLoadICStub::Generate(MacroAssembler* masm) {
-  GenerateImpl(masm, false);
-}
-
-
-void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-
-void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // edx
-  Register key = LoadWithVectorDescriptor::NameRegister();           // ecx
-  Register vector = LoadWithVectorDescriptor::VectorRegister();      // ebx
-  Register slot = LoadWithVectorDescriptor::SlotRegister();          // eax
-  Register feedback = edi;
-  __ mov(feedback, FieldOperand(vector, slot, times_half_pointer_size,
-                                FixedArray::kHeaderSize));
-  // Is it a weak cell?
-  Label try_array;
-  Label not_array, smi_key, key_okay, miss;
-  __ CompareRoot(FieldOperand(feedback, 0), Heap::kWeakCellMapRootIndex);
-  __ j(not_equal, &try_array);
-  HandleMonomorphicCase(masm, receiver, key, vector, slot, feedback, &miss);
-
-  __ bind(&try_array);
-  // Is it a fixed array?
-  __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &not_array);
-
-  // We have a polymorphic element handler.
-  Label polymorphic, try_poly_name;
-  __ bind(&polymorphic);
-  HandleArrayCases(masm, receiver, key, vector, slot, feedback, true, &miss);
-
-  __ bind(&not_array);
-  // Is it generic?
-  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
-  __ j(not_equal, &try_poly_name);
-  Handle<Code> megamorphic_stub =
-      KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
-  __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
-
-  __ bind(&try_poly_name);
-  // We might have a name in feedback, and a fixed array in the next slot.
-  __ cmp(key, feedback);
-  __ j(not_equal, &miss);
-  // If the name comparison succeeded, we know we have a fixed array with
-  // at least one map/handler pair.
-  __ mov(feedback, FieldOperand(vector, slot, times_half_pointer_size,
-                                FixedArray::kHeaderSize + kPointerSize));
-  HandleArrayCases(masm, receiver, key, vector, slot, feedback, false, &miss);
-
-  __ bind(&miss);
-  KeyedLoadIC::GenerateMiss(masm);
-}
-
-void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
-  StoreICStub stub(isolate(), state());
-  stub.GenerateForTrampoline(masm);
-}
-
 void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
   KeyedStoreICStub stub(isolate(), state());
   stub.GenerateForTrampoline(masm);
 }
 
-void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-
 // value is on the stack already.
 static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
                                        Register key, Register vector,
@@ -3194,63 +2956,6 @@
   __ jmp(weak_cell);
 }
 
-void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // edx
-  Register key = StoreWithVectorDescriptor::NameRegister();           // ecx
-  Register value = StoreWithVectorDescriptor::ValueRegister();        // eax
-  Register vector = StoreWithVectorDescriptor::VectorRegister();      // ebx
-  Register slot = StoreWithVectorDescriptor::SlotRegister();          // edi
-  Label miss;
-
-  if (StoreWithVectorDescriptor::kPassLastArgsOnStack) {
-    // Current stack layout:
-    // - esp[8]    -- value
-    // - esp[4]    -- slot
-    // - esp[0]    -- return address
-    STATIC_ASSERT(StoreDescriptor::kStackArgumentsCount == 2);
-    STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
-    if (in_frame) {
-      __ RecordComment("[ StoreDescriptor -> StoreWithVectorDescriptor");
-      // If the vector is not on the stack, then insert the vector beneath
-      // return address in order to prepare for calling handler with
-      // StoreWithVector calling convention.
-      __ push(Operand(esp, 0));
-      __ mov(Operand(esp, 4), StoreWithVectorDescriptor::VectorRegister());
-      __ RecordComment("]");
-    } else {
-      __ mov(vector, Operand(esp, 1 * kPointerSize));
-    }
-    __ mov(slot, Operand(esp, 2 * kPointerSize));
-  }
-
-  Register scratch = value;
-  __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
-                               FixedArray::kHeaderSize));
-
-  // Is it a weak cell?
-  Label try_array;
-  Label not_array, smi_key, key_okay;
-  __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex);
-  __ j(not_equal, &try_array);
-  HandleMonomorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);
-
-  // Is it a fixed array?
-  __ bind(&try_array);
-  __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &not_array);
-  HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, true,
-                             &miss);
-
-  __ bind(&not_array);
-  __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
-  __ j(not_equal, &miss);
-
-  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, slot,
-                                                     no_reg);
-  __ bind(&miss);
-  StoreIC::GenerateMiss(masm);
-}
-
 void KeyedStoreICStub::Generate(MacroAssembler* masm) {
   GenerateImpl(masm, false);
 }
@@ -3299,7 +3004,7 @@
   // - esp[12]   -- value
   // - receiver, key, handler in registers.
   Register counter = key;
-  __ mov(counter, Immediate(Smi::FromInt(0)));
+  __ mov(counter, Immediate(Smi::kZero));
   __ bind(&next_loop);
   __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
                                   FixedArray::kHeaderSize));
@@ -3634,30 +3339,19 @@
 
 void ArrayConstructorStub::GenerateDispatchToArrayStub(
     MacroAssembler* masm, AllocationSiteOverrideMode mode) {
-  if (argument_count() == ANY) {
-    Label not_zero_case, not_one_case;
-    __ test(eax, eax);
-    __ j(not_zero, &not_zero_case);
-    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+  Label not_zero_case, not_one_case;
+  __ test(eax, eax);
+  __ j(not_zero, &not_zero_case);
+  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
 
-    __ bind(&not_zero_case);
-    __ cmp(eax, 1);
-    __ j(greater, &not_one_case);
-    CreateArrayDispatchOneArgument(masm, mode);
+  __ bind(&not_zero_case);
+  __ cmp(eax, 1);
+  __ j(greater, &not_one_case);
+  CreateArrayDispatchOneArgument(masm, mode);
 
-    __ bind(&not_one_case);
-    ArrayNArgumentsConstructorStub stub(masm->isolate());
-    __ TailCallStub(&stub);
-  } else if (argument_count() == NONE) {
-    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-  } else if (argument_count() == ONE) {
-    CreateArrayDispatchOneArgument(masm, mode);
-  } else if (argument_count() == MORE_THAN_ONE) {
-    ArrayNArgumentsConstructorStub stub(masm->isolate());
-    __ TailCallStub(&stub);
-  } else {
-    UNREACHABLE();
-  }
+  __ bind(&not_one_case);
+  ArrayNArgumentsConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
 }
 
 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
@@ -3711,21 +3405,8 @@
 
   // Subclassing.
   __ bind(&subclassing);
-  switch (argument_count()) {
-    case ANY:
-    case MORE_THAN_ONE:
-      __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
-      __ add(eax, Immediate(3));
-      break;
-    case NONE:
-      __ mov(Operand(esp, 1 * kPointerSize), edi);
-      __ mov(eax, Immediate(3));
-      break;
-    case ONE:
-      __ mov(Operand(esp, 2 * kPointerSize), edi);
-      __ mov(eax, Immediate(4));
-      break;
-  }
+  __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
+  __ add(eax, Immediate(3));
   __ PopReturnAddressTo(ecx);
   __ Push(edx);
   __ Push(ebx);
@@ -4004,8 +3685,7 @@
     __ mov(ecx, isolate()->factory()->empty_fixed_array());
     __ mov(FieldOperand(eax, JSArray::kPropertiesOffset), ecx);
     __ mov(FieldOperand(eax, JSArray::kElementsOffset), ecx);
-    __ mov(FieldOperand(eax, JSArray::kLengthOffset),
-           Immediate(Smi::FromInt(0)));
+    __ mov(FieldOperand(eax, JSArray::kLengthOffset), Immediate(Smi::kZero));
     STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
     __ Ret();
 
@@ -4046,7 +3726,7 @@
     __ mov(FieldOperand(edx, FixedArray::kLengthOffset), eax);
     {
       Label loop, done_loop;
-      __ Move(ecx, Smi::FromInt(0));
+      __ Move(ecx, Smi::kZero);
       __ bind(&loop);
       __ cmp(ecx, eax);
       __ j(equal, &done_loop, Label::kNear);
@@ -4433,7 +4113,7 @@
   __ mov(FieldOperand(edx, FixedArray::kLengthOffset), eax);
   {
     Label loop, done_loop;
-    __ Move(ecx, Smi::FromInt(0));
+    __ Move(ecx, Smi::kZero);
     __ bind(&loop);
     __ cmp(ecx, eax);
     __ j(equal, &done_loop, Label::kNear);
@@ -4495,129 +4175,6 @@
   __ TailCallRuntime(Runtime::kNewStrictArguments);
 }
 
-void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
-  Register context_reg = esi;
-  Register slot_reg = ebx;
-  Register value_reg = eax;
-  Register cell_reg = edi;
-  Register cell_details_reg = edx;
-  Register cell_value_reg = ecx;
-  Label fast_heapobject_case, fast_smi_case, slow_case;
-
-  if (FLAG_debug_code) {
-    __ CompareRoot(value_reg, Heap::kTheHoleValueRootIndex);
-    __ Check(not_equal, kUnexpectedValue);
-  }
-
-  // Go up context chain to the script context.
-  for (int i = 0; i < depth(); ++i) {
-    __ mov(cell_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
-    context_reg = cell_reg;
-  }
-
-  // Load the PropertyCell at the specified slot.
-  __ mov(cell_reg, ContextOperand(context_reg, slot_reg));
-
-  // Load PropertyDetails for the cell (actually only the cell_type and kind).
-  __ mov(cell_details_reg,
-         FieldOperand(cell_reg, PropertyCell::kDetailsOffset));
-  __ SmiUntag(cell_details_reg);
-  __ and_(cell_details_reg,
-          Immediate(PropertyDetails::PropertyCellTypeField::kMask |
-                    PropertyDetails::KindField::kMask |
-                    PropertyDetails::kAttributesReadOnlyMask));
-
-  // Check if PropertyCell holds mutable data.
-  Label not_mutable_data;
-  __ cmp(cell_details_reg,
-         Immediate(PropertyDetails::PropertyCellTypeField::encode(
-                       PropertyCellType::kMutable) |
-                   PropertyDetails::KindField::encode(kData)));
-  __ j(not_equal, &not_mutable_data);
-  __ JumpIfSmi(value_reg, &fast_smi_case);
-  __ bind(&fast_heapobject_case);
-  __ mov(FieldOperand(cell_reg, PropertyCell::kValueOffset), value_reg);
-  __ RecordWriteField(cell_reg, PropertyCell::kValueOffset, value_reg,
-                      cell_details_reg, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  // RecordWriteField clobbers the value register, so we need to reload.
-  __ mov(value_reg, FieldOperand(cell_reg, PropertyCell::kValueOffset));
-  __ Ret();
-  __ bind(&not_mutable_data);
-
-  // Check if PropertyCell value matches the new value (relevant for Constant,
-  // ConstantType and Undefined cells).
-  Label not_same_value;
-  __ mov(cell_value_reg, FieldOperand(cell_reg, PropertyCell::kValueOffset));
-  __ cmp(cell_value_reg, value_reg);
-  __ j(not_equal, &not_same_value,
-       FLAG_debug_code ? Label::kFar : Label::kNear);
-  // Make sure the PropertyCell is not marked READ_ONLY.
-  __ test(cell_details_reg,
-          Immediate(PropertyDetails::kAttributesReadOnlyMask));
-  __ j(not_zero, &slow_case);
-  if (FLAG_debug_code) {
-    Label done;
-    // This can only be true for Constant, ConstantType and Undefined cells,
-    // because we never store the_hole via this stub.
-    __ cmp(cell_details_reg,
-           Immediate(PropertyDetails::PropertyCellTypeField::encode(
-                         PropertyCellType::kConstant) |
-                     PropertyDetails::KindField::encode(kData)));
-    __ j(equal, &done);
-    __ cmp(cell_details_reg,
-           Immediate(PropertyDetails::PropertyCellTypeField::encode(
-                         PropertyCellType::kConstantType) |
-                     PropertyDetails::KindField::encode(kData)));
-    __ j(equal, &done);
-    __ cmp(cell_details_reg,
-           Immediate(PropertyDetails::PropertyCellTypeField::encode(
-                         PropertyCellType::kUndefined) |
-                     PropertyDetails::KindField::encode(kData)));
-    __ Check(equal, kUnexpectedValue);
-    __ bind(&done);
-  }
-  __ Ret();
-  __ bind(&not_same_value);
-
-  // Check if PropertyCell contains data with constant type (and is not
-  // READ_ONLY).
-  __ cmp(cell_details_reg,
-         Immediate(PropertyDetails::PropertyCellTypeField::encode(
-                       PropertyCellType::kConstantType) |
-                   PropertyDetails::KindField::encode(kData)));
-  __ j(not_equal, &slow_case, Label::kNear);
-
-  // Now either both old and new values must be SMIs or both must be heap
-  // objects with same map.
-  Label value_is_heap_object;
-  __ JumpIfNotSmi(value_reg, &value_is_heap_object, Label::kNear);
-  __ JumpIfNotSmi(cell_value_reg, &slow_case, Label::kNear);
-  // Old and new values are SMIs, no need for a write barrier here.
-  __ bind(&fast_smi_case);
-  __ mov(FieldOperand(cell_reg, PropertyCell::kValueOffset), value_reg);
-  __ Ret();
-  __ bind(&value_is_heap_object);
-  __ JumpIfSmi(cell_value_reg, &slow_case, Label::kNear);
-  Register cell_value_map_reg = cell_value_reg;
-  __ mov(cell_value_map_reg,
-         FieldOperand(cell_value_reg, HeapObject::kMapOffset));
-  __ cmp(cell_value_map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
-  __ j(equal, &fast_heapobject_case);
-
-  // Fallback to the runtime.
-  __ bind(&slow_case);
-  __ SmiTag(slot_reg);
-  __ Pop(cell_reg);  // Pop return address.
-  __ Push(slot_reg);
-  __ Push(value_reg);
-  __ Push(cell_reg);  // Push return address.
-  __ TailCallRuntime(is_strict(language_mode())
-                         ? Runtime::kStoreGlobalViaContext_Strict
-                         : Runtime::kStoreGlobalViaContext_Sloppy);
-}
-
-
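
StoreGlobalViaContextStub's fast paths dispatch on the PropertyCell details:
a mutable data cell takes a direct store, re-storing the cell's current value
succeeds when the cell is not READ_ONLY, and a constant-type cell accepts a
value of matching type (both smis, or both heap objects with the same map);
everything else tail-calls the runtime. The decision tree, flattened into
hypothetical C++:

    enum class CellType { kMutable, kConstant, kConstantType, kUndefined };

    // Approximate model of the removed fast paths; 'same_type' stands for
    // the smi/smi or same-map heap-object comparison done above.
    bool TryFastStore(CellType type, bool read_only, bool same_value,
                      bool same_type) {
      if (type == CellType::kMutable && !read_only) return true;  // store
      if (same_value && !read_only) return true;                  // no-op
      if (type == CellType::kConstantType && !read_only && same_type)
        return true;
      return false;  // bind(&slow_case): fall back to the runtime
    }
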
 // Generates an Operand for saving parameters after PrepareCallApiFunction.
 static Operand ApiParameterOperand(int index) {
   return Operand(esp, index * kPointerSize);
@@ -4951,7 +4508,7 @@
   __ PushRoot(Heap::kUndefinedValueRootIndex);
   __ push(Immediate(ExternalReference::isolate_address(isolate())));
   __ push(holder);
-  __ push(Immediate(Smi::FromInt(0)));  // should_throw_on_error -> false
+  __ push(Immediate(Smi::kZero));  // should_throw_on_error -> false
   __ push(FieldOperand(callback, AccessorInfo::kNameOffset));
   __ push(scratch);  // Restore return address.
 
diff --git a/src/x87/interface-descriptors-x87.cc b/src/x87/interface-descriptors-x87.cc
index 85b26ca..70b110a 100644
--- a/src/x87/interface-descriptors-x87.cc
+++ b/src/x87/interface-descriptors-x87.cc
@@ -31,6 +31,7 @@
 
 const Register LoadWithVectorDescriptor::VectorRegister() { return ebx; }
 
+const Register LoadICProtoArrayDescriptor::HandlerRegister() { return edi; }
 
 const Register StoreDescriptor::ReceiverRegister() { return edx; }
 const Register StoreDescriptor::NameRegister() { return ecx; }
@@ -40,15 +41,9 @@
 const Register StoreWithVectorDescriptor::VectorRegister() { return ebx; }
 
 const Register StoreTransitionDescriptor::SlotRegister() { return no_reg; }
-
 const Register StoreTransitionDescriptor::VectorRegister() { return ebx; }
-
 const Register StoreTransitionDescriptor::MapRegister() { return edi; }
 
-const Register StoreGlobalViaContextDescriptor::SlotRegister() { return ebx; }
-const Register StoreGlobalViaContextDescriptor::ValueRegister() { return eax; }
-
-
 const Register StringCompareDescriptor::LeftRegister() { return edx; }
 const Register StringCompareDescriptor::RightRegister() { return eax; }
 
@@ -159,7 +154,7 @@
 
 void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {edi, edx, ebx};
+  Register registers[] = {edi, eax, edx, ebx};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
@@ -208,13 +203,6 @@
 }
 
 
-void RegExpConstructResultDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {ecx, ebx, eax};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
 void TransitionElementsKindDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {eax, ebx};
diff --git a/src/x87/macro-assembler-x87.cc b/src/x87/macro-assembler-x87.cc
index dafe985..ee81a68 100644
--- a/src/x87/macro-assembler-x87.cc
+++ b/src/x87/macro-assembler-x87.cc
@@ -701,20 +701,6 @@
   cmpb(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
 }
 
-
-void MacroAssembler::CheckFastElements(Register map,
-                                       Label* fail,
-                                       Label::Distance distance) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  STATIC_ASSERT(FAST_ELEMENTS == 2);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
-  cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Immediate(Map::kMaximumBitField2FastHoleyElementValue));
-  j(above, fail, distance);
-}
-
-
 void MacroAssembler::CheckFastObjectElements(Register map,
                                              Label* fail,
                                              Label::Distance distance) {
@@ -1238,79 +1224,6 @@
 }
 
 
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
-                                            Register scratch1,
-                                            Register scratch2,
-                                            Label* miss) {
-  Label same_contexts;
-
-  DCHECK(!holder_reg.is(scratch1));
-  DCHECK(!holder_reg.is(scratch2));
-  DCHECK(!scratch1.is(scratch2));
-
-  // Load current lexical context from the active StandardFrame, which
-  // may require crawling past STUB frames.
-  Label load_context;
-  Label has_context;
-  mov(scratch2, ebp);
-  bind(&load_context);
-  mov(scratch1,
-      MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
-  JumpIfNotSmi(scratch1, &has_context);
-  mov(scratch2, MemOperand(scratch2, CommonFrameConstants::kCallerFPOffset));
-  jmp(&load_context);
-  bind(&has_context);
-
-  // When generating debug code, make sure the lexical context is set.
-  if (emit_debug_code()) {
-    cmp(scratch1, Immediate(0));
-    Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
-  }
-  // Load the native context of the current context.
-  mov(scratch1, ContextOperand(scratch1, Context::NATIVE_CONTEXT_INDEX));
-
-  // Check the context is a native context.
-  if (emit_debug_code()) {
-    // Read the first word and compare to native_context_map.
-    cmp(FieldOperand(scratch1, HeapObject::kMapOffset),
-        isolate()->factory()->native_context_map());
-    Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
-  }
-
-  // Check if both contexts are the same.
-  cmp(scratch1, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
-  j(equal, &same_contexts);
-
-  // Compare security tokens, save holder_reg on the stack so we can use it
-  // as a temporary register.
-  //
-  // Check that the security token in the calling global object is
-  // compatible with the security token in the receiving global
-  // object.
-  mov(scratch2,
-      FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
-
-  // Check the context is a native context.
-  if (emit_debug_code()) {
-    cmp(scratch2, isolate()->factory()->null_value());
-    Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
-
-    // Read the first word and compare to native_context_map().
-    cmp(FieldOperand(scratch2, HeapObject::kMapOffset),
-        isolate()->factory()->native_context_map());
-    Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
-  }
-
-  int token_offset = Context::kHeaderSize +
-                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
-  mov(scratch1, FieldOperand(scratch1, token_offset));
-  cmp(scratch1, FieldOperand(scratch2, token_offset));
-  j(not_equal, miss);
-
-  bind(&same_contexts);
-}
-
-
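
CheckAccessGlobalProxy reduces to: crawl the frame chain to the calling
context, load its native context, and permit the access when that is also the
holder's native context or when both native contexts carry the same security
token. The decision, with hypothetical types:

    // Sketch of the access decision the removed assembler code implements.
    struct NativeContext {
      const void* security_token;
    };

    bool AccessAllowed(const NativeContext* caller,
                       const NativeContext* holder) {
      if (caller == holder) return true;  // bind(&same_contexts)
      return caller->security_token == holder->security_token;
    }
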
 // Compute the hash code from the untagged key.  This must be kept in sync with
 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
 // code-stubs-hydrogen.cc
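
For reference, the hash this code must stay in sync with is V8's seeded
integer hash; a C++ rendering of the shift/multiply sequence the assembler
emits, ending in the same 0x3fffffff mask as the and_() above (the seed is
the heap's hash seed, xored in first by GetNumberHash):

    #include <cstdint>

    uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
      uint32_t hash = key ^ seed;
      hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;           // hash += (hash << 3) + (hash << 11)
      hash = hash ^ (hash >> 16);
      return hash & 0x3fffffff;
    }
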
@@ -1355,82 +1268,6 @@
   and_(r0, 0x3fffffff);
 }
 
-
-
-void MacroAssembler::LoadFromNumberDictionary(Label* miss,
-                                              Register elements,
-                                              Register key,
-                                              Register r0,
-                                              Register r1,
-                                              Register r2,
-                                              Register result) {
-  // Register use:
-  //
-  // elements - holds the slow-case elements of the receiver and is unchanged.
-  //
-  // key      - holds the smi key on entry and is unchanged.
-  //
-  // Scratch registers:
-  //
-  // r0 - holds the untagged key on entry and holds the hash once computed.
-  //
-  // r1 - used to hold the capacity mask of the dictionary
-  //
-  // r2 - used for the index into the dictionary.
-  //
-  // result - holds the result on exit if the load succeeds and we fall through.
-
-  Label done;
-
-  GetNumberHash(r0, r1);
-
-  // Compute capacity mask.
-  mov(r1, FieldOperand(elements, SeededNumberDictionary::kCapacityOffset));
-  shr(r1, kSmiTagSize);  // convert smi to int
-  dec(r1);
-
-  // Generate an unrolled loop that performs a few probes before giving up.
-  for (int i = 0; i < kNumberDictionaryProbes; i++) {
-    // Use r2 for index calculations and keep the hash intact in r0.
-    mov(r2, r0);
-    // Compute the masked index: (hash + i + i * i) & mask.
-    if (i > 0) {
-      add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
-    }
-    and_(r2, r1);
-
-    // Scale the index by multiplying by the entry size.
-    DCHECK(SeededNumberDictionary::kEntrySize == 3);
-    lea(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3
-
-    // Check if the key matches.
-    cmp(key, FieldOperand(elements,
-                          r2,
-                          times_pointer_size,
-                          SeededNumberDictionary::kElementsStartOffset));
-    if (i != (kNumberDictionaryProbes - 1)) {
-      j(equal, &done);
-    } else {
-      j(not_equal, miss);
-    }
-  }
-
-  bind(&done);
-  // Check that the value is a field property.
-  const int kDetailsOffset =
-      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
-  DCHECK_EQ(DATA, 0);
-  test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
-       Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
-  j(not_zero, miss);
-
-  // Get the value at the masked, scaled index.
-  const int kValueOffset =
-      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
-  mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
-}
-
-
 void MacroAssembler::LoadAllocationTopHelper(Register result,
                                              Register scratch,
                                              AllocationFlags flags) {
@@ -1912,74 +1749,6 @@
   STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
 }
 
-
-// Copy memory, byte-by-byte, from source to destination.  Not optimized for
-// long or aligned copies.  The contents of scratch and length are destroyed.
-// Source and destination are incremented by length.
-// Many variants of movsb, loop unrolling, word moves, and indexed operands
-// have been tried here already, and this is fastest.
-// A simpler loop is faster on small copies, but 30% slower on large ones.
-// The cld() instruction must have been emitted, to clear the direction flag,
-// before calling this function.
-void MacroAssembler::CopyBytes(Register source,
-                               Register destination,
-                               Register length,
-                               Register scratch) {
-  Label short_loop, len4, len8, len12, done, short_string;
-  DCHECK(source.is(esi));
-  DCHECK(destination.is(edi));
-  DCHECK(length.is(ecx));
-  cmp(length, Immediate(4));
-  j(below, &short_string, Label::kNear);
-
-  // Because source is 4-byte aligned in our uses of this function,
-  // we keep source aligned for the rep_movs call by copying the odd bytes
-  // at the end of the ranges.
-  mov(scratch, Operand(source, length, times_1, -4));
-  mov(Operand(destination, length, times_1, -4), scratch);
-
-  cmp(length, Immediate(8));
-  j(below_equal, &len4, Label::kNear);
-  cmp(length, Immediate(12));
-  j(below_equal, &len8, Label::kNear);
-  cmp(length, Immediate(16));
-  j(below_equal, &len12, Label::kNear);
-
-  mov(scratch, ecx);
-  shr(ecx, 2);
-  rep_movs();
-  and_(scratch, Immediate(0x3));
-  add(destination, scratch);
-  jmp(&done, Label::kNear);
-
-  bind(&len12);
-  mov(scratch, Operand(source, 8));
-  mov(Operand(destination, 8), scratch);
-  bind(&len8);
-  mov(scratch, Operand(source, 4));
-  mov(Operand(destination, 4), scratch);
-  bind(&len4);
-  mov(scratch, Operand(source, 0));
-  mov(Operand(destination, 0), scratch);
-  add(destination, length);
-  jmp(&done, Label::kNear);
-
-  bind(&short_string);
-  test(length, length);
-  j(zero, &done, Label::kNear);
-
-  bind(&short_loop);
-  mov_b(scratch, Operand(source, 0));
-  mov_b(Operand(destination, 0), scratch);
-  inc(source);
-  inc(destination);
-  dec(length);
-  j(not_zero, &short_loop);
-
-  bind(&done);
-}
-
-
 void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
                                                 Register end_address,
                                                 Register filler) {
@@ -2094,20 +1863,6 @@
   return has_frame_ || !stub->SometimesSetsUpAFrame();
 }
 
-
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
-  // The assert checks that the constants for the maximum number of digits
-  // for an array index cached in the hash field and the number of bits
-  // reserved for it does not conflict.
-  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
-         (1 << String::kArrayIndexValueBits));
-  if (!index.is(hash)) {
-    mov(index, hash);
-  }
-  DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
-}
-
-
 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
                                  SaveFPRegsMode save_doubles) {
   // If the expected number of arguments of the runtime function is
@@ -2937,7 +2692,7 @@
   cmp(index, FieldOperand(string, String::kLengthOffset));
   Check(less, kIndexIsTooLarge);
 
-  cmp(index, Immediate(Smi::FromInt(0)));
+  cmp(index, Immediate(Smi::kZero));
   Check(greater_equal, kIndexIsNegative);
 
   // Restore the index
@@ -3190,7 +2945,7 @@
 
   // For all objects but the receiver, check that the cache is empty.
   EnumLength(edx, ebx);
-  cmp(edx, Immediate(Smi::FromInt(0)));
+  cmp(edx, Immediate(Smi::kZero));
   j(not_equal, call_runtime);
 
   bind(&start);
@@ -3222,20 +2977,21 @@
   ExternalReference new_space_allocation_top =
       ExternalReference::new_space_allocation_top_address(isolate());
   const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
-  const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+  const int kMementoLastWordOffset =
+      kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
 
   // Bail out if the object is not in new space.
   JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
   // If the object is in new space, we need to check whether it is on the same
   // page as the current top.
-  lea(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+  lea(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
   xor_(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
   test(scratch_reg, Immediate(~Page::kPageAlignmentMask));
   j(zero, &top_check);
   // The object is on a different page than allocation top. Bail out if the
   // object sits on the page boundary as no memento can follow and we cannot
   // touch the memory following it.
-  lea(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+  lea(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
   xor_(scratch_reg, receiver_reg);
   test(scratch_reg, Immediate(~Page::kPageAlignmentMask));
   j(not_zero, no_memento_found);
@@ -3244,9 +3000,9 @@
   // If top is on the same page as the current object, we need to check whether
   // we are below top.
   bind(&top_check);
-  lea(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+  lea(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
   cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
-  j(greater, no_memento_found);
+  j(greater_equal, no_memento_found);
   // Memento map check.
   bind(&map_check);
   mov(scratch_reg, Operand(receiver_reg, kMementoMapOffset));
diff --git a/src/x87/macro-assembler-x87.h b/src/x87/macro-assembler-x87.h
index bdd3c03..6bb6359 100644
--- a/src/x87/macro-assembler-x87.h
+++ b/src/x87/macro-assembler-x87.h
@@ -381,11 +381,6 @@
   // Compare instance type for map.
   void CmpInstanceType(Register map, InstanceType type);
 
-  // Check if a map for a JSObject indicates that the object has fast elements.
-  // Jump to the specified label if it does not.
-  void CheckFastElements(Register map, Label* fail,
-                         Label::Distance distance = Label::kFar);
-
   // Check if a map for a JSObject indicates that the object can have both smi
   // and HeapObject elements.  Jump to the specified label if it does not.
   void CheckFastObjectElements(Register map, Label* fail,
@@ -593,18 +588,8 @@
   // ---------------------------------------------------------------------------
   // Inline caching support
 
-  // Generate code for checking access rights - used for security checks
-  // on access to global objects across environments. The holder register
-  // is left untouched, but the scratch register is clobbered.
-  void CheckAccessGlobalProxy(Register holder_reg, Register scratch1,
-                              Register scratch2, Label* miss);
-
   void GetNumberHash(Register r0, Register scratch);
 
-  void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
-                                Register r0, Register r1, Register r2,
-                                Register result);
-
   // ---------------------------------------------------------------------------
   // Allocation support
 
@@ -674,12 +659,6 @@
   void AllocateJSValue(Register result, Register constructor, Register value,
                        Register scratch, Label* gc_required);
 
-  // Copy memory, byte-by-byte, from source to destination.  Not optimized for
-  // long or aligned copies.
-  // The contents of length and scratch are destroyed.
-  void CopyBytes(Register source, Register destination, Register length,
-                 Register scratch);
-
   // Initialize fields with filler values.  Fields starting at |current_address|
   // not including |end_address| are overwritten with the value in |filler|.  At
   // the end of the loop, |current_address| takes the value of |end_address|.
@@ -712,12 +691,6 @@
   void TryGetFunctionPrototype(Register function, Register result,
                                Register scratch, Label* miss);
 
-  // Picks out an array index from the hash field.
-  // Register use:
-  //   hash - holds the index's hash. Clobbered.
-  //   index - holds the overwritten index on exit.
-  void IndexFromHash(Register hash, Register index);
-
   // ---------------------------------------------------------------------------
   // Runtime calls
 
@@ -810,7 +783,10 @@
   void Drop(int element_count);
 
   void Call(Label* target) { call(target); }
-  void Call(Handle<Code> target, RelocInfo::Mode rmode) { call(target, rmode); }
+  void Call(Handle<Code> target, RelocInfo::Mode rmode,
+            TypeFeedbackId id = TypeFeedbackId::None()) {
+    call(target, rmode, id);
+  }
   void Jump(Handle<Code> target, RelocInfo::Mode rmode) { jmp(target, rmode); }
   void Push(Register src) { push(src); }
   void Push(const Operand& src) { push(src); }
diff --git a/src/zone/accounting-allocator.cc b/src/zone/accounting-allocator.cc
index 663ea32..587e09d 100644
--- a/src/zone/accounting-allocator.cc
+++ b/src/zone/accounting-allocator.cc
@@ -13,6 +13,72 @@
 namespace v8 {
 namespace internal {
 
+AccountingAllocator::AccountingAllocator() : unused_segments_mutex_() {
+  static const size_t kDefaultBucketMaxSize = 5;
+
+  memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
+  std::fill(unused_segments_heads_, unused_segments_heads_ + kNumberBuckets,
+            nullptr);
+  std::fill(unused_segments_sizes_, unused_segments_sizes_ + kNumberBuckets, 0);
+  std::fill(unused_segments_max_sizes_,
+            unused_segments_max_sizes_ + kNumberBuckets, kDefaultBucketMaxSize);
+}
+
+AccountingAllocator::~AccountingAllocator() { ClearPool(); }
+
+void AccountingAllocator::MemoryPressureNotification(
+    MemoryPressureLevel level) {
+  memory_pressure_level_.SetValue(level);
+
+  if (level != MemoryPressureLevel::kNone) {
+    ClearPool();
+  }
+}
+
+void AccountingAllocator::ConfigureSegmentPool(const size_t max_pool_size) {
+  // The sum of the bytes of one segment of each size.
+  static const size_t full_size = (size_t(1) << (kMaxSegmentSizePower + 1)) -
+                                  (size_t(1) << kMinSegmentSizePower);
+  size_t fits_fully = max_pool_size / full_size;
+
+  base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);
+
+  // We assume that few zones (fewer than 'fits_fully') are active at the same
+  // time. When zones grow regularly, they keep requesting segments of
+  // increasing size each time, so we try to keep an equal number of segments
+  // of each size in the pool.
+  // The remaining space is used to make more room for an 'incomplete set' of
+  // segments, beginning with the smaller ones.
+  // This code works best if max_pool_size is a multiple of full_size. If
+  // max_pool_size is not a sum of segment sizes, the actual pool size might
+  // be smaller than max_pool_size. Note that no actual memory gets wasted,
+  // though.
+  // TODO(heimbuef): Determine a better strategy for generating a segment size
+  // distribution that is closer to real/benchmark use cases and uses the
+  // given max_pool_size more efficiently.
+  size_t total_size = fits_fully * full_size;
+
+  for (size_t power = 0; power < kNumberBuckets; ++power) {
+    if (total_size + (size_t(1) << (power + kMinSegmentSizePower)) <=
+        max_pool_size) {
+      unused_segments_max_sizes_[power] = fits_fully + 1;
+      total_size += size_t(1) << (power + kMinSegmentSizePower);
+    } else {
+      unused_segments_max_sizes_[power] = fits_fully;
+    }
+  }
+}
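A standalone sketch of the capacity computation above, for intuition (not part of this change; the constants mirror kMinSegmentSizePower = 13 and kMaxSegmentSizePower = 18 from accounting-allocator.h):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const std::size_t kMin = 13, kMax = 18, kBuckets = 1 + kMax - kMin;
      // One segment of each size class: 8 KB + 16 KB + ... + 256 KB = 504 KB.
      const std::size_t full_size =
          (std::size_t(1) << (kMax + 1)) - (std::size_t(1) << kMin);
      const std::size_t max_pool_size = 8 * 1024;  // cf. kMaxPoolSize*Device
      const std::size_t fits_fully = max_pool_size / full_size;  // 0 here
      std::size_t total = fits_fully * full_size;
      for (std::size_t power = 0; power < kBuckets; ++power) {
        const std::size_t segment_size = std::size_t(1) << (power + kMin);
        std::size_t cap = fits_fully;
        if (total + segment_size <= max_pool_size) {
          ++cap;
          total += segment_size;
        }
        std::printf("bucket %zu: %zu KB segments, max %zu pooled\n", power,
                    segment_size / 1024, cap);
      }
      return 0;
    }

With the default 8 KB budget only the smallest bucket keeps one segment, which is exactly what ConfigureSegmentPool computes.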
+
+Segment* AccountingAllocator::GetSegment(size_t bytes) {
+  Segment* result = GetSegmentFromPool(bytes);
+  if (result == nullptr) {
+    result = AllocateSegment(bytes);
+    // AllocateSegment returns nullptr when malloc fails; only initialize a
+    // successful allocation.
+    if (result != nullptr) result->Initialize(bytes);
+  }
+
+  return result;
+}
+
 Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
   void* memory = malloc(bytes);
   if (memory) {
@@ -26,6 +92,16 @@
   return reinterpret_cast<Segment*>(memory);
 }
 
+void AccountingAllocator::ReturnSegment(Segment* segment) {
+  segment->ZapContents();
+
+  if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) {
+    FreeSegment(segment);
+  } else if (!AddSegmentToPool(segment)) {
+    FreeSegment(segment);
+  }
+}
+
 void AccountingAllocator::FreeSegment(Segment* memory) {
   base::NoBarrier_AtomicIncrement(
       &current_memory_usage_, -static_cast<base::AtomicWord>(memory->size()));
@@ -41,5 +117,87 @@
   return base::NoBarrier_Load(&max_memory_usage_);
 }
 
+size_t AccountingAllocator::GetCurrentPoolSize() const {
+  return base::NoBarrier_Load(&current_pool_size_);
+}
+
+Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) {
+  if (requested_size > (1 << kMaxSegmentSizePower)) {
+    return nullptr;
+  }
+
+  size_t power = kMinSegmentSizePower;
+  while (requested_size > (static_cast<size_t>(1) << power)) power++;
+
+  DCHECK_GE(power, kMinSegmentSizePower + 0);
+  power -= kMinSegmentSizePower;
+
+  Segment* segment;
+  {
+    base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);
+
+    segment = unused_segments_heads_[power];
+
+    if (segment != nullptr) {
+      unused_segments_heads_[power] = segment->next();
+      segment->set_next(nullptr);
+
+      unused_segments_sizes_[power]--;
+      base::NoBarrier_AtomicIncrement(
+          &current_pool_size_, -static_cast<base::AtomicWord>(segment->size()));
+    }
+  }
+
+  if (segment) {
+    DCHECK_GE(segment->size(), requested_size);
+  }
+  return segment;
+}
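The size-to-bucket mapping used above, as a minimal standalone sketch (the constant mirrors kMinSegmentSizePower; the function name is illustrative):

    #include <cstddef>

    // Rounds a request up to the next power-of-two size class and returns the
    // bucket index relative to the smallest class (2^13 = 8 KB).
    std::size_t BucketForRequest(std::size_t requested) {
      const std::size_t kMin = 13;
      std::size_t power = kMin;
      while (requested > (std::size_t(1) << power)) ++power;
      return power - kMin;  // e.g. 10000 bytes -> 16 KB class -> bucket 1
    }

AddSegmentToPool does the inverse, rounding down, so a pooled segment is always at least as large as any request mapped to its bucket.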
+
+bool AccountingAllocator::AddSegmentToPool(Segment* segment) {
+  size_t size = segment->size();
+
+  if (size >= (1 << (kMaxSegmentSizePower + 1))) return false;
+
+  if (size < (1 << kMinSegmentSizePower)) return false;
+
+  size_t power = kMaxSegmentSizePower;
+
+  while (size < (static_cast<size_t>(1) << power)) power--;
+
+  DCHECK_GE(power, kMinSegmentSizePower + 0);
+  power -= kMinSegmentSizePower;
+
+  {
+    base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);
+
+    if (unused_segments_sizes_[power] >= unused_segments_max_sizes_[power]) {
+      return false;
+    }
+
+    segment->set_next(unused_segments_heads_[power]);
+    unused_segments_heads_[power] = segment;
+    base::NoBarrier_AtomicIncrement(&current_pool_size_, size);
+    unused_segments_sizes_[power]++;
+  }
+
+  return true;
+}
+
+void AccountingAllocator::ClearPool() {
+  base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);
+
+  for (size_t power = 0; power <= kMaxSegmentSizePower - kMinSegmentSizePower;
+       power++) {
+    Segment* current = unused_segments_heads_[power];
+    while (current) {
+      Segment* next = current->next();
+      FreeSegment(current);
+      current = next;
+    }
+    unused_segments_heads_[power] = nullptr;
+  }
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/zone/accounting-allocator.h b/src/zone/accounting-allocator.h
index 31016a5..c6bf7a7 100644
--- a/src/zone/accounting-allocator.h
+++ b/src/zone/accounting-allocator.h
@@ -13,24 +13,77 @@
 #include "src/base/platform/semaphore.h"
 #include "src/base/platform/time.h"
 #include "src/zone/zone-segment.h"
+#include "testing/gtest/include/gtest/gtest_prod.h"
 
 namespace v8 {
 namespace internal {
 
 class V8_EXPORT_PRIVATE AccountingAllocator {
  public:
-  AccountingAllocator() = default;
-  virtual ~AccountingAllocator() = default;
+  static const size_t kMaxPoolSizeLowMemoryDevice = 8ul * KB;
+  static const size_t kMaxPoolSizeMediumMemoryDevice = 8ul * KB;
+  static const size_t kMaxPoolSizeHighMemoryDevice = 8ul * KB;
+  static const size_t kMaxPoolSizeHugeMemoryDevice = 8ul * KB;
 
-  virtual Segment* AllocateSegment(size_t bytes);
-  virtual void FreeSegment(Segment* memory);
+  AccountingAllocator();
+  virtual ~AccountingAllocator();
+
+  // Gets an empty segment from the pool or creates a new one.
+  virtual Segment* GetSegment(size_t bytes);
+  // Return unneeded segments to either insert them into the pool or release
+  // them if the pool is already full or memory pressure is high.
+  virtual void ReturnSegment(Segment* memory);
 
   size_t GetCurrentMemoryUsage() const;
   size_t GetMaxMemoryUsage() const;
 
+  size_t GetCurrentPoolSize() const;
+
+  void MemoryPressureNotification(MemoryPressureLevel level);
+  // Configures the zone segment pool size limits so the pool does not
+  // grow bigger than max_pool_size.
+  // TODO(heimbuef): Do not accept segments to pool that are larger than
+  // their size class requires. Sometimes the zones generate weird segments.
+  void ConfigureSegmentPool(const size_t max_pool_size);
+
+  virtual void ZoneCreation(const Zone* zone) {}
+  virtual void ZoneDestruction(const Zone* zone) {}
+
  private:
+  FRIEND_TEST(Zone, SegmentPoolConstraints);
+
+  static const size_t kMinSegmentSizePower = 13;
+  static const size_t kMaxSegmentSizePower = 18;
+
+  STATIC_ASSERT(kMinSegmentSizePower <= kMaxSegmentSizePower);
+
+  static const size_t kNumberBuckets =
+      1 + kMaxSegmentSizePower - kMinSegmentSizePower;
+
+  // Allocates a new segment. Returns nullptr on failed allocation.
+  Segment* AllocateSegment(size_t bytes);
+  void FreeSegment(Segment* memory);
+
+  // Returns a segment from the pool of at least the requested size.
+  Segment* GetSegmentFromPool(size_t requested_size);
+  // Tries to add a segment to the pool. Returns false if the pool is full.
+  bool AddSegmentToPool(Segment* segment);
+
+  // Empties the pool, freeing all segments held in it.
+  void ClearPool();
+
+  Segment* unused_segments_heads_[kNumberBuckets];
+
+  size_t unused_segments_sizes_[kNumberBuckets];
+  size_t unused_segments_max_sizes_[kNumberBuckets];
+
+  base::Mutex unused_segments_mutex_;
+
   base::AtomicWord current_memory_usage_ = 0;
   base::AtomicWord max_memory_usage_ = 0;
+  base::AtomicWord current_pool_size_ = 0;
+
+  base::AtomicValue<MemoryPressureLevel> memory_pressure_level_;
 
   DISALLOW_COPY_AND_ASSIGN(AccountingAllocator);
 };
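A hedged sketch of the new pool API end to end (not part of this change; KB and MemoryPressureLevel come from the surrounding V8 headers):

    AccountingAllocator allocator;
    allocator.ConfigureSegmentPool(
        AccountingAllocator::kMaxPoolSizeMediumMemoryDevice);

    Segment* segment = allocator.GetSegment(16 * KB);  // pool hit or malloc
    // ... hand the segment to a Zone and allocate from it ...
    allocator.ReturnSegment(segment);  // zapped, then pooled or freed

    // Under pressure the pool is cleared and subsequently bypassed:
    allocator.MemoryPressureNotification(MemoryPressureLevel::kCritical);
    DCHECK_EQ(0u, allocator.GetCurrentPoolSize());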
diff --git a/src/zone/zone-allocator.h b/src/zone/zone-allocator.h
index 8370d73..1e2862a 100644
--- a/src/zone/zone-allocator.h
+++ b/src/zone/zone-allocator.h
@@ -26,6 +26,8 @@
     typedef zone_allocator<O> other;
   };
 
+  // TODO(bbudge) Remove when V8 updates to MSVS 2015. See crbug.com/603131.
+  zone_allocator() : zone_(nullptr) { UNREACHABLE(); }
   explicit zone_allocator(Zone* zone) throw() : zone_(zone) {}
   explicit zone_allocator(const zone_allocator& other) throw()
       : zone_(other.zone_) {}
@@ -62,7 +64,6 @@
   Zone* zone() { return zone_; }
 
  private:
-  zone_allocator();
   Zone* zone_;
 };
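For context, a sketch of the allocator bound to a standard container; MSVS 2015's containers may default-construct the allocator internally, which is why the UNREACHABLE() stub above must exist at all (the container choice is illustrative):

    #include <vector>

    void Example(v8::internal::Zone* zone) {
      v8::internal::zone_allocator<int> alloc(zone);
      std::vector<int, v8::internal::zone_allocator<int>> numbers(alloc);
      numbers.push_back(42);  // element storage is carved out of the Zone
    }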
 
diff --git a/src/zone/zone-chunk-list.h b/src/zone/zone-chunk-list.h
new file mode 100644
index 0000000..f977a0c
--- /dev/null
+++ b/src/zone/zone-chunk-list.h
@@ -0,0 +1,452 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdlib.h>
+
+#include "src/globals.h"
+#include "src/zone/zone.h"
+
+#ifndef V8_SRC_ZONE_ZONE_CHUNK_LIST_H_
+#define V8_SRC_ZONE_ZONE_CHUNK_LIST_H_
+
+namespace v8 {
+namespace internal {
+
+template <typename T>
+class ZoneChunkListIterator;
+template <typename T>
+class ForwardZoneChunkListIterator;
+template <typename T>
+class ReverseZoneChunkListIterator;
+
+// A zone-backed hybrid of a vector and a linked list. Use it if you need a
+// collection that
+// * needs to grow indefinitely,
+// * will mostly grow at the back, but may sometimes grow in front as well
+// (preferably in batches),
+// * needs to have very low overhead,
+// * offers bidirectional iterators for forward and backward traversal,
+// * offers relatively fast seeking,
+// * can be rewound without freeing the backing store.
+// This list maintains a doubly-linked list of chunks. When a chunk is
+// filled up, a new one gets appended. New chunks appended at the end will
+// grow in size up to a certain limit to avoid over-allocation and to keep
+// the zone clean. (A usage sketch follows at the end of this file.)
+template <typename T>
+class ZoneChunkList : public ZoneObject {
+ public:
+  enum class StartMode {
+    // The list will not allocate a starting chunk. Use if you expect your
+    // list to remain empty in many cases.
+    kEmpty = 0,
+    // The list will start with a small initial chunk. Subsequent chunks will
+    // get bigger over time.
+    kSmall = 8,
+    // The list will start with one chunk at maximum size. Use this if you
+    // expect your list to contain many items to avoid growing chunks.
+    kBig = 256
+  };
+
+  explicit ZoneChunkList(Zone* zone, StartMode start_mode = StartMode::kEmpty)
+      : zone_(zone) {
+    if (start_mode != StartMode::kEmpty) {
+      front_ = NewChunk(static_cast<uint32_t>(start_mode));
+      back_ = front_;
+    }
+  }
+
+  size_t size() const;
+
+  T& front() const;
+  T& back() const;
+
+  void push_back(const T& item);
+  void pop_back();
+
+  // Will push a separate chunk to the front of the chunk-list.
+  // Very memory-inefficient. Use it sparingly! If you have many items to
+  // add in front, consider using 'push_front_many'.
+  void push_front(const T& item);
+  // TODO(heimbuef): Add 'push_front_many'.
+
+  // Truncates the list so that at most 'limit' elements remain. Does not
+  // free the actual memory, since it is zone allocated.
+  void Rewind(const size_t limit = 0);
+
+  // Quickly scans the list to retrieve the element at the given index. Will
+  // *not* check bounds.
+  ForwardZoneChunkListIterator<T> Find(const size_t index);
+  ForwardZoneChunkListIterator<const T> Find(const size_t index) const;
+  // TODO(heimbuef): Add 'rFind', seeking from the end and returning a
+  // reverse iterator.
+
+  void CopyTo(T* ptr);
+
+  ForwardZoneChunkListIterator<T> begin();
+  ForwardZoneChunkListIterator<T> end();
+  ReverseZoneChunkListIterator<T> rbegin();
+  ReverseZoneChunkListIterator<T> rend();
+  ForwardZoneChunkListIterator<const T> begin() const;
+  ForwardZoneChunkListIterator<const T> end() const;
+  ReverseZoneChunkListIterator<const T> rbegin() const;
+  ReverseZoneChunkListIterator<const T> rend() const;
+
+ private:
+  friend class ZoneChunkListIterator<T>;
+  friend class ForwardZoneChunkListIterator<T>;
+  friend class ReverseZoneChunkListIterator<T>;
+  static const uint32_t kMaxChunkCapacity = 256u;
+
+  STATIC_ASSERT(kMaxChunkCapacity == static_cast<uint32_t>(StartMode::kBig));
+
+  struct Chunk {
+    uint32_t capacity_ = 0;
+    uint32_t position_ = 0;
+    Chunk* next_ = nullptr;
+    Chunk* previous_ = nullptr;
+    T* items() { return reinterpret_cast<T*>(this + 1); }
+  };
+
+  Chunk* NewChunk(const uint32_t capacity) {
+    Chunk* chunk =
+        new (zone_->New(sizeof(Chunk) + capacity * sizeof(T))) Chunk();
+    chunk->capacity_ = capacity;
+    return chunk;
+  }
+
+  struct SeekResult {
+    Chunk* chunk_;
+    uint32_t chunk_index_;
+  };
+
+  // Returns the chunk and relative index of the element at the given global
+  // index. Will skip entire chunks and is therefore faster than iterating.
+  SeekResult SeekIndex(size_t index) const;
+
+  Zone* zone_;
+
+  size_t size_ = 0;
+  Chunk* front_ = nullptr;
+  Chunk* back_ = nullptr;
+
+  DISALLOW_COPY_AND_ASSIGN(ZoneChunkList);
+};
+
+template <typename T>
+class ZoneChunkListIterator {
+ public:
+  T& operator*() { return current_->items()[position_]; }
+  bool operator==(const ZoneChunkListIterator& other) {
+    return other.current_ == current_ && other.position_ == position_;
+  }
+  bool operator!=(const ZoneChunkListIterator& other) {
+    return !operator==(other);
+  }
+
+ protected:
+  ZoneChunkListIterator(typename ZoneChunkList<T>::Chunk* current,
+                        size_t position)
+      : current_(current), position_(position) {}
+
+  void MoveNext() {
+    ++position_;
+    if (position_ >= current_->capacity_) {
+      current_ = current_->next_;
+      position_ = 0;
+    }
+  }
+
+  void MoveRNext() {
+    if (position_ == 0) {
+      current_ = current_->previous_;
+      position_ = current_ ? current_->capacity_ - 1 : 0;
+    } else {
+      --position_;
+    }
+  }
+
+  typename ZoneChunkList<T>::Chunk* current_;
+  size_t position_;
+};
+
+template <typename T>
+class ForwardZoneChunkListIterator : public ZoneChunkListIterator<T> {
+  using ZoneChunkListIterator<T>::current_;
+  using ZoneChunkListIterator<T>::position_;
+  using ZoneChunkListIterator<T>::MoveNext;
+  using ZoneChunkListIterator<T>::MoveRNext;
+
+ public:
+  ForwardZoneChunkListIterator(typename ZoneChunkList<T>::Chunk* current,
+                               size_t position)
+      : ZoneChunkListIterator<T>(current, position) {}
+
+  ForwardZoneChunkListIterator& operator++() {
+    MoveNext();
+    return *this;
+  }
+
+  ForwardZoneChunkListIterator operator++(int) {
+    ForwardZoneChunkListIterator<T> clone(*this);
+    MoveNext();
+    return clone;
+  }
+
+  ForwardZoneChunkListIterator& operator--() {
+    MoveRNext();
+    return *this;
+  }
+
+  ForwardZoneChunkListIterator operator--(int) {
+    ForwardZoneChunkListIterator<T> clone(*this);
+    MoveRNext();
+    return clone;
+  }
+
+ private:
+  friend class ZoneChunkList<T>;
+  static ForwardZoneChunkListIterator<T> Begin(ZoneChunkList<T>* list) {
+    return ForwardZoneChunkListIterator<T>(list->front_, 0);
+  }
+  static ForwardZoneChunkListIterator<T> End(ZoneChunkList<T>* list) {
+    if (list->back_ == nullptr) return Begin(list);
+
+    DCHECK_LE(list->back_->position_, list->back_->capacity_);
+    if (list->back_->position_ == list->back_->capacity_) {
+      return ForwardZoneChunkListIterator<T>(nullptr, 0);
+    }
+
+    return ForwardZoneChunkListIterator<T>(list->back_, list->back_->position_);
+  }
+};
+
+template <typename T>
+class ReverseZoneChunkListIterator : public ZoneChunkListIterator<T> {
+  using ZoneChunkListIterator<T>::current_;
+  using ZoneChunkListIterator<T>::position_;
+  using ZoneChunkListIterator<T>::MoveNext;
+  using ZoneChunkListIterator<T>::MoveRNext;
+
+ public:
+  ReverseZoneChunkListIterator(typename ZoneChunkList<T>::Chunk* current,
+                               size_t position)
+      : ZoneChunkListIterator<T>(current, position) {}
+
+  ReverseZoneChunkListIterator& operator++() {
+    MoveRNext();
+    return *this;
+  }
+
+  ReverseZoneChunkListIterator operator++(int) {
+    ReverseZoneChunkListIterator<T> clone(*this);
+    MoveRNext();
+    return clone;
+  }
+
+  ReverseZoneChunkListIterator& operator--() {
+    MoveNext();
+    return *this;
+  }
+
+  ReverseZoneChunkListIterator operator--(int) {
+    ReverseZoneChunkListIterator<T> clone(*this);
+    MoveNext();
+    return clone;
+  }
+
+ private:
+  friend class ZoneChunkList<T>;
+  static ReverseZoneChunkListIterator<T> Begin(ZoneChunkList<T>* list) {
+    if (list->back_ == nullptr) return End(list);
+    if (list->back_->position_ == 0) {
+      if (list->back_->previous_ != nullptr) {
+        return ReverseZoneChunkListIterator<T>(
+            list->back_->previous_, list->back_->previous_->capacity_ - 1);
+      } else {
+        return End(list);
+      }
+    }
+    return ReverseZoneChunkListIterator<T>(list->back_,
+                                           list->back_->position_ - 1);
+  }
+  static ReverseZoneChunkListIterator<T> End(ZoneChunkList<T>* list) {
+    return ReverseZoneChunkListIterator<T>(nullptr, 0);
+  }
+};
+
+template <typename T>
+size_t ZoneChunkList<T>::size() const {
+  return size_;
+}
+
+template <typename T>
+T& ZoneChunkList<T>::front() const {
+  DCHECK_LT(size_t(0), size());
+  return front_->items()[0];
+}
+
+template <typename T>
+T& ZoneChunkList<T>::back() const {
+  DCHECK_LT(size_t(0), size());
+
+  if (back_->position_ == 0) {
+    return back_->previous_->items()[back_->previous_->position_ - 1];
+  } else {
+    return back_->items()[back_->position_ - 1];
+  }
+}
+
+template <typename T>
+void ZoneChunkList<T>::push_back(const T& item) {
+  if (back_ == nullptr) {
+    front_ = NewChunk(static_cast<uint32_t>(StartMode::kSmall));
+    back_ = front_;
+  }
+
+  DCHECK_LE(back_->position_, back_->capacity_);
+  if (back_->position_ == back_->capacity_) {
+    if (back_->next_ == nullptr) {
+      Chunk* chunk = NewChunk(Min(back_->capacity_ << 1, kMaxChunkCapacity));
+      back_->next_ = chunk;
+      chunk->previous_ = back_;
+    }
+    back_ = back_->next_;
+  }
+  back_->items()[back_->position_] = item;
+  ++back_->position_;
+  ++size_;
+}
+
+template <typename T>
+void ZoneChunkList<T>::pop_back() {
+  DCHECK_LT(size_t(0), size());
+  if (back_->position_ == 0) {
+    back_ = back_->previous_;
+  }
+  --back_->position_;
+  --size_;
+}
+
+template <typename T>
+void ZoneChunkList<T>::push_front(const T& item) {
+  Chunk* chunk = NewChunk(1);  // Yes, this gets really inefficient.
+  chunk->next_ = front_;
+  if (front_) {
+    front_->previous_ = chunk;
+  } else {
+    back_ = chunk;
+  }
+  front_ = chunk;
+
+  chunk->items()[0] = item;
+  chunk->position_ = 1;
+  ++size_;
+}
+
+template <typename T>
+typename ZoneChunkList<T>::SeekResult ZoneChunkList<T>::SeekIndex(
+    size_t index) const {
+  DCHECK_LT(index, size());
+  Chunk* current = front_;
+  while (index >= current->capacity_) {
+    index -= current->capacity_;
+    current = current->next_;
+  }
+  return {current, static_cast<uint32_t>(index)};
+}
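A worked trace of the seek above (hypothetical chunk sizes): with full chunks of capacity 8 and 16, SeekIndex(20) resolves as

    index = 20, chunk 0 (capacity 8):  20 >= 8  -> index -= 8, advance
    index = 12, chunk 1 (capacity 16): 12 < 16  -> return {chunk 1, 12}

so whole chunks are skipped and the cost is linear in chunks, not elements.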
+
+template <typename T>
+void ZoneChunkList<T>::Rewind(const size_t limit) {
+  if (limit >= size()) return;
+
+  SeekResult seek_result = SeekIndex(limit);
+  DCHECK_NOT_NULL(seek_result.chunk_);
+
+  // Do a partial rewind of the chunk containing the index.
+  seek_result.chunk_->position_ = seek_result.chunk_index_;
+
+  // Set back_ so iterators will work correctly.
+  back_ = seek_result.chunk_;
+
+  // Do full rewind of all subsequent chunks.
+  for (Chunk* current = seek_result.chunk_->next_; current != nullptr;
+       current = current->next_) {
+    current->position_ = 0;
+  }
+
+  size_ = limit;
+}
+
+template <typename T>
+ForwardZoneChunkListIterator<T> ZoneChunkList<T>::Find(const size_t index) {
+  SeekResult seek_result = SeekIndex(index);
+  return ForwardZoneChunkListIterator<T>(seek_result.chunk_,
+                                         seek_result.chunk_index_);
+}
+
+template <typename T>
+ForwardZoneChunkListIterator<const T> ZoneChunkList<T>::Find(
+    const size_t index) const {
+  SeekResult seek_result = SeekIndex(index);
+  return ForwardZoneChunkListIterator<const T>(seek_result.chunk_,
+                                               seek_result.chunk_index_);
+}
+
+template <typename T>
+void ZoneChunkList<T>::CopyTo(T* ptr) {
+  for (Chunk* current = front_; current != nullptr; current = current->next_) {
+    void* start = current->items();
+    void* end = current->items() + current->position_;
+    size_t bytes = static_cast<size_t>(reinterpret_cast<uintptr_t>(end) -
+                                       reinterpret_cast<uintptr_t>(start));
+
+    MemCopy(ptr, current->items(), bytes);
+    ptr += current->position_;
+  }
+}
+
+template <typename T>
+ForwardZoneChunkListIterator<T> ZoneChunkList<T>::begin() {
+  return ForwardZoneChunkListIterator<T>::Begin(this);
+}
+
+template <typename T>
+ForwardZoneChunkListIterator<T> ZoneChunkList<T>::end() {
+  return ForwardZoneChunkListIterator<T>::End(this);
+}
+
+template <typename T>
+ReverseZoneChunkListIterator<T> ZoneChunkList<T>::rbegin() {
+  return ReverseZoneChunkListIterator<T>::Begin(this);
+}
+
+template <typename T>
+ReverseZoneChunkListIterator<T> ZoneChunkList<T>::rend() {
+  return ReverseZoneChunkListIterator<T>::End(this);
+}
+
+template <typename T>
+ForwardZoneChunkListIterator<const T> ZoneChunkList<T>::begin() const {
+  return ForwardZoneChunkListIterator<const T>::Begin(this);
+}
+
+template <typename T>
+ForwardZoneChunkListIterator<const T> ZoneChunkList<T>::end() const {
+  return ForwardZoneChunkListIterator<const T>::End(this);
+}
+
+template <typename T>
+ReverseZoneChunkListIterator<const T> ZoneChunkList<T>::rbegin() const {
+  return ReverseZoneChunkListIterator<const T>::Begin(this);
+}
+
+template <typename T>
+ReverseZoneChunkListIterator<const T> ZoneChunkList<T>::rend() const {
+  return ReverseZoneChunkListIterator<const T>::End(this);
+}
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_SRC_ZONE_ZONE_CHUNK_LIST_H_
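As referenced in the class comment, a minimal usage sketch for the new container (not part of this change; assumes the declarations from this file and zone.h):

    AccountingAllocator allocator;
    Zone zone(&allocator, ZONE_NAME);

    ZoneChunkList<int> list(&zone, ZoneChunkList<int>::StartMode::kSmall);
    for (int i = 0; i < 100; ++i) list.push_back(i);  // chunks grow 8, 16, ...
    list.push_front(-1);  // costly: allocates a one-element chunk

    int sum = 0;
    for (int value : list) sum += value;  // forward iteration via begin()/end()

    list.Rewind(10);      // keep the first 10 elements; frees nothing
    int buffer[10];
    list.CopyTo(buffer);  // flatten the remaining elements into an array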
diff --git a/src/zone/zone-segment.cc b/src/zone/zone-segment.cc
index f63b530..1fa49d4 100644
--- a/src/zone/zone-segment.cc
+++ b/src/zone/zone-segment.cc
@@ -18,5 +18,6 @@
   memset(this, kZapDeadByte, sizeof(Segment));
 #endif
 }
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/zone/zone-segment.h b/src/zone/zone-segment.h
index d37cf56..e1c3ed0 100644
--- a/src/zone/zone-segment.h
+++ b/src/zone/zone-segment.h
@@ -20,11 +20,7 @@
 
 class Segment {
  public:
-  void Initialize(Segment* next, size_t size, Zone* zone) {
-    next_ = next;
-    size_ = size;
-    zone_ = zone;
-  }
+  void Initialize(size_t size) { size_ = size; }
 
   Zone* zone() const { return zone_; }
   void set_zone(Zone* const zone) { zone_ = zone; }
@@ -48,6 +44,7 @@
   // Constant byte value used for zapping dead memory in debug mode.
   static const unsigned char kZapDeadByte = 0xcd;
 #endif
+
   // Computes the address of the nth byte in this segment.
   Address address(size_t n) const { return Address(this) + n; }
 
diff --git a/src/zone/zone.cc b/src/zone/zone.cc
index 4272e17..7228081 100644
--- a/src/zone/zone.cc
+++ b/src/zone/zone.cc
@@ -41,17 +41,21 @@
 
 }  // namespace
 
-Zone::Zone(AccountingAllocator* allocator)
+Zone::Zone(AccountingAllocator* allocator, const char* name)
     : allocation_size_(0),
       segment_bytes_allocated_(0),
       position_(0),
       limit_(0),
       allocator_(allocator),
-      segment_head_(nullptr) {}
+      segment_head_(nullptr),
+      name_(name) {
+  allocator_->ZoneCreation(this);
+}
 
 Zone::~Zone() {
+  allocator_->ZoneDestruction(this);
+
   DeleteAll();
-  DeleteKeptSegment();
 
   DCHECK(segment_bytes_allocated_ == 0);
 }
@@ -92,73 +96,33 @@
 }
 
 void Zone::DeleteAll() {
-  // Find a segment with a suitable size to keep around.
-  Segment* keep = nullptr;
-  // Traverse the chained list of segments, zapping (in debug mode)
-  // and freeing every segment except the one we wish to keep.
+  // Traverse the chained list of segments and return them all to the
+  // allocator.
   for (Segment* current = segment_head_; current;) {
     Segment* next = current->next();
-    if (!keep && current->size() <= kMaximumKeptSegmentSize) {
-      // Unlink the segment we wish to keep from the list.
-      keep = current;
-      keep->set_next(nullptr);
-    } else {
-      size_t size = current->size();
-#ifdef DEBUG
-      // Un-poison first so the zapping doesn't trigger ASan complaints.
-      ASAN_UNPOISON_MEMORY_REGION(current, size);
-#endif
-      current->ZapContents();
-      segment_bytes_allocated_ -= size;
-      allocator_->FreeSegment(current);
-    }
+    size_t size = current->size();
+
+    // Un-poison the segment content so we can re-use or zap it later.
+    ASAN_UNPOISON_MEMORY_REGION(current->start(), current->capacity());
+
+    segment_bytes_allocated_ -= size;
+    allocator_->ReturnSegment(current);
     current = next;
   }
 
-  // If we have found a segment we want to keep, we must recompute the
-  // variables 'position' and 'limit' to prepare for future allocate
-  // attempts. Otherwise, we must clear the position and limit to
-  // force a new segment to be allocated on demand.
-  if (keep) {
-    Address start = keep->start();
-    position_ = RoundUp(start, kAlignment);
-    limit_ = keep->end();
-    // Un-poison so we can re-use the segment later.
-    ASAN_UNPOISON_MEMORY_REGION(start, keep->capacity());
-    keep->ZapContents();
-  } else {
-    position_ = limit_ = 0;
-  }
-
+  position_ = limit_ = 0;
   allocation_size_ = 0;
-  // Update the head segment to be the kept segment (if any).
-  segment_head_ = keep;
-}
-
-void Zone::DeleteKeptSegment() {
-  DCHECK(segment_head_ == nullptr || segment_head_->next() == nullptr);
-  if (segment_head_ != nullptr) {
-    size_t size = segment_head_->size();
-#ifdef DEBUG
-    // Un-poison first so the zapping doesn't trigger ASan complaints.
-    ASAN_UNPOISON_MEMORY_REGION(segment_head_, size);
-#endif
-    segment_head_->ZapContents();
-    segment_bytes_allocated_ -= size;
-    allocator_->FreeSegment(segment_head_);
-    segment_head_ = nullptr;
-  }
-
-  DCHECK(segment_bytes_allocated_ == 0);
+  segment_head_ = nullptr;
 }
 
 // Creates a new segment, sets it size, and pushes it to the front
 // of the segment chain. Returns the new segment.
-Segment* Zone::NewSegment(size_t size) {
-  Segment* result = allocator_->AllocateSegment(size);
-  segment_bytes_allocated_ += size;
+Segment* Zone::NewSegment(size_t requested_size) {
+  Segment* result = allocator_->GetSegment(requested_size);
+  if (result == nullptr) return nullptr;
+  DCHECK_GE(result->size(), requested_size);
+  segment_bytes_allocated_ += result->size();
   if (result != nullptr) {
-    result->Initialize(segment_head_, size, this);
+    result->set_zone(this);
+    result->set_next(segment_head_);
     segment_head_ = result;
   }
   return result;
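The net effect of the DeleteAll change, as a lifecycle sketch (illustrative, not part of this change): segments no longer linger per zone but are recycled through the shared allocator pool.

    AccountingAllocator allocator;
    allocator.ConfigureSegmentPool(
        AccountingAllocator::kMaxPoolSizeMediumMemoryDevice);
    {
      Zone zone(&allocator, ZONE_NAME);
      zone.New(4 * KB);  // NewSegment -> allocator.GetSegment(...)
    }                    // ~Zone -> DeleteAll -> ReturnSegment per segment
    {
      Zone zone(&allocator, ZONE_NAME);
      zone.New(4 * KB);  // likely served from the pool, no fresh malloc
    }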
diff --git a/src/zone/zone.h b/src/zone/zone.h
index 9ff259e..4e3b96e 100644
--- a/src/zone/zone.h
+++ b/src/zone/zone.h
@@ -14,6 +14,12 @@
 #include "src/splay-tree.h"
 #include "src/zone/accounting-allocator.h"
 
+#ifndef ZONE_NAME
+#define STRINGIFY(x) #x
+#define TOSTRING(x) STRINGIFY(x)
+#define ZONE_NAME __FILE__ ":" TOSTRING(__LINE__)
+#endif
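What the macro buys, on a hypothetical call site:

    // In src/foo/bar.cc (hypothetical) at line 123,
    Zone zone(isolate->allocator(), ZONE_NAME);
    // is equivalent to
    Zone zone(isolate->allocator(), "src/foo/bar.cc:123");
    // so Zone::name() pinpoints the allocation site for the new
    // ZoneCreation()/ZoneDestruction() accounting hooks.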
+
 namespace v8 {
 namespace internal {
 
@@ -25,13 +31,13 @@
 //
 // Note: There is no need to initialize the Zone; the first time an
 // allocation is attempted, a segment of memory will be requested
-// through a call to malloc().
+// through the allocator.
 //
 // Note: The implementation is inherently not thread safe. Do not use
 // from multi-threaded code.
 class V8_EXPORT_PRIVATE Zone final {
  public:
-  explicit Zone(AccountingAllocator* allocator);
+  Zone(AccountingAllocator* allocator, const char* name);
   ~Zone();
 
   // Allocate 'size' bytes of memory in the Zone; expands the Zone by
@@ -44,20 +50,14 @@
     return static_cast<T*>(New(length * sizeof(T)));
   }
 
-  // Deletes all objects and free all memory allocated in the Zone. Keeps one
-  // small (size <= kMaximumKeptSegmentSize) segment around if it finds one.
-  void DeleteAll();
-
-  // Deletes the last small segment kept around by DeleteAll(). You
-  // may no longer allocate in the Zone after a call to this method.
-  void DeleteKeptSegment();
-
   // Returns true if more memory has been allocated in zones than
   // the limit allows.
   bool excess_allocation() const {
     return segment_bytes_allocated_ > kExcessLimit;
   }
 
+  const char* name() const { return name_; }
+
   size_t allocation_size() const { return allocation_size_; }
 
   AccountingAllocator* allocator() const { return allocator_; }
@@ -65,8 +65,9 @@
  private:
 // All pointers returned from New() have this alignment.  In addition, if the
 // object being allocated has a size that is divisible by 8 then its alignment
-// will be 8. ASan requires 8-byte alignment.
-#ifdef V8_USE_ADDRESS_SANITIZER
+// will be 8. ASan requires 8-byte alignment. MIPS also requires 8-byte
+// alignment.
+#if defined(V8_USE_ADDRESS_SANITIZER) || defined(V8_TARGET_ARCH_MIPS)
   static const size_t kAlignment = 8;
   STATIC_ASSERT(kPointerSize <= 8);
 #else
@@ -79,12 +80,12 @@
   // Never allocate segments larger than this size in bytes.
   static const size_t kMaximumSegmentSize = 1 * MB;
 
-  // Never keep segments larger than this size in bytes around.
-  static const size_t kMaximumKeptSegmentSize = 64 * KB;
-
   // Report zone excess when allocation exceeds this limit.
   static const size_t kExcessLimit = 256 * MB;
 
+  // Deletes all objects and frees all memory allocated in the Zone.
+  void DeleteAll();
+
   // The number of bytes allocated in this zone so far.
   size_t allocation_size_;
 
@@ -101,7 +102,7 @@
 
   // Creates a new segment, sets it size, and pushes it to the front
   // of the segment chain. Returns the new segment.
-  inline Segment* NewSegment(size_t size);
+  inline Segment* NewSegment(size_t requested_size);
 
   // The free region in the current (front) segment is represented as
   // the half-open interval [position, limit). The 'position' variable
@@ -112,6 +113,7 @@
   AccountingAllocator* allocator_;
 
   Segment* segment_head_;
+  const char* name_;
 };
 
 // ZoneObject is an abstraction that helps define classes of objects
@@ -133,19 +135,6 @@
   void operator delete(void* pointer, Zone* zone) { UNREACHABLE(); }
 };
 
-// The ZoneScope is used to automatically call DeleteAll() on a
-// Zone when the ZoneScope is destroyed (i.e. goes out of scope)
-class ZoneScope final {
- public:
-  explicit ZoneScope(Zone* zone) : zone_(zone) {}
-  ~ZoneScope() { zone_->DeleteAll(); }
-
-  Zone* zone() const { return zone_; }
-
- private:
-  Zone* zone_;
-};
-
 // The ZoneAllocationPolicy is used to specialize generic data
 // structures to allocate themselves and their elements in the Zone.
 class ZoneAllocationPolicy final {
@@ -171,6 +160,13 @@
   ZoneList(int capacity, Zone* zone)
       : List<T, ZoneAllocationPolicy>(capacity, ZoneAllocationPolicy(zone)) {}
 
+  // Construct a new ZoneList from a std::initializer_list
+  ZoneList(std::initializer_list<T> list, Zone* zone)
+      : List<T, ZoneAllocationPolicy>(static_cast<int>(list.size()),
+                                      ZoneAllocationPolicy(zone)) {
+    for (auto& i : list) Add(i, zone);
+  }
+
   void* operator new(size_t size, Zone* zone) { return zone->New(size); }
 
   // Construct a new ZoneList by copying the elements of the given ZoneList.
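A short sketch of the new constructor (values illustrative; uses the zone-placement operator new shown above):

    Zone zone(&allocator, ZONE_NAME);
    ZoneList<int>* primes = new (&zone) ZoneList<int>({2, 3, 5, 7}, &zone);
    DCHECK_EQ(4, primes->length());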
diff --git a/tools/callstats.html b/tools/callstats.html
index cb2e0be..b70d40c 100644
--- a/tools/callstats.html
+++ b/tools/callstats.html
@@ -1381,6 +1381,8 @@
         this.version = version;
       }
       add(entry) {
+        // Ignore accidentally added Group entries.
+        if (entry.name.startsWith(GroupedEntry.prefix)) return;
         entry.page = this;
         this.entryDict.set(entry.name, entry);
         var added = false;
@@ -1559,7 +1561,7 @@
         /StackGuard|.*Optimize.*|.*Deoptimize.*|Recompile.*/, "#DC3912"));
     Group.add('compile', new Group('Compile', /.*Compile.*/, "#FFAA00"));
     Group.add('parse', new Group('Parse', /.*Parse.*/, "#FF6600"));
-    Group.add('callback', new Group('Callback', /.*Callback$/, "#109618"));
+    Group.add('callback', new Group('Callback', /.*Callback.*/, "#109618"));
     Group.add('api', new Group('API', /.*API.*/, "#990099"));
     Group.add('gc', new Group('GC', /GC|AllocateInTargetSpace/, "#0099C6"));
     Group.add('javascript', new Group('JavaScript', /JS_Execution/, "#DD4477"));
@@ -1568,7 +1570,7 @@
 
     class GroupedEntry extends Entry {
       constructor(group) {
-        super(0, 'Group-' + group.name, 0, 0, 0, 0, 0, 0);
+        super(0, GroupedEntry.prefix + group.name, 0, 0, 0, 0, 0, 0);
         this.group = group;
         this.entries = [];
       }
@@ -1636,6 +1638,7 @@
         return this.getVarianceForProperty('time')
       }
     }
+    GroupedEntry.prefix = 'Group-';
 
     class UnclassifiedEntry extends GroupedEntry {
       constructor(page) {
diff --git a/tools/dev/v8gen.py b/tools/dev/v8gen.py
index f0fb74b..b8a34e2 100755
--- a/tools/dev/v8gen.py
+++ b/tools/dev/v8gen.py
@@ -269,7 +269,7 @@
     # Artificially increment modification time as our modifications happen too
     # fast. This makes sure that gn is properly rebuilding the ninja files.
     mtime = os.path.getmtime(gn_args_path) + 1
-    with open(gn_args_path, 'aw'):
+    with open(gn_args_path, 'a'):
       os.utime(gn_args_path, (mtime, mtime))
 
     return True
diff --git a/tools/external-reference-check.py b/tools/external-reference-check.py
deleted file mode 100644
index be01dec..0000000
--- a/tools/external-reference-check.py
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import re
-import os
-import sys
-
-DECLARE_FILE = "src/assembler.h"
-REGISTER_FILE = "src/external-reference-table.cc"
-DECLARE_RE = re.compile("\s*static ExternalReference ([^(]+)\(")
-REGISTER_RE = re.compile("\s*Add\(ExternalReference::([^(]+)\(")
-
-WORKSPACE = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), ".."))
-
-# Ignore those.
-BLACKLISTED = [
-  "fixed_typed_array_base_data_offset",
-  "page_flags",
-  "math_exp_constants",
-  "math_exp_log_table",
-  "ForDeoptEntry",
-]
-
-def Find(filename, re):
-  references = []
-  with open(filename, "r") as f:
-    for line in f:
-      match = re.match(line)
-      if match:
-        references.append(match.group(1))
-  return references
-
-def Main():
-  declarations = Find(DECLARE_FILE, DECLARE_RE)
-  registrations = Find(REGISTER_FILE, REGISTER_RE)
-  difference = list(set(declarations) - set(registrations) - set(BLACKLISTED))
-  for reference in difference:
-    print("Declared but not registered: ExternalReference::%s" % reference)
-  return len(difference) > 0
-
-if __name__ == "__main__":
-  sys.exit(Main())
diff --git a/tools/gdbinit b/tools/gdbinit
index 1eae053..b696a8f 100644
--- a/tools/gdbinit
+++ b/tools/gdbinit
@@ -68,5 +68,32 @@
 Usage: jss
 end
 
+# Print stack trace with assertion scopes.
+define bta
+python
+import re
+frame_re = re.compile("^#(\d+)\s*(?:0x[a-f\d]+ in )?(.+) \(.+ at (.+)")
+assert_re = re.compile("^\s*(\S+) = .+<v8::internal::Per\w+AssertType::(\w+)_ASSERT, (false|true)>")
+btl = gdb.execute("backtrace full", to_string = True).splitlines()
+for l in btl:
+  match = frame_re.match(l)
+  if match:
+    print("[%-2s] %-60s %-40s" % (match.group(1), match.group(2), match.group(3)))
+  match = assert_re.match(l)
+  if match:
+    if match.group(3) == "false":
+      prefix = "Disallow"
+      color = "\033[91m"
+    else:
+      prefix = "Allow"
+      color = "\033[92m"
+    print("%s -> %s %s (%s)\033[0m" % (color, prefix, match.group(2), match.group(1)))
+end
+end
+document bta
+Print stack trace with assertion scopes
+Usage: bta
+end
+
 set disassembly-flavor intel
 set disable-randomization off
diff --git a/tools/ic-explorer.html b/tools/ic-explorer.html
index 42bbc20..02214e3 100644
--- a/tools/ic-explorer.html
+++ b/tools/ic-explorer.html
@@ -40,6 +40,11 @@
     "use strict"
     var entries = [];
 
+    var properties = ['type', 'category', 'file', 'filePosition', 'state',
+      'key', 'isNative', 'map', 'propertiesMode', 'numberOfOwnProperties',
+      'instanceType'
+    ]
+
     class Entry {
       constructor(id, line) {
         this.id = id;
@@ -50,8 +55,11 @@
         if (parts[0][0] !== "[") return;
         if (parts[1] === "patching") return;
         this.type = parts[0].substr(1);
-        this.category = "Other";
-        this.map = undefined;
+        this.category = "unknown";
+        this.map = "unknown";
+        this.propertiesMode = "unknown";
+        this.numberOfOwnProperties = 0;
+        this.instanceType = "unknown";
         if (this.type.indexOf("Store") !== -1) {
           this.category = "Store";
         } else if (this.type.indexOf("Load") !== -1) {
@@ -70,13 +78,22 @@
           var offset = this.parsePositionAndFile(parts, 2);
           if (offset == -1) return
           this.state = parts[++offset];
-          this.map = parts[offset + 1];
-          if (this.map !== undefined && this.map.startsWith("map=")) {
-            this.map = this.map.substring(4);
-            offset++;
-          } else {
-            this.map = undefined;
-          }
+          var mapPart = parts[offset + 1];
+          if (mapPart !== undefined && mapPart.startsWith("map=")) {
+            if (mapPart[4] == "(") {
+              if (mapPart.endsWith(")")) {
+                this.map = mapPart.substr(5, mapPart.length-6);
+              } else {
+                this.map = mapPart.substr(5);
+              }
+              offset++;
+              offset = this.parseMapProperties(parts, offset);
+            } else {
+              this.map = mapPart.substr(4);
+              offset++;
+            }
+            if (this.map == "(nil)") this.map = "unknown";
+          }
           if (this.type !== "CompareIC") {
             // if there is no address we have a smi key
             var address = parts[++offset];
@@ -108,6 +125,17 @@
         this.isValid = true;
       }
 
+      parseMapProperties(parts, offset) {
+        var next = parts[++offset];
+        if (!next.startsWith('dict')) return offset;
+        this.propertiesMode = next.substr(5) == "0" ? "fast" : "slow";
+        this.numberOfOwnProperties  = parts[++offset].substr(4);
+        next = parts[++offset];
+        this.instanceType = next.substr(5, next.length-6);
+        return offset;
+      }
+
       parsePositionAndFile(parts, start) {
         // find the position of 'at' in the parts array.
         var offset = start;
@@ -157,11 +185,6 @@
     }
 
 
-
-    var properties = ['type', 'category', 'file', 'filePosition', 'state',
-      'key', 'isNative', 'map'
-    ]
-
     class Group {
       constructor(property, key, entry) {
         this.property = property;
@@ -332,15 +355,32 @@
         select.add(option);
       }
     }
+    
+    function handleOnLoad() {
+      document.querySelector("#uploadInput").focus();
+    }
   </script>
 </head>
 
-<body>
+<body onload="handleOnLoad()">
   <h1>
       <span style="color: #00FF00">I</span>
       <span style="color: #FF00FF">C</span>
       <span style="color: #00FFFF">E</span>
     </h1> Your IC-Explorer.
+
+  <div id="legend" style="padding-right: 200px">
+    <div style="float:right;  border-style: solid; border-width: 1px; padding:20px">
+    0 uninitialized<br>
+    . premonomorphic<br>
+    1 monomorphic<br>
+    ^ recompute handler<br>
+    P polymorphic<br>
+    N megamorphic<br>
+    G generic
+    </div>
+  </div>
+
   <h2>Usage</h2> Run your script with <code>--trace_ic</code> and upload on this page:<br/>
   <code>/path/to/d8 --trace_ic your_script.js > trace.txt</code>
   <h2>Data</h2>
diff --git a/tools/ignition/linux_perf_report.py b/tools/ignition/linux_perf_report.py
index eaf85b3..69db37c 100755
--- a/tools/ignition/linux_perf_report.py
+++ b/tools/ignition/linux_perf_report.py
@@ -52,6 +52,8 @@
 
 COMPILER_SYMBOLS_RE = re.compile(
   r"v8::internal::(?:\(anonymous namespace\)::)?Compile|v8::internal::Parser")
+JIT_CODE_SYMBOLS_RE = re.compile(
+  r"(LazyCompile|Compile|Eval|Script):(\*|~)")
 
 
 def strip_function_parameters(symbol):
@@ -70,7 +72,8 @@
   return symbol[:-pos]
 
 
-def collapsed_callchains_generator(perf_stream, show_all=False,
+def collapsed_callchains_generator(perf_stream, hide_other=False,
+                                   hide_compiler=False, hide_jit=False,
                                    show_full_signatures=False):
   current_chain = []
   skip_until_end_of_chain = False
@@ -85,7 +88,8 @@
 
     # Empty line signals the end of the callchain.
     if not line:
-      if not skip_until_end_of_chain and current_chain and show_all:
+      if (not skip_until_end_of_chain and current_chain
+          and not hide_other):
         current_chain.append("[other]")
         yield current_chain
       # Reset parser status.
@@ -101,14 +105,26 @@
     symbol = line.split(" ", 1)[1].split("+", 1)[0]
     if not show_full_signatures:
       symbol = strip_function_parameters(symbol)
+
+    # Avoid chains of [unknown]
+    if (symbol == "[unknown]" and current_chain and
+        current_chain[-1] == "[unknown]"):
+      continue
+
     current_chain.append(symbol)
 
     if symbol.startswith("BytecodeHandler:"):
+      current_chain.append("[interpreter]")
       yield current_chain
       skip_until_end_of_chain = True
+    elif JIT_CODE_SYMBOLS_RE.match(symbol):
+      if not hide_jit:
+        current_chain.append("[jit]")
+        yield current_chain
+        skip_until_end_of_chain = True
     elif symbol == "Stub:CEntryStub" and compiler_symbol_in_chain:
-      if show_all:
-        current_chain[-1] = "[compiler]"
+      if not hide_compiler:
+        current_chain.append("[compiler]")
         yield current_chain
       skip_until_end_of_chain = True
     elif COMPILER_SYMBOLS_RE.match(symbol):
@@ -181,8 +197,18 @@
     dest="output_flamegraph"
   )
   command_line_parser.add_argument(
-    "--show-all", "-a",
-    help="show samples outside Ignition bytecode handlers",
+    "--hide-other",
+    help="Hide other samples",
+    action="store_true"
+  )
+  command_line_parser.add_argument(
+    "--hide-compiler",
+    help="Hide samples during compilation",
+    action="store_true"
+  )
+  command_line_parser.add_argument(
+    "--hide-jit",
+    help="Hide samples from JIT code execution",
     action="store_true"
   )
   command_line_parser.add_argument(
@@ -210,8 +236,8 @@
                           stdout=subprocess.PIPE)
 
   callchains = collapsed_callchains_generator(
-    perf.stdout, program_options.show_all,
-    program_options.show_full_signatures)
+    perf.stdout, program_options.hide_other, program_options.hide_compiler,
+    program_options.hide_jit, program_options.show_full_signatures)
 
   if program_options.output_flamegraph:
     write_flamegraph_input_file(program_options.output_stream, callchains)
diff --git a/tools/ignition/linux_perf_report_test.py b/tools/ignition/linux_perf_report_test.py
index d9cef75..9d163c8 100644
--- a/tools/ignition/linux_perf_report_test.py
+++ b/tools/ignition/linux_perf_report_test.py
@@ -40,6 +40,9 @@
    11111111 Builtin:InterpreterEntryTrampoline
    22222222 bar
 
+   00000000 hello
+   11111111 LazyCompile:~Foo
+
    11111111 Builtin:InterpreterEntryTrampoline
    22222222 bar
 """
@@ -50,22 +53,26 @@
     perf_stream = StringIO.StringIO(PERF_SCRIPT_OUTPUT)
     callchains = list(ipr.collapsed_callchains_generator(perf_stream))
     self.assertListEqual(callchains, [
-      ["foo", "BytecodeHandler:bar"],
-      ["foo", "BytecodeHandler:bar"],
-      ["beep", "BytecodeHandler:bar"],
+      ['firstSymbol', 'secondSymbol', '[other]'],
+      ["foo", "BytecodeHandler:bar", "[interpreter]"],
+      ["foo", "BytecodeHandler:bar", "[interpreter]"],
+      ["beep", "BytecodeHandler:bar", "[interpreter]"],
+      ["hello", "v8::internal::Compiler", "Stub:CEntryStub", "[compiler]"],
+      ["Lost", "[misattributed]"],
+      ["hello", "LazyCompile:~Foo", "[jit]"],
       ["[entry trampoline]"],
     ])
 
-  def test_collapsed_callchains_generator_show_other(self):
+  def test_collapsed_callchains_generator_hide_other(self):
     perf_stream = StringIO.StringIO(PERF_SCRIPT_OUTPUT)
     callchains = list(ipr.collapsed_callchains_generator(perf_stream,
-                                                         show_all=True))
+                                                         hide_other=True,
+                                                         hide_compiler=True,
+                                                         hide_jit=True))
     self.assertListEqual(callchains, [
-      ['firstSymbol', 'secondSymbol', '[other]'],
-      ["foo", "BytecodeHandler:bar"],
-      ["foo", "BytecodeHandler:bar"],
-      ["beep", "BytecodeHandler:bar"],
-      ["hello", "v8::internal::Compiler", "[compiler]"],
+      ["foo", "BytecodeHandler:bar", "[interpreter]"],
+      ["foo", "BytecodeHandler:bar", "[interpreter]"],
+      ["beep", "BytecodeHandler:bar", "[interpreter]"],
       ["Lost", "[misattributed]"],
       ["[entry trampoline]"],
     ])
@@ -125,7 +132,7 @@
     """)
     callchains = list(ipr.collapsed_callchains_generator(perf_stream, False))
     self.assertListEqual(callchains, [
-      ["foo", "BytecodeHandler:first"],
+      ["foo", "BytecodeHandler:first", "[interpreter]"],
     ])
 
   def test_compiler_symbols_regex(self):
@@ -137,6 +144,15 @@
     for compiler_symbol in compiler_symbols:
       self.assertTrue(ipr.COMPILER_SYMBOLS_RE.match(compiler_symbol))
 
+  def test_jit_code_symbols_regex(self):
+    jit_code_symbols = [
+      "LazyCompile:~Foo blah.js",
+      "Eval:*",
+      "Script:*Bar tmp.js",
+    ]
+    for jit_code_symbol in jit_code_symbols:
+      self.assertTrue(ipr.JIT_CODE_SYMBOLS_RE.match(jit_code_symbol))
+
   def test_strip_function_parameters(self):
     def should_match(signature, name):
       self.assertEqual(ipr.strip_function_parameters(signature), name)
@@ -145,3 +161,6 @@
     should_match("Foo(foomatic::(anonymous)::bar(baz))", "Foo"),
     should_match("v8::(anonymous ns)::bar<thing(with, parentheses)>(baz, poe)",
        "v8::(anonymous ns)::bar<thing(with, parentheses)>")
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tools/parser-shell.cc b/tools/parser-shell.cc
index 43d2578..0517bbf 100644
--- a/tools/parser-shell.cc
+++ b/tools/parser-shell.cc
@@ -93,9 +93,8 @@
   i::ScriptData* cached_data_impl = NULL;
   // First round of parsing (produce data to cache).
   {
-    Zone zone(reinterpret_cast<i::Isolate*>(isolate)->allocator());
+    Zone zone(reinterpret_cast<i::Isolate*>(isolate)->allocator(), ZONE_NAME);
     ParseInfo info(&zone, script);
-    info.set_global();
     info.set_cached_data(&cached_data_impl);
     info.set_compile_options(v8::ScriptCompiler::kProduceParserCache);
     v8::base::ElapsedTimer timer;
@@ -111,9 +110,8 @@
   }
   // Second round of parsing (consume cached data).
   {
-    Zone zone(reinterpret_cast<i::Isolate*>(isolate)->allocator());
+    Zone zone(reinterpret_cast<i::Isolate*>(isolate)->allocator(), ZONE_NAME);
     ParseInfo info(&zone, script);
-    info.set_global();
     info.set_cached_data(&cached_data_impl);
     info.set_compile_options(v8::ScriptCompiler::kConsumeParserCache);
     v8::base::ElapsedTimer timer;
diff --git a/tools/parser-shell.gyp b/tools/parser-shell.gyp
index 4ef1a82..9b94888 100644
--- a/tools/parser-shell.gyp
+++ b/tools/parser-shell.gyp
@@ -37,6 +37,7 @@
       'type': 'executable',
       'dependencies': [
         '../src/v8.gyp:v8',
+        '../src/v8.gyp:v8_libbase',
         '../src/v8.gyp:v8_libplatform',
       ],
       'conditions': [
diff --git a/tools/presubmit.py b/tools/presubmit.py
index 3be9caf..f9ae2bd 100755
--- a/tools/presubmit.py
+++ b/tools/presubmit.py
@@ -396,13 +396,6 @@
     print "Total violating files: %s" % violations
     return success
 
-
-def CheckExternalReferenceRegistration(workspace):
-  code = subprocess.call(
-      [sys.executable, join(workspace, "tools", "external-reference-check.py")])
-  return code == 0
-
-
 def _CheckStatusFileForDuplicateKeys(filepath):
   comma_space_bracket = re.compile(", *]")
   lines = []
@@ -503,7 +496,6 @@
   print "Running copyright header, trailing whitespaces and " \
         "two empty lines between declarations check..."
   success &= SourceProcessor().Run(workspace)
-  success &= CheckExternalReferenceRegistration(workspace)
   success &= CheckStatusFiles(workspace)
   if success:
     return 0
diff --git a/tools/profviz/composer.js b/tools/profviz/composer.js
index 108911d..ce625ad 100644
--- a/tools/profviz/composer.js
+++ b/tools/profviz/composer.js
@@ -106,8 +106,6 @@
         new TimerEvent("recompile async", "#CC4499", false, 1),
       'V8.CompileEvalMicroSeconds':
         new TimerEvent("compile eval", "#CC4400",  true, 0),
-      'V8.IcMiss':
-        new TimerEvent("ic miss", "#CC9900", false, 0),
       'V8.ParseMicroSeconds':
         new TimerEvent("parse", "#00CC00",  true, 0),
       'V8.PreParseMicroSeconds':
diff --git a/tools/profviz/stdio.js b/tools/profviz/stdio.js
index 5a8311d..8ba12e3 100644
--- a/tools/profviz/stdio.js
+++ b/tools/profviz/stdio.js
@@ -30,10 +30,10 @@
 var range_start_override = undefined;
 var range_end_override = undefined;
 
-if (!processor.parse()) processor.printUsageAndExit();;
+if (!processor.parse()) processor.printUsageAndExit();
 var result = processor.result();
 var distortion = parseInt(result.distortion);
-if (isNaN(distortion)) processor.printUsageAndExit();;
+if (isNaN(distortion)) processor.printUsageAndExit();
 // Convert picoseconds to milliseconds.
 distortion_per_entry = distortion / 1000000;
 var rangelimits = result.range.split(",");
@@ -43,7 +43,7 @@
 if (!isNaN(range_end)) range_end_override = range_end;
 
 var kResX = 1600;
-var kResY = 700;
+var kResY = 600;
 function log_error(text) {
   print(text);
   quit(1);
diff --git a/tools/release/auto_roll.py b/tools/release/auto_roll.py
index c1a99e8..d1a3f48 100755
--- a/tools/release/auto_roll.py
+++ b/tools/release/auto_roll.py
@@ -20,7 +20,7 @@
 https://v8-roll.appspot.com/
 This only works with a Google account.
 
-CQ_INCLUDE_TRYBOTS=master.tryserver.blink:linux_precise_blink_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel""")
+CQ_INCLUDE_TRYBOTS=master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel""")
 
 class Preparation(Step):
   MESSAGE = "Preparation."
diff --git a/tools/release/test_scripts.py b/tools/release/test_scripts.py
index ab92e89..a344376 100644
--- a/tools/release/test_scripts.py
+++ b/tools/release/test_scripts.py
@@ -1044,7 +1044,7 @@
 https://v8-roll.appspot.com/
 This only works with a Google account.
 
-CQ_INCLUDE_TRYBOTS=master.tryserver.blink:linux_precise_blink_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
+CQ_INCLUDE_TRYBOTS=master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
 
 TBR=reviewer@chromium.org"""
 
diff --git a/tools/run-tests.py b/tools/run-tests.py
index f248dff..e94f599 100755
--- a/tools/run-tests.py
+++ b/tools/run-tests.py
@@ -67,6 +67,8 @@
   "bot_default": [
     "mjsunit",
     "cctest",
+    "debugger",
+    "inspector",
     "webkit",
     "fuzzer",
     "message",
@@ -78,6 +80,8 @@
   "default": [
     "mjsunit",
     "cctest",
+    "debugger",
+    "inspector",
     "fuzzer",
     "message",
     "preparser",
@@ -88,6 +92,8 @@
   "optimize_for_size": [
     "mjsunit",
     "cctest",
+    "debugger",
+    "inspector",
     "webkit",
     "intl",
   ],
@@ -255,6 +261,9 @@
   result.add_option("--download-data-only",
                     help="Deprecated",
                     default=False, action="store_true")
+  result.add_option("--enable-inspector",
+                    help="Indicates a build with inspector support",
+                    default=False, action="store_true")
   result.add_option("--extra-flags",
                     help="Additional flags to pass to each test command",
                     default="")
@@ -447,8 +456,13 @@
       print(">>> Latest GN build found is %s" % latest_config)
       options.outdir = os.path.join(DEFAULT_OUT_GN, latest_config)
 
-  build_config_path = os.path.join(
-      BASE_DIR, options.outdir, "v8_build_config.json")
+  if options.buildbot:
+    build_config_path = os.path.join(
+        BASE_DIR, options.outdir, options.mode, "v8_build_config.json")
+  else:
+    build_config_path = os.path.join(
+        BASE_DIR, options.outdir, "v8_build_config.json")
+
   if os.path.exists(build_config_path):
     try:
       with open(build_config_path) as f:
@@ -459,6 +473,10 @@
       return False
     options.auto_detect = True
 
+    # In auto-detect mode the outdir is always where we found the build config.
+    # This ensures that we'll also take the build products from there.
+    options.outdir = os.path.dirname(build_config_path)
+
     options.arch_and_mode = None
     options.arch = build_config["v8_target_cpu"]
     if options.arch == 'x86':
@@ -466,6 +484,7 @@
       options.arch = 'ia32'
     options.asan = build_config["is_asan"]
     options.dcheck_always_on = build_config["dcheck_always_on"]
+    options.enable_inspector = build_config["v8_enable_inspector"]
     options.mode = 'debug' if build_config["is_debug"] else 'release'
     options.msan = build_config["is_msan"]
     options.no_i18n = not build_config["v8_enable_i18n_support"]
@@ -592,6 +611,13 @@
   if options.no_i18n:
     TEST_MAP["bot_default"].remove("intl")
     TEST_MAP["default"].remove("intl")
+  if not options.enable_inspector:
+    TEST_MAP["default"].remove("inspector")
+    TEST_MAP["bot_default"].remove("inspector")
+    TEST_MAP["optimize_for_size"].remove("inspector")
+    TEST_MAP["default"].remove("debugger")
+    TEST_MAP["bot_default"].remove("debugger")
+    TEST_MAP["optimize_for_size"].remove("debugger")
   return True
 
 
@@ -702,15 +728,15 @@
 
   shell_dir = options.shell_dir
   if not shell_dir:
-    if options.buildbot:
+    if options.auto_detect:
+      # If an output dir with a build was passed, test directly in that
+      # directory.
+      shell_dir = os.path.join(BASE_DIR, options.outdir)
+    elif options.buildbot:
       # TODO(machenbach): Get rid of different output folder location on
       # buildbot. Currently this is capitalized Release and Debug.
       shell_dir = os.path.join(BASE_DIR, options.outdir, mode)
       mode = BuildbotToV8Mode(mode)
-    elif options.auto_detect:
-      # If an output dir with a build was passed, test directly in that
-      # directory.
-      shell_dir = os.path.join(BASE_DIR, options.outdir)
     else:
       shell_dir = os.path.join(
           BASE_DIR,
@@ -733,14 +759,8 @@
     # Predictable mode is slower.
     options.timeout *= 2
 
-  # TODO(machenbach): Remove temporary verbose output on windows after
-  # debugging driver-hung-up on XP.
-  verbose_output = (
-      options.verbose or
-      utils.IsWindows() and options.progress == "verbose"
-  )
   ctx = context.Context(arch, MODES[mode]["execution_mode"], shell_dir,
-                        mode_flags, verbose_output,
+                        mode_flags, options.verbose,
                         options.timeout,
                         options.isolates,
                         options.command_prefix,
@@ -851,7 +871,7 @@
 
   run_networked = not options.no_network
   if not run_networked:
-    if verbose_output:
+    if options.verbose:
       print("Network distribution disabled, running tests locally.")
   elif utils.GuessOS() != "linux":
     print("Network distribution is only supported on Linux, sorry!")
diff --git a/tools/sanitizers/sancov_merger.py b/tools/sanitizers/sancov_merger.py
index a4cfec1..867f8b4 100755
--- a/tools/sanitizers/sancov_merger.py
+++ b/tools/sanitizers/sancov_merger.py
@@ -7,7 +7,7 @@
 
 When merging test runner output, the sancov files are expected
 to be located in one directory with the file-name pattern:
-<executable name>.test.<id>.sancov
+<executable name>.test.<id>.<attempt>.sancov
 
 For each executable, this script writes a new file:
 <executable name>.result.sancov
@@ -48,7 +48,7 @@
 
 # Regexp to find sancov file as output by the v8 test runner. Also grabs the
 # executable name in group 1.
-SANCOV_FILE_RE = re.compile(r'^(.*)\.test\.\d+\.sancov$')
+SANCOV_FILE_RE = re.compile(r'^(.*)\.test\.\d+\.\d+\.sancov$')
 
 # Regexp to find sancov result files as returned from swarming.
 SANCOV_RESULTS_FILE_RE = re.compile(r'^.*\.result\.sancov$')
diff --git a/tools/sanitizers/sancov_merger_test.py b/tools/sanitizers/sancov_merger_test.py
index 93b89eb..899c716 100644
--- a/tools/sanitizers/sancov_merger_test.py
+++ b/tools/sanitizers/sancov_merger_test.py
@@ -11,19 +11,19 @@
 # executable name -> file list.
 FILE_MAP = {
   'd8': [
-    'd8.test.1.sancov',
-    'd8.test.2.sancov',
-    'd8.test.3.sancov',
-    'd8.test.4.sancov',
-    'd8.test.5.sancov',
-    'd8.test.6.sancov',
-    'd8.test.7.sancov',
+    'd8.test.1.1.sancov',
+    'd8.test.2.1.sancov',
+    'd8.test.3.1.sancov',
+    'd8.test.4.1.sancov',
+    'd8.test.5.1.sancov',
+    'd8.test.5.2.sancov',
+    'd8.test.6.1.sancov',
   ],
   'cctest': [
-    'cctest.test.1.sancov',
-    'cctest.test.2.sancov',
-    'cctest.test.3.sancov',
-    'cctest.test.4.sancov',
+    'cctest.test.1.1.sancov',
+    'cctest.test.2.1.sancov',
+    'cctest.test.3.1.sancov',
+    'cctest.test.4.1.sancov',
   ],
 }
 
@@ -32,42 +32,42 @@
 # (flag, path, executable name, intermediate result index, file list).
 EXPECTED_INPUTS_2 = [
   (False, '/some/path', 'cctest', 0, [
-    'cctest.test.1.sancov',
-    'cctest.test.2.sancov']),
+    'cctest.test.1.1.sancov',
+    'cctest.test.2.1.sancov']),
   (False, '/some/path', 'cctest', 1, [
-    'cctest.test.3.sancov',
-    'cctest.test.4.sancov']),
+    'cctest.test.3.1.sancov',
+    'cctest.test.4.1.sancov']),
   (False, '/some/path', 'd8', 0, [
-    'd8.test.1.sancov',
-    'd8.test.2.sancov',
-    'd8.test.3.sancov',
-    'd8.test.4.sancov']),
+    'd8.test.1.1.sancov',
+    'd8.test.2.1.sancov',
+    'd8.test.3.1.sancov',
+    'd8.test.4.1.sancov']),
   (False, '/some/path', 'd8', 1, [
-    'd8.test.5.sancov',
-    'd8.test.6.sancov',
-    'd8.test.7.sancov']),
+    'd8.test.5.1.sancov',
+    'd8.test.5.2.sancov',
+    'd8.test.6.1.sancov']),
 ]
 
 
 # The same for 4 cpus.
 EXPECTED_INPUTS_4 = [
   (True, '/some/path', 'cctest', 0, [
-    'cctest.test.1.sancov',
-    'cctest.test.2.sancov']),
+    'cctest.test.1.1.sancov',
+    'cctest.test.2.1.sancov']),
   (True, '/some/path', 'cctest', 1, [
-    'cctest.test.3.sancov',
-    'cctest.test.4.sancov']),
+    'cctest.test.3.1.sancov',
+    'cctest.test.4.1.sancov']),
   (True, '/some/path', 'd8', 0, [
-    'd8.test.1.sancov',
-    'd8.test.2.sancov']),
+    'd8.test.1.1.sancov',
+    'd8.test.2.1.sancov']),
   (True, '/some/path', 'd8', 1, [
-    'd8.test.3.sancov',
-    'd8.test.4.sancov']),
+    'd8.test.3.1.sancov',
+    'd8.test.4.1.sancov']),
   (True, '/some/path', 'd8', 2, [
-    'd8.test.5.sancov',
-    'd8.test.6.sancov']),
+    'd8.test.5.1.sancov',
+    'd8.test.5.2.sancov']),
   (True, '/some/path', 'd8', 3, [
-    'd8.test.7.sancov'])]
+    'd8.test.6.1.sancov'])]
 
 
 class MergerTests(unittest.TestCase):
diff --git a/tools/testrunner/local/execution.py b/tools/testrunner/local/execution.py
index f3d11a8..4cb9e45 100644
--- a/tools/testrunner/local/execution.py
+++ b/tools/testrunner/local/execution.py
@@ -149,8 +149,9 @@
 
     Rename files with PIDs to files with unique test IDs, because the number
     of tests might be higher than pid_max. E.g.:
-    d8.1234.sancov -> d8.test.1.sancov, where 1234 was the process' PID
-    and 1 is the test ID.
+    d8.1234.sancov -> d8.test.42.1.sancov, where 1234 is the process PID,
+    42 is the test ID, and 1 is the attempt number (the same test might be
+    rerun on failures).
     """
     if context.sancov_dir and output.pid is not None:
       sancov_file = os.path.join(
@@ -160,7 +161,10 @@
       if os.path.exists(sancov_file):
         parts = sancov_file.split(".")
         new_sancov_file = ".".join(
-            parts[:-2] + ["test", str(self.test.id)] + parts[-1:])
+            parts[:-2] +
+            ["test", str(self.test.id), str(self.test.run)] +
+            parts[-1:]
+        )
         assert not os.path.exists(new_sancov_file)
         os.rename(sancov_file, new_sancov_file)
 
diff --git a/tools/testrunner/testrunner.isolate b/tools/testrunner/testrunner.isolate
index 1e8e9dc..533ef68 100644
--- a/tools/testrunner/testrunner.isolate
+++ b/tools/testrunner/testrunner.isolate
@@ -20,5 +20,12 @@
         ],
       },
     }],
+    ['is_gn==1', {
+      'variables': {
+        'files': [
+          '<(PRODUCT_DIR)/v8_build_config.json',
+        ],
+      },
+    }],
   ],
 }
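The isolate addition ships <(PRODUCT_DIR)/v8_build_config.json next to the
test runner on GN builds, which is what the build_config lookups earlier in
this patch (is_asan, v8_enable_inspector, ...) read from. A hedged sketch of
loading it; the loader function is hypothetical, while the file name and
keys follow the hunks above:

    import json
    import os

    def load_build_config(outdir):
        # Emitted by the GN build into the output directory; keys such as
        # is_debug and v8_enable_inspector mirror the GN arguments.
        with open(os.path.join(outdir, 'v8_build_config.json')) as f:
            return json.load(f)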
diff --git a/tools/turbolizer/schedule-view.js b/tools/turbolizer/schedule-view.js
index 2cd49c9..ef47892 100644
--- a/tools/turbolizer/schedule-view.js
+++ b/tools/turbolizer/schedule-view.js
@@ -77,7 +77,7 @@
       // Parse opcode including []
       [
         [/^[A-Za-z0-9_]+(\[.*\])?$/, NODE_STYLE, -1],
-        [/^[A-Za-z0-9_]+(\[.*\])?/, NODE_STYLE, 3]
+        [/^[A-Za-z0-9_]+(\[(\[.*?\]|.)*?\])?/, NODE_STYLE, 3]
       ],
       // Parse optional parameters
       [
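The schedule-view fix swaps the greedy opcode pattern for one that stops at
the end of the parameter block while tolerating one level of nested
brackets. The same regexp behaves identically under Python's re module, so
the difference is easy to check (the input line is an illustrative
turbolizer schedule line, not captured output):

    import re

    # Old pattern: the greedy '.*' runs to the last ']' on the line.
    OLD = re.compile(r'^[A-Za-z0-9_]+(\[.*\])?')
    # New pattern: lazily consumes the block, allowing nested '[...]'.
    NEW = re.compile(r'^[A-Za-z0-9_]+(\[(\[.*?\]|.)*?\])?')

    line = 'LoadField[[tagged base]+12] -> [rax]'
    print(OLD.match(line).group(0))  # over-matches through ' -> [rax]'
    print(NEW.match(line).group(0))  # 'LoadField[[tagged base]+12]'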
diff --git a/tools/update-wasm-fuzzers.sh b/tools/update-wasm-fuzzers.sh
index 3652829..a58681f 100755
--- a/tools/update-wasm-fuzzers.sh
+++ b/tools/update-wasm-fuzzers.sh
@@ -12,30 +12,30 @@
 rm -rf test/fuzzer/wasm
 rm -rf test/fuzzer/wasm_asmjs
 
-make x64.debug -j
+make x64.release -j
 
 mkdir -p test/fuzzer/wasm
 mkdir -p test/fuzzer/wasm_asmjs
 
 # asm.js
 ./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
-  --mode=debug --no-presubmit --extra-flags="--dump-wasm-module \
+  --mode=release --no-presubmit --extra-flags="--dump-wasm-module \
   --dump-wasm-module-path=./test/fuzzer/wasm_asmjs/" mjsunit/wasm/asm*
 ./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
-  --mode=debug --no-presubmit --extra-flags="--dump-wasm-module \
+  --mode=release --no-presubmit --extra-flags="--dump-wasm-module \
   --dump-wasm-module-path=./test/fuzzer/wasm_asmjs/" mjsunit/asm/*
 ./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
-  --mode=debug --no-presubmit --extra-flags="--dump-wasm-module \
+  --mode=release --no-presubmit --extra-flags="--dump-wasm-module \
   --dump-wasm-module-path=./test/fuzzer/wasm_asmjs/" mjsunit/regress/asm/*
 # WASM
 ./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
-  --mode=debug --no-presubmit --extra-flags="--dump-wasm-module \
+  --mode=release --no-presubmit --extra-flags="--dump-wasm-module \
   --dump-wasm-module-path=./test/fuzzer/wasm/" unittests
 ./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
-  --mode=debug --no-presubmit --extra-flags="--dump-wasm-module \
+  --mode=release --no-presubmit --extra-flags="--dump-wasm-module \
   --dump-wasm-module-path=./test/fuzzer/wasm/" mjsunit/wasm/*
 ./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
-  --mode=debug --no-presubmit --extra-flags="--dump-wasm-module \
+  --mode=release --no-presubmit --extra-flags="--dump-wasm-module \
   --dump-wasm-module-path=./test/fuzzer/wasm/" \
   $(cd test/; ls cctest/wasm/test-*.cc | \
   sed -es/wasm\\///g | sed -es/[.]cc/\\/\\*/g)
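The last invocation above expands every test/cctest/wasm/test-*.cc source
into a cctest test-path glob. The two sed commands amount to this
transformation (a sketch; the file names are examples):

    # 'wasm/' is stripped and '.cc' becomes '/*', so each cctest source maps
    # to the glob naming its tests.
    files = ['cctest/wasm/test-run-wasm.cc', 'cctest/wasm/test-wasm-stack.cc']
    print([f.replace('wasm/', '').replace('.cc', '/*') for f in files])
    # ['cctest/test-run-wasm/*', 'cctest/test-wasm-stack/*']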
diff --git a/tools/whitespace.txt b/tools/whitespace.txt
index 2229d87..0f4384f 100644
--- a/tools/whitespace.txt
+++ b/tools/whitespace.txt
@@ -6,4 +6,4 @@
 "I'm so deoptimized today!"
 The doubles heard this and started to unbox.
 The Smi looked at them when a crazy v8-autoroll account showed up......
-The autoroller bought a round of Himbeerbrause. Suddenly ...
+The autoroller bought a round of Himbeerbrause. Suddenly .......