Version 3.22.20

Made Array.prototype.pop throw if the last element is not configurable.
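
For illustration, a minimal sketch of the observable change (array and
values are hypothetical):

    var a = [1, 2, 3];
    Object.defineProperty(a, 2, { configurable: false });
    a.pop();  // Used to fail silently; now throws a TypeError because
              // the non-configurable last element cannot be deleted.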

Fixed HObjectAccess for loads from migrating prototypes. (Chromium issue 305309)
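
A speculative repro sketch of the underlying situation (run with
d8 --allow-natives-syntax; all names are illustrative):

    function Proto() { this.x = 1; }
    var proto = new Proto();
    var obj = Object.create(proto);
    new Proto().x = 1.5;   // Field generalization deprecates proto's map.
    function load(o) { return o.x; }
    load(obj); load(obj);
    %OptimizeFunctionOnNextCall(load);
    load(obj);  // Optimized load through a prototype whose map is migrating.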

Enabled preaging of code objects when --optimize-for-size. (Chromium issue 280984)
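
Both flags involved appear in this patch (FLAG_optimize_for_size and
FLAG_age_code), so a hypothetical d8 invocation exercising the pre-aged
prologues would be:

    d8 --optimize-for-size --age-code script.js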

Exposed v8::Function::GetDisplayName to public API. (Chromium issue 17356)
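
A minimal sketch of the contract from the script side (names are
illustrative):

    function f() {}
    f.displayName = 'MyLib.f';
    // An embedder holding a v8::Function for f can now read "MyLib.f"
    // via GetDisplayName(); per the implementation below, a missing,
    // empty, or non-string displayName yields undefined.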

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@17354 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index d5b7f8d..ab3b2d9 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,19 @@
+2013-10-24: Version 3.22.20
+
+        Made Array.prototype.pop throw if the last element is not configurable.
+
+        Fixed HObjectAccess for loads from migrating prototypes.
+        (Chromium issue 305309)
+
+        Enabled preaging of code objects when --optimize-for-size.
+        (Chromium issue 280984)
+
+        Exposed v8::Function::GetDisplayName to public API.
+        (Chromium issue 17356)
+
+        Performance and stability improvements on all platforms.
+
+
 2013-10-23: Version 3.22.19
 
         Fix materialization of captured objects with field tracking.
diff --git a/include/v8.h b/include/v8.h
index 77684bf..44a74ed 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -2479,6 +2479,12 @@
   Handle<Value> GetInferredName() const;
 
   /**
+   * User-defined name assigned to the "displayName" property of this function.
+   * Used to facilitate debugging and profiling of JavaScript code.
+   */
+  Handle<Value> GetDisplayName() const;
+
+  /**
    * Returns zero based line number of function body and
    * kLineOffsetNotFound if no information available.
    */
diff --git a/src/api.cc b/src/api.cc
index 8e07b1a..a768824 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -4110,6 +4110,29 @@
 }
 
 
+Handle<Value> Function::GetDisplayName() const {
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  ON_BAILOUT(isolate, "v8::Function::GetDisplayName()",
+             return ToApiHandle<Primitive>(
+                isolate->factory()->undefined_value()));
+  ENTER_V8(isolate);
+  i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+  i::Handle<i::String> property_name =
+      isolate->factory()->InternalizeOneByteString(
+          STATIC_ASCII_VECTOR("displayName"));
+  i::LookupResult lookup(isolate);
+  func->LookupRealNamedProperty(*property_name, &lookup);
+  if (lookup.IsFound()) {
+    i::Object* value = lookup.GetLazyValue();
+    if (value && value->IsString()) {
+      i::String* name = i::String::cast(value);
+      if (name->length() > 0) return Utils::ToLocal(i::Handle<i::String>(name));
+    }
+  }
+  return ToApiHandle<Primitive>(isolate->factory()->undefined_value());
+}
+
+
 ScriptOrigin Function::GetScriptOrigin() const {
   i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
   if (func->shared()->script()->IsScript()) {
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index a1d1e1b..e3b39f4 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -208,6 +208,13 @@
 
 static const int kNoCodeAgeSequenceLength = 3;
 
+
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+  UNREACHABLE();  // This should never be reached on ARM.
+  return Handle<Object>();
+}
+
+
 Code* RelocInfo::code_age_stub() {
   ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
   return Code::GetCodeFromTargetAddress(
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 13c0e44..60f5290 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -826,6 +826,39 @@
 #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
 
 
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+  // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+  // that make_code_young doesn't do any garbage collection, which allows us to
+  // save/restore the registers without worrying about which of them contain
+  // pointers.
+
+  // The following registers must be saved and restored when calling through to
+  // the runtime:
+  //   r0 - contains return address (beginning of patch sequence)
+  //   r1 - isolate
+  FrameScope scope(masm, StackFrame::MANUAL);
+  __ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+  __ PrepareCallCFunction(2, 0, r2);
+  __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
+  __ CallCFunction(ExternalReference::get_mark_code_as_executed_function(
+        masm->isolate()), 2);
+  __ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+
+  // Perform prologue operations usually performed by the young code stub.
+  __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+  __ add(fp, sp, Operand(2 * kPointerSize));
+
+  // Jump to point after the code-age stub.
+  __ add(r0, r0, Operand(kNoCodeAgeSequenceLength * Assembler::kInstrSize));
+  __ mov(pc, r0);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+  GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
 void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 3becc96..34b8921 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -163,16 +163,7 @@
   FrameScope frame_scope(masm_, StackFrame::MANUAL);
 
   info->set_prologue_offset(masm_->pc_offset());
-  {
-    PredictableCodeSizeScope predictible_code_size_scope(
-        masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
-    // The following three instructions must remain together and unmodified
-    // for code aging to work properly.
-    __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
-    __ nop(ip.code());
-    // Adjust FP to point to saved FP.
-    __ add(fp, sp, Operand(2 * kPointerSize));
-  }
+  __ Prologue(BUILD_FUNCTION_FRAME);
   info->AddNoFrameRange(0, masm_->pc_offset());
 
   { Comment cmnt(masm_, "[ Allocate locals");
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 60e21b7..fc7d0fd 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -133,21 +133,7 @@
 
   info()->set_prologue_offset(masm_->pc_offset());
   if (NeedsEagerFrame()) {
-    if (info()->IsStub()) {
-      __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
-      __ Push(Smi::FromInt(StackFrame::STUB));
-      // Adjust FP to point to saved FP.
-      __ add(fp, sp, Operand(2 * kPointerSize));
-    } else {
-      PredictableCodeSizeScope predictible_code_size_scope(
-          masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
-      // The following three instructions must remain together and unmodified
-      // for code aging to work properly.
-      __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
-      __ nop(ip.code());
-      // Adjust FP to point to saved FP.
-      __ add(fp, sp, Operand(2 * kPointerSize));
-    }
+    __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
     frame_is_built_ = true;
     info_->AddNoFrameRange(0, masm_->pc_offset());
   }
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 7ca61d2..a149dee 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -916,6 +916,33 @@
 }
 
 
+void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
+  if (frame_mode == BUILD_STUB_FRAME) {
+    stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+    Push(Smi::FromInt(StackFrame::STUB));
+    // Adjust FP to point to saved FP.
+    add(fp, sp, Operand(2 * kPointerSize));
+  } else {
+    PredictableCodeSizeScope predictable_code_size_scope(
+        this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
+    // The following three instructions must remain together and unmodified
+    // for code aging to work properly.
+    if (FLAG_optimize_for_size && FLAG_age_code) {
+      // Pre-age the code.
+      Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
+      add(r0, pc, Operand(-8));
+      ldr(pc, MemOperand(pc, -4));
+      dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
+    } else {
+      stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+      nop(ip.code());
+      // Adjust FP to point to saved FP.
+      add(fp, sp, Operand(2 * kPointerSize));
+    }
+  }
+}
+
+
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   // r0-r3: preserved
   stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index c709e5c..7cf5d9a 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -539,6 +539,8 @@
                          LowDwVfpRegister double_scratch1,
                          Label* not_int32);
 
+  // Generates function and stub prologue code.
+  void Prologue(PrologueFrameMode frame_mode);
 
   // Enter exit frame.
   // stack_space - extra stack space, used for alignment before call to C.
diff --git a/src/array.js b/src/array.js
index 2649798..e98d7f5 100644
--- a/src/array.js
+++ b/src/array.js
@@ -430,7 +430,7 @@
 
   n--;
   var value = this[n];
-  delete this[n];
+  Delete(this, ToName(n), true);
   this.length = n;
   return value;
 }
diff --git a/src/assembler.cc b/src/assembler.cc
index 0a049a4..9ed4360 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -1088,6 +1088,13 @@
 }
 
 
+ExternalReference ExternalReference::get_mark_code_as_executed_function(
+    Isolate* isolate) {
+  return ExternalReference(Redirect(
+      isolate, FUNCTION_ADDR(Code::MarkCodeAsExecuted)));
+}
+
+
 ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
   return ExternalReference(isolate->date_cache()->stamp_address());
 }
diff --git a/src/assembler.h b/src/assembler.h
index e34b13a..f0b7fed 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -401,6 +401,7 @@
   INLINE(Handle<Cell> target_cell_handle());
   INLINE(void set_target_cell(Cell* cell,
                               WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
+  INLINE(Handle<Object> code_age_stub_handle(Assembler* origin));
   INLINE(Code* code_age_stub());
   INLINE(void set_code_age_stub(Code* stub));
 
@@ -727,6 +728,7 @@
   static ExternalReference date_cache_stamp(Isolate* isolate);
 
   static ExternalReference get_make_code_young_function(Isolate* isolate);
+  static ExternalReference get_mark_code_as_executed_function(Isolate* isolate);
 
   // New heap objects tracking support.
   static ExternalReference record_object_allocation_function(Isolate* isolate);
diff --git a/src/ast.cc b/src/ast.cc
index 0d667cc..481414e 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -139,6 +139,7 @@
       assignment_id_(GetNextId(isolate)),
       is_monomorphic_(false),
       is_uninitialized_(false),
+      is_pre_monomorphic_(false),
       store_mode_(STANDARD_STORE) { }
 
 
@@ -426,7 +427,9 @@
   is_uninitialized_ = oracle->LoadIsUninitialized(this);
   if (is_uninitialized_) return;
 
+  is_pre_monomorphic_ = oracle->LoadIsPreMonomorphic(this);
   is_monomorphic_ = oracle->LoadIsMonomorphicNormal(this);
+  ASSERT(!is_pre_monomorphic_ || !is_monomorphic_);
   receiver_types_.Clear();
   if (key()->IsPropertyName()) {
     FunctionPrototypeStub proto_stub(Code::LOAD_IC);
@@ -456,7 +459,10 @@
   TypeFeedbackId id = AssignmentFeedbackId();
   is_uninitialized_ = oracle->StoreIsUninitialized(id);
   if (is_uninitialized_) return;
+
+  is_pre_monomorphic_ = oracle->StoreIsPreMonomorphic(id);
   is_monomorphic_ = oracle->StoreIsMonomorphicNormal(id);
+  ASSERT(!is_pre_monomorphic_ || !is_monomorphic_);
   receiver_types_.Clear();
   if (prop->key()->IsPropertyName()) {
     Literal* lit_key = prop->key()->AsLiteral();
diff --git a/src/ast.h b/src/ast.h
index 4b2010f..b4f7348 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1669,6 +1669,10 @@
     return STANDARD_STORE;
   }
   bool IsUninitialized() { return is_uninitialized_; }
+  bool IsPreMonomorphic() { return is_pre_monomorphic_; }
+  bool HasNoTypeInformation() {
+    return is_uninitialized_ || is_pre_monomorphic_;
+  }
   TypeFeedbackId PropertyFeedbackId() { return reuse(id()); }
 
  protected:
@@ -1681,6 +1685,7 @@
         key_(key),
         load_id_(GetNextId(isolate)),
         is_monomorphic_(false),
+        is_pre_monomorphic_(false),
         is_uninitialized_(false),
         is_string_access_(false),
         is_function_prototype_(false) { }
@@ -1692,6 +1697,7 @@
 
   SmallMapList receiver_types_;
   bool is_monomorphic_ : 1;
+  bool is_pre_monomorphic_ : 1;
   bool is_uninitialized_ : 1;
   bool is_string_access_ : 1;
   bool is_function_prototype_ : 1;
@@ -2098,6 +2104,10 @@
   void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
   virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
   bool IsUninitialized() { return is_uninitialized_; }
+  bool IsPreMonomorphic() { return is_pre_monomorphic_; }
+  bool HasNoTypeInformation() {
+    return is_uninitialized_ || is_pre_monomorphic_;
+  }
   virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
     return &receiver_types_;
   }
@@ -2130,6 +2140,7 @@
 
   bool is_monomorphic_ : 1;
   bool is_uninitialized_ : 1;
+  bool is_pre_monomorphic_ : 1;
   KeyedAccessStoreMode store_mode_ : 5;  // Windows treats as signed,
                                          // must have extra bit.
   SmallMapList receiver_types_;
diff --git a/src/builtins.h b/src/builtins.h
index 7364878..c1c8a5d 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -204,6 +204,11 @@
                                     Code::kNoExtraICState)              \
   V(StackCheck,                     BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)              \
+                                                                        \
+  V(MarkCodeAsExecutedOnce,         BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(MarkCodeAsExecutedTwice,        BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
   CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
 
 // Define list of builtin handlers implemented in assembly.
@@ -413,6 +418,9 @@
   CODE_AGE_LIST(DECLARE_CODE_AGE_BUILTIN_GENERATOR)
 #undef DECLARE_CODE_AGE_BUILTIN_GENERATOR
 
+  static void Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm);
+  static void Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm);
+
   static void InitBuiltinFunctionTable();
 
   bool initialized_;
diff --git a/src/codegen.cc b/src/codegen.cc
index 7b2f81b..573ddc6 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -113,12 +113,12 @@
   masm->GetCode(&desc);
   Handle<Code> code =
       isolate->factory()->NewCode(desc, flags, masm->CodeObject(),
-                                  false, is_crankshafted);
+                                  false, is_crankshafted,
+                                  info->prologue_offset());
   isolate->counters()->total_compiled_code_size()->Increment(
       code->instruction_size());
   isolate->heap()->IncrementCodeGeneratedBytes(is_crankshafted,
       code->instruction_size());
-  code->set_prologue_offset(info->prologue_offset());
   return code;
 }
 
diff --git a/src/compiler.cc b/src/compiler.cc
index 39524a9..5e4d17b 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -112,7 +112,7 @@
   zone_ = zone;
   deferred_handles_ = NULL;
   code_stub_ = NULL;
-  prologue_offset_ = kPrologueOffsetNotSet;
+  prologue_offset_ = Code::kPrologueOffsetNotSet;
   opt_count_ = shared_info().is_null() ? 0 : shared_info()->opt_count();
   no_frame_ranges_ = isolate->cpu_profiler()->is_profiling()
                    ? new List<OffsetRange>(2) : NULL;
@@ -123,7 +123,7 @@
     mode_ = STUB;
     return;
   }
-  mode_ = isolate->use_crankshaft() ? mode : NONOPT;
+  mode_ = mode;
   abort_due_to_dependency_ = false;
   if (script_->type()->value() == Script::TYPE_NATIVE) {
     MarkAsNative();
diff --git a/src/compiler.h b/src/compiler.h
index d166c2a..2d9e52a 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -35,8 +35,6 @@
 namespace v8 {
 namespace internal {
 
-static const int kPrologueOffsetNotSet = -1;
-
 class ScriptDataImpl;
 class HydrogenCodeStub;
 
@@ -269,12 +267,12 @@
   void set_bailout_reason(BailoutReason reason) { bailout_reason_ = reason; }
 
   int prologue_offset() const {
-    ASSERT_NE(kPrologueOffsetNotSet, prologue_offset_);
+    ASSERT_NE(Code::kPrologueOffsetNotSet, prologue_offset_);
     return prologue_offset_;
   }
 
   void set_prologue_offset(int prologue_offset) {
-    ASSERT_EQ(kPrologueOffsetNotSet, prologue_offset_);
+    ASSERT_EQ(Code::kPrologueOffsetNotSet, prologue_offset_);
     prologue_offset_ = prologue_offset;
   }
 
diff --git a/src/debug.cc b/src/debug.cc
index c820b97..35970e5 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -2106,6 +2106,7 @@
 
           if (!shared->allows_lazy_compilation()) continue;
           if (!shared->script()->IsScript()) continue;
+          if (function->IsBuiltin()) continue;
           if (shared->code()->gc_metadata() == active_code_marker) continue;
 
           Code::Kind kind = function->code()->kind();
diff --git a/src/factory.cc b/src/factory.cc
index 4927cac..1dd246f 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -1015,10 +1015,12 @@
                               Code::Flags flags,
                               Handle<Object> self_ref,
                               bool immovable,
-                              bool crankshafted) {
+                              bool crankshafted,
+                              int prologue_offset) {
   CALL_HEAP_FUNCTION(isolate(),
                      isolate()->heap()->CreateCode(
-                         desc, flags, self_ref, immovable, crankshafted),
+                         desc, flags, self_ref, immovable, crankshafted,
+                         prologue_offset),
                      Code);
 }
 
diff --git a/src/factory.h b/src/factory.h
index 68980c5..ee25bf2 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -380,7 +380,8 @@
                        Code::Flags flags,
                        Handle<Object> self_reference,
                        bool immovable = false,
-                       bool crankshafted = false);
+                       bool crankshafted = false,
+                       int prologue_offset = Code::kPrologueOffsetNotSet);
 
   Handle<Code> CopyCode(Handle<Code> code);
 
diff --git a/src/frames.h b/src/frames.h
index 2bbbd98..d2dbfe2 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -922,6 +922,13 @@
 };
 
 
+// Used to specify the type of prologue to generate.
+enum PrologueFrameMode {
+  BUILD_FUNCTION_FRAME,
+  BUILD_STUB_FRAME
+};
+
+
 // Reads all frames on the current stack and copies them into the current
 // zone memory.
 Vector<StackFrame*> CreateStackMap(Isolate* isolate, Zone* zone);
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index f87cf88..fec9ee5 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -345,8 +345,6 @@
   code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
   code->set_handler_table(*cgen.handler_table());
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  code->set_has_debug_break_slots(
-      info->isolate()->debugger()->IsDebuggerActive());
   code->set_compiled_optimizable(info->IsOptimizable());
 #endif  // ENABLE_DEBUGGER_SUPPORT
   code->set_allow_osr_at_loop_nesting_level(0);
diff --git a/src/heap.cc b/src/heap.cc
index fd5504f..1c55c60 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -4140,7 +4140,8 @@
                               Code::Flags flags,
                               Handle<Object> self_reference,
                               bool immovable,
-                              bool crankshafted) {
+                              bool crankshafted,
+                              int prologue_offset) {
   // Allocate ByteArray before the Code object, so that we do not risk
   // leaving uninitialized Code object (and breaking the heap).
   ByteArray* reloc_info;
@@ -4190,10 +4191,18 @@
   code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
   code->set_gc_metadata(Smi::FromInt(0));
   code->set_ic_age(global_ic_age_);
-  code->set_prologue_offset(kPrologueOffsetNotSet);
+  code->set_prologue_offset(prologue_offset);
   if (code->kind() == Code::OPTIMIZED_FUNCTION) {
     code->set_marked_for_deoptimization(false);
   }
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  if (code->kind() == Code::FUNCTION) {
+    code->set_has_debug_break_slots(
+        isolate_->debugger()->IsDebuggerActive());
+  }
+#endif
+
   // Allow self references to created code object by patching the handle to
   // point to the newly allocated Code object.
   if (!self_reference.is_null()) {
@@ -5520,9 +5529,10 @@
 #ifndef V8_HOST_ARCH_64_BIT
   size += kPointerSize;
 #endif
+  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
 
   HeapObject* object;
-  { MaybeObject* maybe_object = old_pointer_space_->AllocateRaw(size);
+  { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_POINTER_SPACE);
     if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
   }
   object = EnsureDoubleAligned(this, object, size);
diff --git a/src/heap.h b/src/heap.h
index 0bd2c40..92e8089 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1125,11 +1125,13 @@
   // self_reference. This allows generated code to reference its own Code
   // object by containing this pointer.
   // Please note this function does not perform a garbage collection.
-  MUST_USE_RESULT MaybeObject* CreateCode(const CodeDesc& desc,
-                                          Code::Flags flags,
-                                          Handle<Object> self_reference,
-                                          bool immovable = false,
-                                          bool crankshafted = false);
+  MUST_USE_RESULT MaybeObject* CreateCode(
+      const CodeDesc& desc,
+      Code::Flags flags,
+      Handle<Object> self_reference,
+      bool immovable = false,
+      bool crankshafted = false,
+      int prologue_offset = Code::kPrologueOffsetNotSet);
 
   MUST_USE_RESULT MaybeObject* CopyCode(Code* code);
 
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index c8b149b..7df1aae 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -2777,19 +2777,15 @@
 
 class HCheckInstanceType V8_FINAL : public HUnaryOperation {
  public:
-  static HCheckInstanceType* NewIsSpecObject(HValue* value, Zone* zone) {
-    return new(zone) HCheckInstanceType(value, IS_SPEC_OBJECT);
-  }
-  static HCheckInstanceType* NewIsJSArray(HValue* value, Zone* zone) {
-    return new(zone) HCheckInstanceType(value, IS_JS_ARRAY);
-  }
-  static HCheckInstanceType* NewIsString(HValue* value, Zone* zone) {
-    return new(zone) HCheckInstanceType(value, IS_STRING);
-  }
-  static HCheckInstanceType* NewIsInternalizedString(
-      HValue* value, Zone* zone) {
-    return new(zone) HCheckInstanceType(value, IS_INTERNALIZED_STRING);
-  }
+  enum Check {
+    IS_SPEC_OBJECT,
+    IS_JS_ARRAY,
+    IS_STRING,
+    IS_INTERNALIZED_STRING,
+    LAST_INTERVAL_CHECK = IS_JS_ARRAY
+  };
+
+  DECLARE_INSTRUCTION_FACTORY_P2(HCheckInstanceType, HValue*, Check);
 
   virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
 
@@ -2817,14 +2813,6 @@
   virtual int RedefinedOperandIndex() { return 0; }
 
  private:
-  enum Check {
-    IS_SPEC_OBJECT,
-    IS_JS_ARRAY,
-    IS_STRING,
-    IS_INTERNALIZED_STRING,
-    LAST_INTERVAL_CHECK = IS_JS_ARRAY
-  };
-
   const char* GetCheckName();
 
   HCheckInstanceType(HValue* value, Check check)
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 9d990bf..77dab3d 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -3215,12 +3215,6 @@
 }
 
 
-void HGraphBuilder::PushAndAdd(HInstruction* instr) {
-  Push(instr);
-  AddInstruction(instr);
-}
-
-
 template <class Instruction>
 HInstruction* HOptimizedGraphBuilder::PreProcessCall(Instruction* call) {
   int count = call->argument_count();
@@ -3898,9 +3892,7 @@
     set_current_block(body_exit);
 
     HValue* current_index = Pop();
-    HInstruction* new_index = New<HAdd>(current_index,
-                                        graph()->GetConstant1());
-    PushAndAdd(new_index);
+    Push(Add<HAdd>(current_index, graph()->GetConstant1()));
     body_exit = current_block();
   }
 
@@ -4233,7 +4225,11 @@
   Handle<AccessorPair> accessors;
   if (LookupAccessorPair(map, name, &accessors, holder) &&
       accessors->setter()->IsJSFunction()) {
-    *setter = Handle<JSFunction>(JSFunction::cast(accessors->setter()));
+    Handle<JSFunction> func(JSFunction::cast(accessors->setter()));
+    CallOptimization call_optimization(func);
+    // TODO(dcarney): temporary hack until crankshaft can handle api calls.
+    if (call_optimization.is_simple_api_call()) return false;
+    *setter = func;
     return true;
   }
   return false;
@@ -4746,7 +4742,11 @@
     if (!callback->IsAccessorPair()) return false;
     Object* getter = Handle<AccessorPair>::cast(callback)->getter();
     if (!getter->IsJSFunction()) return false;
-    accessor_ = handle(JSFunction::cast(getter));
+    Handle<JSFunction> accessor = handle(JSFunction::cast(getter));
+    CallOptimization call_optimization(accessor);
+    // TODO(dcarney): temporary hack until crankshaft can handle api calls.
+    if (call_optimization.is_simple_api_call()) return false;
+    accessor_ = accessor;
   } else if (lookup_.IsConstant()) {
     constant_ = handle(lookup_.GetConstantFromMap(*map), isolate());
   }
@@ -4759,6 +4759,9 @@
   Handle<Map> map = map_;
   while (map->prototype()->IsJSObject()) {
     holder_ = handle(JSObject::cast(map->prototype()));
+    if (holder_->map()->is_deprecated()) {
+      JSObject::TryMigrateInstance(holder_);
+    }
     map = Handle<Map>(holder_->map());
     if (!CanInlinePropertyAccess(*map)) {
       lookup_.NotFound();
@@ -5319,7 +5322,8 @@
     HValue* left = Pop();
 
     HInstruction* instr = BuildBinaryOperation(operation, left, right);
-    PushAndAdd(instr);
+    AddInstruction(instr);
+    Push(instr);
     if (instr->HasObservableSideEffects()) {
       Add<HSimulate>(operation->id(), REMOVABLE_SIMULATE);
     }
@@ -5798,13 +5802,14 @@
         expr->GetStoreMode(), has_side_effects);
   } else {
     if (is_store) {
-      if (expr->IsAssignment() && expr->AsAssignment()->IsUninitialized()) {
+      if (expr->IsAssignment() &&
+          expr->AsAssignment()->HasNoTypeInformation()) {
         Add<HDeoptimize>("Insufficient type feedback for keyed store",
                          Deoptimizer::SOFT);
       }
       instr = BuildStoreKeyedGeneric(obj, key, val);
     } else {
-      if (expr->AsProperty()->IsUninitialized()) {
+      if (expr->AsProperty()->HasNoTypeInformation()) {
         Add<HDeoptimize>("Insufficient type feedback for keyed load",
                          Deoptimizer::SOFT);
       }
@@ -5958,7 +5963,7 @@
       HInstruction* checked_object;
       if (AreStringTypes(types)) {
         checked_object =
-            AddInstruction(HCheckInstanceType::NewIsString(object, zone()));
+            Add<HCheckInstanceType>(object, HCheckInstanceType::IS_STRING);
       } else {
         checked_object = Add<HCheckMaps>(object, types);
       }
@@ -6953,9 +6958,9 @@
     }
 
     Drop(arguments_count - 1);
-    PushAndAdd(New<HPushArgument>(Pop()));
+    Push(Add<HPushArgument>(Pop()));
     for (int i = 1; i < arguments_count; i++) {
-      PushAndAdd(New<HPushArgument>(arguments_values->at(i)));
+      Push(Add<HPushArgument>(arguments_values->at(i)));
     }
 
     HInvokeFunction* call = New<HInvokeFunction>(function,
@@ -7068,8 +7073,8 @@
       if (known_global_function) {
         // Push the global object instead of the global receiver because
         // code generated by the full code generator expects it.
-        HGlobalObject* global_object = New<HGlobalObject>();
-        PushAndAdd(global_object);
+        HGlobalObject* global_object = Add<HGlobalObject>();
+        Push(global_object);
         CHECK_ALIVE(VisitExpressions(expr->arguments()));
 
         CHECK_ALIVE(VisitForValue(expr->expression()));
@@ -7108,7 +7113,7 @@
         }
       } else {
         HGlobalObject* receiver = Add<HGlobalObject>();
-        PushAndAdd(New<HPushArgument>(receiver));
+        Push(Add<HPushArgument>(receiver));
         CHECK_ALIVE(VisitArgumentList(expr->arguments()));
 
         call = New<HCallGlobal>(var->name(), argument_count);
@@ -7121,8 +7126,8 @@
       CHECK_ALIVE(VisitForValue(expr->expression()));
       HValue* function = Top();
       HGlobalObject* global = Add<HGlobalObject>();
-      HGlobalReceiver* receiver = New<HGlobalReceiver>(global);
-      PushAndAdd(receiver);
+      HGlobalReceiver* receiver = Add<HGlobalReceiver>(global);
+      Push(receiver);
       CHECK_ALIVE(VisitExpressions(expr->arguments()));
       Add<HCheckValue>(function, expr->target());
 
@@ -7148,7 +7153,7 @@
       HValue* function = Top();
       HGlobalObject* global_object = Add<HGlobalObject>();
       HGlobalReceiver* receiver = Add<HGlobalReceiver>(global_object);
-      PushAndAdd(New<HPushArgument>(receiver));
+      Push(Add<HPushArgument>(receiver));
       CHECK_ALIVE(VisitArgumentList(expr->arguments()));
 
       call = New<HCallFunction>(function, argument_count);
@@ -7640,7 +7645,7 @@
   }
   BuildCheckHeapObject(string);
   HValue* checkstring =
-      AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
+      Add<HCheckInstanceType>(string, HCheckInstanceType::IS_STRING);
   HInstruction* length = BuildLoadStringLength(string, checkstring);
   AddInstruction(length);
   HInstruction* checked_index = Add<HBoundsCheck>(index, length);
@@ -8243,9 +8248,9 @@
           return ast_context()->ReturnControl(result, expr->id());
         } else {
           BuildCheckHeapObject(left);
-          AddInstruction(HCheckInstanceType::NewIsSpecObject(left, zone()));
+          Add<HCheckInstanceType>(left, HCheckInstanceType::IS_SPEC_OBJECT);
           BuildCheckHeapObject(right);
-          AddInstruction(HCheckInstanceType::NewIsSpecObject(right, zone()));
+          Add<HCheckInstanceType>(right, HCheckInstanceType::IS_SPEC_OBJECT);
           HCompareObjectEqAndBranch* result =
               New<HCompareObjectEqAndBranch>(left, right);
           return ast_context()->ReturnControl(result, expr->id());
@@ -8257,17 +8262,17 @@
   } else if (combined_type->Is(Type::InternalizedString()) &&
              Token::IsEqualityOp(op)) {
     BuildCheckHeapObject(left);
-    AddInstruction(HCheckInstanceType::NewIsInternalizedString(left, zone()));
+    Add<HCheckInstanceType>(left, HCheckInstanceType::IS_INTERNALIZED_STRING);
     BuildCheckHeapObject(right);
-    AddInstruction(HCheckInstanceType::NewIsInternalizedString(right, zone()));
+    Add<HCheckInstanceType>(right, HCheckInstanceType::IS_INTERNALIZED_STRING);
     HCompareObjectEqAndBranch* result =
         New<HCompareObjectEqAndBranch>(left, right);
     return ast_context()->ReturnControl(result, expr->id());
   } else if (combined_type->Is(Type::String())) {
     BuildCheckHeapObject(left);
-    AddInstruction(HCheckInstanceType::NewIsString(left, zone()));
+    Add<HCheckInstanceType>(left, HCheckInstanceType::IS_STRING);
     BuildCheckHeapObject(right);
-    AddInstruction(HCheckInstanceType::NewIsString(right, zone()));
+    Add<HCheckInstanceType>(right, HCheckInstanceType::IS_STRING);
     HStringCompareAndBranch* result =
         New<HStringCompareAndBranch>(left, right, op);
     return ast_context()->ReturnControl(result, expr->id());
diff --git a/src/hydrogen.h b/src/hydrogen.h
index cb3688c..b5046bd 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -1309,8 +1309,6 @@
   HValue* EnforceNumberType(HValue* number, Handle<Type> expected);
   HValue* TruncateToNumber(HValue* value, Handle<Type>* expected);
 
-  void PushAndAdd(HInstruction* instr);
-
   void FinishExitWithHardDeoptimization(const char* reason,
                                         HBasicBlock* continuation);
 
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index 5a35b20..05cc23a 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -47,6 +47,7 @@
 
 
 static const byte kCallOpcode = 0xE8;
+static const int kNoCodeAgeSequenceLength = 5;
 
 
 // The modes possibly affected by apply must be in kApplyMask.
@@ -190,6 +191,13 @@
 }
 
 
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  ASSERT(*pc_ == kCallOpcode);
+  return Memory::Object_Handle_at(pc_ + 1);
+}
+
+
 Code* RelocInfo::code_age_stub() {
   ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
   ASSERT(*pc_ == kCallOpcode);
@@ -379,7 +387,8 @@
 void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id) {
   if (rmode == RelocInfo::CODE_TARGET && !id.IsNone()) {
     RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, id.ToInt());
-  } else if (!RelocInfo::IsNone(rmode)) {
+  } else if (!RelocInfo::IsNone(rmode) &&
+             rmode != RelocInfo::CODE_AGE_SEQUENCE) {
     RecordRelocInfo(rmode);
   }
   emit(x);
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 8cf73c3..cfeaca6 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -1414,7 +1414,8 @@
                      TypeFeedbackId ast_id) {
   positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
-  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  ASSERT(RelocInfo::IsCodeTarget(rmode) ||
+         rmode == RelocInfo::CODE_AGE_SEQUENCE);
   EMIT(0xE8);
   emit(code, rmode, ast_id);
 }
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 24a2e6e..e5e6ec5 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -563,6 +563,44 @@
 #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
 
 
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+  // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+  // that make_code_young doesn't do any garbage collection, which allows us to
+  // save/restore the registers without worrying about which of them contain
+  // pointers.
+  __ pushad();
+  __ mov(eax, Operand(esp, 8 * kPointerSize));
+  __ sub(eax, Immediate(Assembler::kCallInstructionLength));
+  {  // NOLINT
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ PrepareCallCFunction(2, ebx);
+    __ mov(Operand(esp, 1 * kPointerSize),
+           Immediate(ExternalReference::isolate_address(masm->isolate())));
+    __ mov(Operand(esp, 0), eax);
+    __ CallCFunction(
+        ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+        2);
+  }
+  __ popad();
+
+  // Perform prologue operations usually performed by the young code stub.
+  __ pop(eax);   // Pop return address into scratch register.
+  __ push(ebp);  // Caller's frame pointer.
+  __ mov(ebp, esp);
+  __ push(esi);  // Callee's context.
+  __ push(edi);  // Callee's JS Function.
+  __ push(eax);  // Push return address after frame prologue.
+
+  // Jump to point after the code-age stub.
+  __ ret(0);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+  GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
 void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
   // Enter an internal frame.
   {
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index da88146..226b6b0 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -1117,7 +1117,6 @@
 
 #undef __
 
-static const int kNoCodeAgeSequenceLength = 5;
 
 static byte* GetNoCodeAgeSequence(uint32_t* length) {
   static bool initialized = false;
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 725a6f8..704fb4e 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -158,10 +158,7 @@
   FrameScope frame_scope(masm_, StackFrame::MANUAL);
 
   info->set_prologue_offset(masm_->pc_offset());
-  __ push(ebp);  // Caller's frame pointer.
-  __ mov(ebp, esp);
-  __ push(esi);  // Callee's context.
-  __ push(edi);  // Callee's JS Function.
+  __ Prologue(BUILD_FUNCTION_FRAME);
   info->AddNoFrameRange(0, masm_->pc_offset());
 
   { Comment cmnt(masm_, "[ Allocate locals");
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index c231fe1..ebeaaa8 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -188,15 +188,8 @@
   if (NeedsEagerFrame()) {
     ASSERT(!frame_is_built_);
     frame_is_built_ = true;
-    __ push(ebp);  // Caller's frame pointer.
-    __ mov(ebp, esp);
+    __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
     info()->AddNoFrameRange(0, masm_->pc_offset());
-    __ push(esi);  // Callee's context.
-    if (info()->IsStub()) {
-      __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
-    } else {
-      __ push(edi);  // Callee's JS function.
-    }
   }
 
   if (info()->IsOptimizing() &&
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 1c13e83..0800823 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -561,29 +561,34 @@
 }
 
 
+static inline bool CanBeImmediateConstant(HValue* value) {
+  return value->IsConstant() && HConstant::cast(value)->NotInNewSpace();
+}
+
+
 LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
-  return value->IsConstant()
+  return CanBeImmediateConstant(value)
       ? chunk_->DefineConstantOperand(HConstant::cast(value))
       : Use(value);
 }
 
 
 LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
-  return value->IsConstant()
+  return CanBeImmediateConstant(value)
       ? chunk_->DefineConstantOperand(HConstant::cast(value))
       : UseAtStart(value);
 }
 
 
 LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
-  return value->IsConstant()
+  return CanBeImmediateConstant(value)
       ? chunk_->DefineConstantOperand(HConstant::cast(value))
       : UseRegister(value);
 }
 
 
 LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
-  return value->IsConstant()
+  return CanBeImmediateConstant(value)
       ? chunk_->DefineConstantOperand(HConstant::cast(value))
       : UseRegisterAtStart(value);
 }
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 805861e..ed69fd0 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1013,6 +1013,30 @@
 }
 
 
+void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
+  if (frame_mode == BUILD_STUB_FRAME) {
+    push(ebp);  // Caller's frame pointer.
+    mov(ebp, esp);
+    push(esi);  // Callee's context.
+    push(Immediate(Smi::FromInt(StackFrame::STUB)));
+  } else {
+    PredictableCodeSizeScope predictable_code_size_scope(this,
+        kNoCodeAgeSequenceLength);
+    if (FLAG_optimize_for_size && FLAG_age_code) {
+      // Pre-age the code.
+      call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
+          RelocInfo::CODE_AGE_SEQUENCE);
+      Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength);
+    } else {
+      push(ebp);  // Caller's frame pointer.
+      mov(ebp, esp);
+      push(esi);  // Callee's context.
+      push(edi);  // Callee's JS function.
+    }
+  }
+}
+
+
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   push(ebp);
   mov(ebp, esp);
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index a6d782e..30f8a8d 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -225,6 +225,9 @@
   void DebugBreak();
 #endif
 
+  // Generates function and stub prologue code.
+  void Prologue(PrologueFrameMode frame_mode);
+
   // Enter specific kind of exit frame. Expects the number of
   // arguments in register eax and sets up the number of arguments in
   // register edi and the pointer to the first argument in register
diff --git a/src/ic.cc b/src/ic.cc
index a6ffb13..4bff543 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -117,7 +117,9 @@
 #define TRACE_IC(type, name)             \
   ASSERT((TraceIC(type, name), true))
 
-IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) {
+IC::IC(FrameDepth depth, Isolate* isolate)
+    : isolate_(isolate),
+      target_set_(false) {
   // To improve the performance of the (much used) IC code, we unfold a few
   // levels of the stack frame iteration code. This yields a ~35% speedup when
   // running DeltaBlue and a ~25% speedup of gbemu with the '--nouse-ic' flag.
@@ -1363,40 +1365,46 @@
     return Runtime::GetObjectPropertyOrFail(isolate(), object, key);
   }
 
+  MaybeObject* maybe_object = NULL;
+  Handle<Code> stub = generic_stub();
+
   // Check for values that can be converted into an internalized string directly
  // or are representable as a smi.
   key = TryConvertKey(key, isolate());
 
   if (key->IsInternalizedString()) {
-    return LoadIC::Load(object, Handle<String>::cast(key));
-  }
-
-  if (FLAG_use_ic && !object->IsAccessCheckNeeded()) {
+    maybe_object = LoadIC::Load(object, Handle<String>::cast(key));
+    if (maybe_object->IsFailure()) return maybe_object;
+  } else if (FLAG_use_ic && !object->IsAccessCheckNeeded()) {
     ASSERT(!object->IsJSGlobalProxy());
-    Handle<Code> stub = generic_stub();
-    if (miss_mode == MISS_FORCE_GENERIC) {
-      TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "force generic");
-    } else if (object->IsString() && key->IsNumber()) {
-      if (state() == UNINITIALIZED) stub = string_stub();
-    } else if (object->IsJSObject()) {
-      Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-      if (receiver->elements()->map() ==
-          isolate()->heap()->non_strict_arguments_elements_map()) {
-        stub = non_strict_arguments_stub();
-      } else if (receiver->HasIndexedInterceptor()) {
-        stub = indexed_interceptor_stub();
-      } else if (!key->ToSmi()->IsFailure() &&
-                 (!target().is_identical_to(non_strict_arguments_stub()))) {
-        stub = LoadElementStub(receiver);
+    if (miss_mode != MISS_FORCE_GENERIC) {
+      if (object->IsString() && key->IsNumber()) {
+        if (state() == UNINITIALIZED) stub = string_stub();
+      } else if (object->IsJSObject()) {
+        Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+        if (receiver->elements()->map() ==
+            isolate()->heap()->non_strict_arguments_elements_map()) {
+          stub = non_strict_arguments_stub();
+        } else if (receiver->HasIndexedInterceptor()) {
+          stub = indexed_interceptor_stub();
+        } else if (!key->ToSmi()->IsFailure() &&
+                   (!target().is_identical_to(non_strict_arguments_stub()))) {
+          stub = LoadElementStub(receiver);
+        }
       }
     }
+  }
 
+  if (!is_target_set()) {
+    if (*stub == *generic_stub()) {
+      TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
+    }
     ASSERT(!stub.is_null());
     set_target(*stub);
     TRACE_IC("LoadIC", key);
   }
 
-
+  if (maybe_object != NULL) return maybe_object;
   return Runtime::GetObjectPropertyOrFail(isolate(), object, key);
 }
 
@@ -1936,53 +1944,58 @@
  // or are representable as a smi.
   key = TryConvertKey(key, isolate());
 
+  MaybeObject* maybe_object = NULL;
+  Handle<Code> stub = generic_stub();
+
   if (key->IsInternalizedString()) {
-    return StoreIC::Store(object,
-                          Handle<String>::cast(key),
-                          value,
-                          JSReceiver::MAY_BE_STORE_FROM_KEYED);
-  }
+    maybe_object = StoreIC::Store(object,
+                                  Handle<String>::cast(key),
+                                  value,
+                                  JSReceiver::MAY_BE_STORE_FROM_KEYED);
+    if (maybe_object->IsFailure()) return maybe_object;
+  } else {
+    bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded() &&
+        !(FLAG_harmony_observation && object->IsJSObject() &&
+          JSObject::cast(*object)->map()->is_observed());
+    if (use_ic && !object->IsSmi()) {
+      // Don't use ICs for maps of the objects in Array's prototype chain. We
+      // expect to be able to trap element sets to objects with those maps in
+      // the runtime to enable optimization of element hole access.
+      Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
+      if (heap_object->map()->IsMapInArrayPrototypeChain()) use_ic = false;
+    }
 
-  bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded() &&
-      !(FLAG_harmony_observation && object->IsJSObject() &&
-        JSObject::cast(*object)->map()->is_observed());
-  if (use_ic && !object->IsSmi()) {
-    // Don't use ICs for maps of the objects in Array's prototype chain. We
-    // expect to be able to trap element sets to objects with those maps in the
-    // runtime to enable optimization of element hole access.
-    Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
-    if (heap_object->map()->IsMapInArrayPrototypeChain()) use_ic = false;
-  }
+    if (use_ic) {
+      ASSERT(!object->IsJSGlobalProxy());
 
-  if (use_ic) {
-    ASSERT(!object->IsJSGlobalProxy());
-
-    Handle<Code> stub = generic_stub();
-    if (miss_mode != MISS_FORCE_GENERIC) {
-      if (object->IsJSObject()) {
-        Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-        bool key_is_smi_like = key->IsSmi() || !key->ToSmi()->IsFailure();
-        if (receiver->elements()->map() ==
-            isolate()->heap()->non_strict_arguments_elements_map()) {
-          stub = non_strict_arguments_stub();
-        } else if (key_is_smi_like &&
-                   (!target().is_identical_to(non_strict_arguments_stub()))) {
-          KeyedAccessStoreMode store_mode = GetStoreMode(receiver, key, value);
-          stub = StoreElementStub(receiver, store_mode);
-        } else {
-          TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "key not a number");
+      if (miss_mode != MISS_FORCE_GENERIC) {
+        if (object->IsJSObject()) {
+          Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+          bool key_is_smi_like = key->IsSmi() || !key->ToSmi()->IsFailure();
+          if (receiver->elements()->map() ==
+              isolate()->heap()->non_strict_arguments_elements_map()) {
+            stub = non_strict_arguments_stub();
+          } else if (key_is_smi_like &&
+                     (!target().is_identical_to(non_strict_arguments_stub()))) {
+            KeyedAccessStoreMode store_mode =
+                GetStoreMode(receiver, key, value);
+            stub = StoreElementStub(receiver, store_mode);
+          }
         }
-      } else {
-        TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "not an object");
       }
-    } else {
-      TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "force generic");
+    }
+  }
+
+  if (!is_target_set()) {
+    if (*stub == *generic_stub()) {
+      TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic");
     }
     ASSERT(!stub.is_null());
     set_target(*stub);
     TRACE_IC("StoreIC", key);
   }
 
+  if (maybe_object != NULL) return maybe_object;
   return Runtime::SetObjectPropertyOrFail(
      isolate(), object, key, value, NONE, strict_mode());
 }
diff --git a/src/ic.h b/src/ic.h
index 3f0c5c6..fde4bc7 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -155,7 +155,12 @@
 #endif
 
   // Set the call-site target.
-  void set_target(Code* code) { SetTargetAtAddress(address(), code); }
+  void set_target(Code* code) {
+    SetTargetAtAddress(address(), code);
+    target_set_ = true;
+  }
+
+  bool is_target_set() { return target_set_; }
 
 #ifdef DEBUG
   char TransitionMarkFromState(IC::State state);
@@ -235,6 +240,7 @@
   // The original code target that missed.
   Handle<Code> target_;
   State state_;
+  bool target_set_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(IC);
 };
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
index 2fa6804..de91051 100644
--- a/src/mips/assembler-mips-inl.h
+++ b/src/mips/assembler-mips-inl.h
@@ -261,6 +261,13 @@
 
 static const int kNoCodeAgeSequenceLength = 7;
 
+
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+  UNREACHABLE();  // This should never be reached on MIPS.
+  return Handle<Object>();
+}
+
+
 Code* RelocInfo::code_age_stub() {
   ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
   return Code::GetCodeFromTargetAddress(
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index ed7764b..0b49583 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -857,6 +857,49 @@
 #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
 
 
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+  // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+  // that make_code_young doesn't do any garbage collection, which allows us to
+  // save/restore the registers without worrying about which of them contain
+  // pointers.
+
+  __ mov(a0, ra);
+  // Adjust a0 to point to the head of the PlatformCodeAge sequence.
+  __ Subu(a0, a0,
+      Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize));
+  // Restore the original return address of the function.
+  __ mov(ra, at);
+
+  // The following registers must be saved and restored when calling through to
+  // the runtime:
+  //   a0 - contains return address (beginning of patch sequence)
+  //   a1 - isolate
+  RegList saved_regs =
+      (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
+  FrameScope scope(masm, StackFrame::MANUAL);
+  __ MultiPush(saved_regs);
+  __ PrepareCallCFunction(2, 0, a2);
+  __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
+  __ CallCFunction(
+      ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+      2);
+  __ MultiPop(saved_regs);
+
+  // Perform prologue operations usually performed by the young code stub.
+  __ Push(ra, fp, cp, a1);
+  __ Addu(fp, sp, Operand(2 * kPointerSize));
+
+  // Jump to point after the code-age stub.
+  __ Addu(a0, a0, Operand(kNoCodeAgeSequenceLength * Assembler::kInstrSize));
+  __ Jump(a0);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+  GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
 void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index b9e282f..cbd0788 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -171,12 +171,7 @@
   FrameScope frame_scope(masm_, StackFrame::MANUAL);
 
   info->set_prologue_offset(masm_->pc_offset());
-  // The following three instructions must remain together and unmodified for
-  // code aging to work properly.
-  __ Push(ra, fp, cp, a1);
-  __ nop(Assembler::CODE_AGE_SEQUENCE_NOP);
-  // Adjust fp to point to caller's fp.
-  __ Addu(fp, sp, Operand(2 * kPointerSize));
+  __ Prologue(BUILD_FUNCTION_FRAME);
   info->AddNoFrameRange(0, masm_->pc_offset());
 
   { Comment cmnt(masm_, "[ Allocate locals");
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 2c6e5a5..f54d4a5 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -133,21 +133,7 @@
 
   info()->set_prologue_offset(masm_->pc_offset());
   if (NeedsEagerFrame()) {
-    if (info()->IsStub()) {
-      __ Push(ra, fp, cp);
-      __ Push(Smi::FromInt(StackFrame::STUB));
-      // Adjust FP to point to saved FP.
-      __ Addu(fp, sp, Operand(2 * kPointerSize));
-    } else {
-      // The following three instructions must remain together and unmodified
-      // for code aging to work properly.
-      __ Push(ra, fp, cp, a1);
-      // Add unused nop to ensure prologue sequence is identical for
-      // full-codegen and lithium-codegen.
-      __ nop(Assembler::CODE_AGE_SEQUENCE_NOP);
-      // Adj. FP to point to saved FP.
-      __ Addu(fp, sp, Operand(2 * kPointerSize));
-    }
+    __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
     frame_is_built_ = true;
     info_->AddNoFrameRange(0, masm_->pc_offset());
   }
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index e42ba6c..52d8a4c 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -4585,6 +4585,40 @@
 }
 
 
+void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
+  if (frame_mode == BUILD_STUB_FRAME) {
+    Push(ra, fp, cp);
+    Push(Smi::FromInt(StackFrame::STUB));
+    // Adjust FP to point to saved FP.
+    Addu(fp, sp, Operand(2 * kPointerSize));
+  } else {
+    PredictableCodeSizeScope predictable_code_size_scope(
+        this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
+    // The following three instructions must remain together and unmodified
+    // for code aging to work properly.
+    if (FLAG_optimize_for_size && FLAG_age_code) {
+      // Pre-age the code.
+      Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
+      nop(Assembler::CODE_AGE_MARKER_NOP);
+      // Save the function's original return address
+      // (it will be clobbered by Call(t9)).
+      mov(at, ra);
+      // Load the stub address into t9 and call it.
+      li(t9,
+         Operand(reinterpret_cast<uint32_t>(stub->instruction_start())));
+      Call(t9);
+      // Record the stub address in the empty space for GetCodeAgeAndParity().
+      dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
+    } else {
+      Push(ra, fp, cp, a1);
+      nop(Assembler::CODE_AGE_SEQUENCE_NOP);
+      // Adjust fp to point to caller's fp.
+      Addu(fp, sp, Operand(2 * kPointerSize));
+    }
+  }
+}
+
+
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   addiu(sp, sp, -5 * kPointerSize);
   li(t8, Operand(Smi::FromInt(type)));
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index 84e60ba..0805bb9 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -1498,6 +1498,9 @@
     And(reg, reg, Operand(mask));
   }
 
+  // Generates function and stub prologue code.
+  void Prologue(PrologueFrameMode frame_mode);
+
   // Activation support.
   void EnterFrame(StackFrame::Type type);
   void LeaveFrame(StackFrame::Type type);
diff --git a/src/msan.h b/src/msan.h
new file mode 100644
index 0000000..484c9fa
--- /dev/null
+++ b/src/msan.h
@@ -0,0 +1,49 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// MemorySanitizer support.
+
+#ifndef V8_MSAN_H_
+#define V8_MSAN_H_
+
+#ifndef __has_feature
+# define __has_feature(x) 0
+#endif
+
+#if __has_feature(memory_sanitizer) && !defined(MEMORY_SANITIZER)
+# define MEMORY_SANITIZER
+#endif
+
+#ifdef MEMORY_SANITIZER
+# include <sanitizer/msan_interface.h>
+// Marks a memory range as fully initialized.
+# define MSAN_MEMORY_IS_INITIALIZED(p, s) __msan_unpoison((p), (s))
+#else
+# define MSAN_MEMORY_IS_INITIALIZED(p, s)
+#endif
+
+#endif  // V8_MSAN_H_
diff --git a/src/objects.cc b/src/objects.cc
index 4412e76..feca669 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -4473,14 +4473,14 @@
 Handle<Map> NormalizedMapCache::Get(Handle<NormalizedMapCache> cache,
                                     Handle<JSObject> obj,
                                     PropertyNormalizationMode mode) {
-  Map* fast = obj->map();
-  int index = fast->Hash() % kEntries;
-  Object* result = cache->get(index);
+  int index = obj->map()->Hash() % kEntries;
+  Handle<Object> result = handle(cache->get(index), cache->GetIsolate());
   if (result->IsMap() &&
-      Map::cast(result)->EquivalentToForNormalization(fast, mode)) {
+      Handle<Map>::cast(result)->EquivalentToForNormalization(obj->map(),
+                                                              mode)) {
 #ifdef VERIFY_HEAP
     if (FLAG_verify_heap) {
-      Map::cast(result)->SharedMapVerify();
+      Handle<Map>::cast(result)->SharedMapVerify();
     }
 #endif
 #ifdef DEBUG
@@ -4488,27 +4488,25 @@
       // The cached map should match newly created normalized map bit-by-bit,
       // except for the code cache, which can contain some ICs that can be
       // applied to the shared map.
-      Object* fresh;
-      MaybeObject* maybe_fresh =
-          fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
-      if (maybe_fresh->ToObject(&fresh)) {
-        ASSERT(memcmp(Map::cast(fresh)->address(),
-                      Map::cast(result)->address(),
-                      Map::kCodeCacheOffset) == 0);
-        STATIC_ASSERT(Map::kDependentCodeOffset ==
-                      Map::kCodeCacheOffset + kPointerSize);
-        int offset = Map::kDependentCodeOffset + kPointerSize;
-        ASSERT(memcmp(Map::cast(fresh)->address() + offset,
-                      Map::cast(result)->address() + offset,
-                      Map::kSize - offset) == 0);
-      }
+      Handle<Map> fresh = Map::CopyNormalized(handle(obj->map()), mode,
+                                              SHARED_NORMALIZED_MAP);
+
+      ASSERT(memcmp(fresh->address(),
+                    Handle<Map>::cast(result)->address(),
+                    Map::kCodeCacheOffset) == 0);
+      STATIC_ASSERT(Map::kDependentCodeOffset ==
+                    Map::kCodeCacheOffset + kPointerSize);
+      int offset = Map::kDependentCodeOffset + kPointerSize;
+      ASSERT(memcmp(fresh->address() + offset,
+                    Handle<Map>::cast(result)->address() + offset,
+                    Map::kSize - offset) == 0);
     }
 #endif
-    return handle(Map::cast(result));
+    return Handle<Map>::cast(result);
   }
 
   Isolate* isolate = cache->GetIsolate();
-  Handle<Map> map = Map::CopyNormalized(handle(fast), mode,
+  Handle<Map> map = Map::CopyNormalized(handle(obj->map()), mode,
                                         SHARED_NORMALIZED_MAP);
   ASSERT(map->is_dictionary_map());
   cache->set(index, *map);
@@ -6649,6 +6647,14 @@
 }
 
 
+Handle<Map> Map::RawCopy(Handle<Map> map,
+                         int instance_size) {
+  CALL_HEAP_FUNCTION(map->GetIsolate(),
+                     map->RawCopy(instance_size),
+                     Map);
+}
+
+
 MaybeObject* Map::RawCopy(int instance_size) {
   Map* result;
   MaybeObject* maybe_result =
@@ -6673,25 +6679,15 @@
 Handle<Map> Map::CopyNormalized(Handle<Map> map,
                                 PropertyNormalizationMode mode,
                                 NormalizedMapSharingMode sharing) {
-  CALL_HEAP_FUNCTION(map->GetIsolate(),
-                     map->CopyNormalized(mode, sharing),
-                     Map);
-}
-
-
-MaybeObject* Map::CopyNormalized(PropertyNormalizationMode mode,
-                                 NormalizedMapSharingMode sharing) {
-  int new_instance_size = instance_size();
+  int new_instance_size = map->instance_size();
   if (mode == CLEAR_INOBJECT_PROPERTIES) {
-    new_instance_size -= inobject_properties() * kPointerSize;
+    new_instance_size -= map->inobject_properties() * kPointerSize;
   }
 
-  Map* result;
-  MaybeObject* maybe_result = RawCopy(new_instance_size);
-  if (!maybe_result->To(&result)) return maybe_result;
+  Handle<Map> result = Map::RawCopy(map, new_instance_size);
 
   if (mode != CLEAR_INOBJECT_PROPERTIES) {
-    result->set_inobject_properties(inobject_properties());
+    result->set_inobject_properties(map->inobject_properties());
   }
 
   result->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
@@ -6841,31 +6837,21 @@
 }
 
 
+// Since this method is used to rewrite an existing transition tree, it can
+// always insert transitions without checking.
 Handle<Map> Map::CopyInstallDescriptors(Handle<Map> map,
                                         int new_descriptor,
                                         Handle<DescriptorArray> descriptors) {
-  CALL_HEAP_FUNCTION(map->GetIsolate(),
-                     map->CopyInstallDescriptors(new_descriptor, *descriptors),
-                     Map);
-}
-
-
-// Since this method is used to rewrite an existing transition tree, it can
-// always insert transitions without checking.
-MaybeObject* Map::CopyInstallDescriptors(int new_descriptor,
-                                         DescriptorArray* descriptors) {
   ASSERT(descriptors->IsSortedNoDuplicates());
 
-  Map* result;
-  MaybeObject* maybe_result = CopyDropDescriptors();
-  if (!maybe_result->To(&result)) return maybe_result;
+  Handle<Map> result = Map::CopyDropDescriptors(map);
 
-  result->InitializeDescriptors(descriptors);
+  result->InitializeDescriptors(*descriptors);
   result->SetNumberOfOwnDescriptors(new_descriptor + 1);
 
-  int unused_property_fields = this->unused_property_fields();
+  int unused_property_fields = map->unused_property_fields();
   if (descriptors->GetDetails(new_descriptor).type() == FIELD) {
-    unused_property_fields = this->unused_property_fields() - 1;
+    unused_property_fields = map->unused_property_fields() - 1;
     if (unused_property_fields < 0) {
       unused_property_fields += JSObject::kFieldsAdded;
     }
@@ -6874,14 +6860,12 @@
   result->set_unused_property_fields(unused_property_fields);
   result->set_owns_descriptors(false);
 
-  Name* name = descriptors->GetKey(new_descriptor);
-  TransitionArray* transitions;
-  MaybeObject* maybe_transitions =
-      AddTransition(name, result, SIMPLE_TRANSITION);
-  if (!maybe_transitions->To(&transitions)) return maybe_transitions;
+  Handle<Name> name = handle(descriptors->GetKey(new_descriptor));
+  Handle<TransitionArray> transitions = Map::AddTransition(map, name, result,
+                                                           SIMPLE_TRANSITION);
 
-  set_transitions(transitions);
-  result->SetBackPointer(this);
+  map->set_transitions(*transitions);
+  result->SetBackPointer(*map);
 
   return result;
 }
@@ -10327,7 +10311,7 @@
 
 void ObjectVisitor::VisitExternalReference(RelocInfo* rinfo) {
   Address* p = rinfo->target_reference_address();
-  VisitExternalReferences(p, p + 1);
+  VisitExternalReference(p);
 }
 
 
@@ -10384,6 +10368,10 @@
     } else if (RelocInfo::IsRuntimeEntry(mode)) {
       Address p = it.rinfo()->target_runtime_entry(origin);
       it.rinfo()->set_target_runtime_entry(p, SKIP_WRITE_BARRIER);
+    } else if (mode == RelocInfo::CODE_AGE_SEQUENCE) {
+      Handle<Object> p = it.rinfo()->code_age_stub_handle(origin);
+      Code* code = Code::cast(*p);
+      it.rinfo()->set_code_age_stub(code);
     } else {
       it.rinfo()->apply(delta);
     }
@@ -10625,6 +10613,12 @@
 }
 
 
+void Code::MarkCodeAsExecuted(byte* sequence, Isolate* isolate) {
+  PatchPlatformCodeAge(isolate, sequence, kExecutedOnceCodeAge,
+      NO_MARKING_PARITY);
+}
+
+
 void Code::MakeOlder(MarkingParity current_parity) {
   byte* sequence = FindCodeAgeSequence();
   if (sequence != NULL) {
@@ -10642,18 +10636,14 @@
 
 
 bool Code::IsOld() {
-  byte* sequence = FindCodeAgeSequence();
-  if (sequence == NULL) return false;
-  Age age;
-  MarkingParity parity;
-  GetCodeAgeAndParity(sequence, &age, &parity);
-  return age >= kSexagenarianCodeAge;
+  Age age = GetAge();
+  return age >= kIsOldCodeAge;
 }
 
 
 byte* Code::FindCodeAgeSequence() {
   return FLAG_age_code &&
-      prologue_offset() != kPrologueOffsetNotSet &&
+      prologue_offset() != Code::kPrologueOffsetNotSet &&
       (kind() == OPTIMIZED_FUNCTION ||
        (kind() == FUNCTION && !has_debug_break_slots()))
       ? instruction_start() + prologue_offset()
@@ -10661,7 +10651,7 @@
 }
 
 
-int Code::GetAge() {
+Code::Age Code::GetAge() {
   byte* sequence = FindCodeAgeSequence();
   if (sequence == NULL) {
     return Code::kNoAge;
@@ -10693,6 +10683,20 @@
   }
   CODE_AGE_LIST(HANDLE_CODE_AGE)
 #undef HANDLE_CODE_AGE
+  stub = *builtins->MarkCodeAsExecutedOnce();
+  if (code == stub) {
+    // Treat code that's never been executed as old immediately.
+    *age = kIsOldCodeAge;
+    *parity = NO_MARKING_PARITY;
+    return;
+  }
+  stub = *builtins->MarkCodeAsExecutedTwice();
+  if (code == stub) {
+    // Pre-age code that has only been executed once.
+    *age = kPreAgedCodeAge;
+    *parity = NO_MARKING_PARITY;
+    return;
+  }
   UNREACHABLE();
 }
 
@@ -10709,6 +10713,14 @@
     }
     CODE_AGE_LIST(HANDLE_CODE_AGE)
 #undef HANDLE_CODE_AGE
+    case kNotExecutedCodeAge: {
+      ASSERT(parity == NO_MARKING_PARITY);
+      return *builtins->MarkCodeAsExecutedOnce();
+    }
+    case kExecutedOnceCodeAge: {
+      ASSERT(parity == NO_MARKING_PARITY);
+      return *builtins->MarkCodeAsExecutedTwice();
+    }
     default:
       UNREACHABLE();
       break;
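
Read together with the Age enum changes in objects.h below, the two new branches complete a small state machine. A hedged summary as a free-standing sketch (hypothetical helper, not V8 source):

    // Which stub each new negative age installs, and what finding that stub in
    // a prologue means to GetCodeAgeAndParity (a reading of this patch only).
    const char* PreAgeStubFor(int age) {
      if (age == -2)  // kNotExecutedCodeAge: never run, reported kIsOldCodeAge
        return "MarkCodeAsExecutedOnce";
      if (age == -1)  // kExecutedOnceCodeAge: run once, reported kPreAgedCodeAge
        return "MarkCodeAsExecutedTwice";
      return "regular MakeCodeYoungAgain stub";  // normal aging path
    }

A second execution hits MarkCodeAsExecutedTwice, which simply calls GenerateMakeCodeYoungAgainCommon, so twice-run code rejoins the normal aging ladder at kNoAge.
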
diff --git a/src/objects.h b/src/objects.h
index 299ca2d..431f8b8 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -4978,6 +4978,8 @@
 
   static const ExtraICState kNoExtraICState = 0;
 
+  static const int kPrologueOffsetNotSet = -1;
+
 #ifdef ENABLE_DISASSEMBLER
   // Printing
   static const char* ICState2String(InlineCacheState state);
@@ -5297,11 +5299,15 @@
 
 #define DECLARE_CODE_AGE_ENUM(X) k##X##CodeAge,
   enum Age {
+    kNotExecutedCodeAge = -2,
+    kExecutedOnceCodeAge = -1,
     kNoAge = 0,
     CODE_AGE_LIST(DECLARE_CODE_AGE_ENUM)
     kAfterLastCodeAge,
     kLastCodeAge = kAfterLastCodeAge - 1,
-    kCodeAgeCount = kAfterLastCodeAge - 1
+    kCodeAgeCount = kAfterLastCodeAge - 1,
+    kIsOldCodeAge = kSexagenarianCodeAge,
+    kPreAgedCodeAge = kIsOldCodeAge - 1
   };
 #undef DECLARE_CODE_AGE_ENUM
 
@@ -5310,10 +5316,14 @@
   // relatively safe to flush this code object and replace it with the lazy
   // compilation stub.
   static void MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate);
+  static void MarkCodeAsExecuted(byte* sequence, Isolate* isolate);
   void MakeOlder(MarkingParity);
   static bool IsYoungSequence(byte* sequence);
   bool IsOld();
-  int GetAge();
+  Age GetAge();
+  static inline Code* GetPreAgedCodeAgeStub(Isolate* isolate) {
+    return GetCodeAgeStub(isolate, kNotExecutedCodeAge, NO_MARKING_PARITY);
+  }
 
   void PrintDeoptLocation(int bailout_id);
   bool CanDeoptAt(Address pc);
@@ -5955,6 +5965,7 @@
   // descriptor array of the map. Returns NULL if no updated map is found.
   Map* CurrentMapForDeprecated();
 
+  static Handle<Map> RawCopy(Handle<Map> map, int instance_size);
   MUST_USE_RESULT MaybeObject* RawCopy(int instance_size);
   MUST_USE_RESULT MaybeObject* CopyWithPreallocatedFieldDescriptors();
   static Handle<Map> CopyDropDescriptors(Handle<Map> map);
@@ -5972,9 +5983,6 @@
       Handle<Map> map,
       int new_descriptor,
       Handle<DescriptorArray> descriptors);
-  MUST_USE_RESULT MaybeObject* CopyInstallDescriptors(
-      int new_descriptor,
-      DescriptorArray* descriptors);
   MUST_USE_RESULT MaybeObject* ShareDescriptor(DescriptorArray* descriptors,
                                                Descriptor* descriptor);
   MUST_USE_RESULT MaybeObject* CopyAddDescriptor(Descriptor* descriptor,
@@ -5996,8 +6004,6 @@
   static Handle<Map> CopyNormalized(Handle<Map> map,
                                     PropertyNormalizationMode mode,
                                     NormalizedMapSharingMode sharing);
-  MUST_USE_RESULT MaybeObject* CopyNormalized(PropertyNormalizationMode mode,
-                                              NormalizedMapSharingMode sharing);
 
   inline void AppendDescriptor(Descriptor* desc,
                                const DescriptorArray::WhitenessWitness&);
@@ -10340,6 +10346,9 @@
   // [start, end). Any or all of the values may be modified on return.
   virtual void VisitPointers(Object** start, Object** end) = 0;
 
+  // Handy shorthand for visiting a single pointer.
+  virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); }
+
   // To allow lazy clearing of inline caches the visitor has
   // a rich interface for iterating over Code objects.
 
@@ -10368,22 +10377,14 @@
   // about the code's age.
   virtual void VisitCodeAgeSequence(RelocInfo* rinfo);
 
-  // Handy shorthand for visiting a single pointer.
-  virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); }
-
   // Visit pointer embedded into a code object.
   virtual void VisitEmbeddedPointer(RelocInfo* rinfo);
 
-  // Visits a contiguous arrays of external references (references to the C++
-  // heap) in the half-open range [start, end). Any or all of the values
-  // may be modified on return.
-  virtual void VisitExternalReferences(Address* start, Address* end) {}
-
+  // Visits an external reference embedded into a code object.
   virtual void VisitExternalReference(RelocInfo* rinfo);
 
-  inline void VisitExternalReference(Address* p) {
-    VisitExternalReferences(p, p + 1);
-  }
+  // Visits an external reference. The value may be modified on return.
+  virtual void VisitExternalReference(Address* p) {}
 
   // Visits a handle that has an embedder-assigned class ID.
   virtual void VisitEmbedderReference(Object** p, uint16_t class_id) {}
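
With the range form VisitExternalReferences(start, end) removed above, subclasses override the single-slot virtual instead. A minimal conforming visitor, as a sketch (hypothetical subclass, assuming the surrounding V8 internal headers):

    // Sketch, not V8 source.
    class CountingVisitor : public ObjectVisitor {
     public:
      CountingVisitor() : external_refs_(0) {}
      virtual void VisitPointers(Object** start, Object** end) {}  // pure in base
      virtual void VisitExternalReference(Address* p) { ++external_refs_; }
     private:
      int external_refs_;
    };
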
diff --git a/src/runtime.cc b/src/runtime.cc
index b25547b..feb78c0 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -2933,19 +2933,19 @@
   source_shared->set_dont_flush(true);
 
   // Set the code, scope info, formal parameter count, and the length
-  // of the target shared function info.  Set the source code of the
-  // target function to undefined.  SetCode is only used for built-in
-  // constructors like String, Array, and Object, and some web code
-  // doesn't like seeing source code for constructors.
+  // of the target shared function info.
   target_shared->ReplaceCode(source_shared->code());
   target_shared->set_scope_info(source_shared->scope_info());
   target_shared->set_length(source_shared->length());
   target_shared->set_formal_parameter_count(
       source_shared->formal_parameter_count());
-  target_shared->set_script(isolate->heap()->undefined_value());
-
-  // Since we don't store the source we should never optimize this.
-  target_shared->code()->set_optimizable(false);
+  target_shared->set_script(source_shared->script());
+  target_shared->set_start_position_and_type(
+      source_shared->start_position_and_type());
+  target_shared->set_end_position(source_shared->end_position());
+  bool was_native = target_shared->native();
+  target_shared->set_compiler_hints(source_shared->compiler_hints());
+  target_shared->set_native(was_native);
 
   // Set the code of the target function.
   target->ReplaceCode(source_shared->code());
@@ -8346,7 +8346,7 @@
 
   // If the function is not optimizable or debugger is active continue using the
   // code from the full compiler.
-  if (!FLAG_crankshaft ||
+  if (!isolate->use_crankshaft() ||
       function->shared()->optimization_disabled() ||
       isolate->DebuggerHasBreakPoints()) {
     if (FLAG_trace_opt) {
@@ -8628,7 +8628,7 @@
                                             Handle<JSFunction> function,
                                             Handle<Code> unoptimized) {
   // Keep track of whether we've succeeded in optimizing.
-  if (!unoptimized->optimizable()) return false;
+  if (!isolate->use_crankshaft() || !unoptimized->optimizable()) return false;
   // If we are trying to do OSR when there are already optimized
   // activations of the function, it means (a) the function is directly or
   // indirectly recursive and (b) an optimized invocation has been
diff --git a/src/serialize.cc b/src/serialize.cc
index a232d8c..7ed3666 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -532,63 +532,59 @@
       UNCLASSIFIED,
       52,
       "cpu_features");
-  Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
-      UNCLASSIFIED,
-      53,
-      "Heap::NewSpaceAllocationTopAddress");
-  Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
-      UNCLASSIFIED,
-      54,
-      "Heap::NewSpaceAllocationLimitAddress");
   Add(ExternalReference(Runtime::kAllocateInNewSpace, isolate).address(),
       UNCLASSIFIED,
-      55,
+      53,
       "Runtime::AllocateInNewSpace");
   Add(ExternalReference::old_pointer_space_allocation_top_address(
       isolate).address(),
       UNCLASSIFIED,
-      56,
+      54,
       "Heap::OldPointerSpaceAllocationTopAddress");
   Add(ExternalReference::old_pointer_space_allocation_limit_address(
       isolate).address(),
       UNCLASSIFIED,
-      57,
+      55,
       "Heap::OldPointerSpaceAllocationLimitAddress");
   Add(ExternalReference(Runtime::kAllocateInOldPointerSpace, isolate).address(),
       UNCLASSIFIED,
-      58,
+      56,
       "Runtime::AllocateInOldPointerSpace");
   Add(ExternalReference::old_data_space_allocation_top_address(
       isolate).address(),
       UNCLASSIFIED,
-      59,
+      57,
       "Heap::OldDataSpaceAllocationTopAddress");
   Add(ExternalReference::old_data_space_allocation_limit_address(
       isolate).address(),
       UNCLASSIFIED,
-      60,
+      58,
       "Heap::OldDataSpaceAllocationLimitAddress");
   Add(ExternalReference(Runtime::kAllocateInOldDataSpace, isolate).address(),
       UNCLASSIFIED,
-      61,
+      59,
       "Runtime::AllocateInOldDataSpace");
   Add(ExternalReference::new_space_high_promotion_mode_active_address(isolate).
       address(),
       UNCLASSIFIED,
-      62,
+      60,
       "Heap::NewSpaceAllocationLimitAddress");
   Add(ExternalReference::allocation_sites_list_address(isolate).address(),
       UNCLASSIFIED,
-      63,
+      61,
       "Heap::allocation_sites_list_address()");
   Add(ExternalReference::record_object_allocation_function(isolate).address(),
       UNCLASSIFIED,
-      64,
+      62,
       "HeapProfiler::RecordObjectAllocationFromMasm");
   Add(ExternalReference::address_of_uint32_bias().address(),
       UNCLASSIFIED,
-      65,
+      63,
       "uint32_bias");
+  Add(ExternalReference::get_mark_code_as_executed_function(isolate).address(),
+      UNCLASSIFIED,
+      64,
+      "Code::MarkCodeAsExecuted");
 
   // Add a small set of deopt entry addresses to encoder without generating the
   // deopt table code, which isn't possible at deserialization time.
@@ -599,7 +595,7 @@
         entry,
         Deoptimizer::LAZY,
         Deoptimizer::CALCULATE_ENTRY_ADDRESS);
-    Add(address, LAZY_DEOPTIMIZATION, 65 + entry, "lazy_deopt");
+    Add(address, LAZY_DEOPTIMIZATION, 64 + entry, "lazy_deopt");
   }
 }
 
@@ -1682,19 +1678,15 @@
 }
 
 
-void Serializer::ObjectSerializer::VisitExternalReferences(Address* start,
-                                                           Address* end) {
-  Address references_start = reinterpret_cast<Address>(start);
+void Serializer::ObjectSerializer::VisitExternalReference(Address* p) {
+  Address references_start = reinterpret_cast<Address>(p);
   int skip = OutputRawData(references_start, kCanReturnSkipInsteadOfSkipping);
 
-  for (Address* current = start; current < end; current++) {
-    sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
-    sink_->PutInt(skip, "SkipB4ExternalRef");
-    skip = 0;
-    int reference_id = serializer_->EncodeExternalReference(*current);
-    sink_->PutInt(reference_id, "reference id");
-  }
-  bytes_processed_so_far_ += static_cast<int>((end - start) * kPointerSize);
+  sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
+  sink_->PutInt(skip, "SkipB4ExternalRef");
+  int reference_id = serializer_->EncodeExternalReference(*p);
+  sink_->PutInt(reference_id, "reference id");
+  bytes_processed_so_far_ += kPointerSize;
 }
 
 
diff --git a/src/serialize.h b/src/serialize.h
index 15d9369..47627ac 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -339,10 +339,6 @@
  private:
   virtual void VisitPointers(Object** start, Object** end);
 
-  virtual void VisitExternalReferences(Address* start, Address* end) {
-    UNREACHABLE();
-  }
-
   virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
     UNREACHABLE();
   }
@@ -521,7 +517,7 @@
     void Serialize();
     void VisitPointers(Object** start, Object** end);
     void VisitEmbeddedPointer(RelocInfo* target);
-    void VisitExternalReferences(Address* start, Address* end);
+    void VisitExternalReference(Address* p);
     void VisitExternalReference(RelocInfo* rinfo);
     void VisitCodeTarget(RelocInfo* target);
     void VisitCodeEntry(Address entry_address);
diff --git a/src/spaces.cc b/src/spaces.cc
index d8aecf3..9ed8d9e 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -29,6 +29,7 @@
 
 #include "macro-assembler.h"
 #include "mark-compact.h"
+#include "msan.h"
 #include "platform.h"
 
 namespace v8 {
@@ -717,6 +718,7 @@
                                                 executable,
                                                 owner);
   result->set_reserved_memory(&reservation);
+  MSAN_MEMORY_IS_INITIALIZED(base, chunk_size);
   return result;
 }
 
diff --git a/src/type-info.cc b/src/type-info.cc
index da4d183..65d1364 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -128,6 +128,16 @@
 }
 
 
+bool TypeFeedbackOracle::LoadIsPreMonomorphic(Property* expr) {
+  Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
+  if (map_or_code->IsCode()) {
+    Handle<Code> code = Handle<Code>::cast(map_or_code);
+    return code->is_inline_cache_stub() && code->ic_state() == PREMONOMORPHIC;
+  }
+  return false;
+}
+
+
 bool TypeFeedbackOracle::LoadIsPolymorphic(Property* expr) {
   Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
   if (map_or_code->IsCode()) {
@@ -166,6 +176,16 @@
 }
 
 
+bool TypeFeedbackOracle::StoreIsPreMonomorphic(TypeFeedbackId ast_id) {
+  Handle<Object> map_or_code = GetInfo(ast_id);
+  if (map_or_code->IsCode()) {
+    Handle<Code> code = Handle<Code>::cast(map_or_code);
+    return code->ic_state() == PREMONOMORPHIC;
+  }
+  return false;
+}
+
+
 bool TypeFeedbackOracle::StoreIsKeyedPolymorphic(TypeFeedbackId ast_id) {
   Handle<Object> map_or_code = GetInfo(ast_id);
   if (map_or_code->IsCode()) {
@@ -622,12 +642,6 @@
 
       case Code::KEYED_LOAD_IC:
       case Code::KEYED_STORE_IC:
-        if (target->ic_state() == MONOMORPHIC ||
-            target->ic_state() == POLYMORPHIC) {
-          SetInfo(ast_id, target);
-        }
-        break;
-
       case Code::BINARY_OP_IC:
       case Code::COMPARE_IC:
       case Code::TO_BOOLEAN_IC:
diff --git a/src/type-info.h b/src/type-info.h
index 7d7d7ea..f295c06 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -243,9 +243,11 @@
 
   bool LoadIsMonomorphicNormal(Property* expr);
   bool LoadIsUninitialized(Property* expr);
+  bool LoadIsPreMonomorphic(Property* expr);
   bool LoadIsPolymorphic(Property* expr);
   bool StoreIsUninitialized(TypeFeedbackId ast_id);
   bool StoreIsMonomorphicNormal(TypeFeedbackId ast_id);
+  bool StoreIsPreMonomorphic(TypeFeedbackId ast_id);
   bool StoreIsKeyedPolymorphic(TypeFeedbackId ast_id);
   bool CallIsMonomorphic(Call* expr);
   bool CallNewIsMonomorphic(CallNew* expr);
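
The new predicates give compilers a way to distinguish an IC that was generated but never specialized (PREMONOMORPHIC) from one that is missing entirely. A hedged usage sketch (hypothetical caller, not V8 source):

    // Skip feedback-driven specialization while the IC is still generic.
    static bool HasUsableLoadFeedback(TypeFeedbackOracle* oracle,
                                      Property* expr) {
      return !oracle->LoadIsUninitialized(expr) &&
             !oracle->LoadIsPreMonomorphic(expr);
    }
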
diff --git a/src/version.cc b/src/version.cc
index e563ffa..765e365 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     22
-#define BUILD_NUMBER      19
+#define BUILD_NUMBER      20
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 07d0703..afac886 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -43,6 +43,7 @@
 
 
 static const byte kCallOpcode = 0xE8;
+static const int kNoCodeAgeSequenceLength = 6;
 
 
 void Assembler::emitl(uint32_t x) {
@@ -61,11 +62,8 @@
 }
 
 
-void Assembler::emitq(uint64_t x, RelocInfo::Mode rmode) {
+void Assembler::emitq(uint64_t x) {
   Memory::uint64_at(pc_) = x;
-  if (!RelocInfo::IsNone(rmode)) {
-    RecordRelocInfo(rmode, x);
-  }
   pc_ += sizeof(uint64_t);
 }
 
@@ -79,7 +77,8 @@
 void Assembler::emit_code_target(Handle<Code> target,
                                  RelocInfo::Mode rmode,
                                  TypeFeedbackId ast_id) {
-  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  ASSERT(RelocInfo::IsCodeTarget(rmode) ||
+      rmode == RelocInfo::CODE_AGE_SEQUENCE);
   if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
     RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, ast_id.ToInt());
   } else {
@@ -392,6 +391,13 @@
 }
 
 
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  ASSERT(*pc_ == kCallOpcode);
+  return origin->code_target_object_handle_at(pc_ + 1);
+}
+
+
 Code* RelocInfo::code_age_stub() {
   ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
   ASSERT(*pc_ == kCallOpcode);
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index a92196d..9fe7b83 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -1465,26 +1465,24 @@
 
 void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
   // Non-relocatable values might not need a 64-bit representation.
-  if (RelocInfo::IsNone(rmode)) {
-    if (is_uint32(value)) {
-      movl(dst, Immediate(static_cast<int32_t>(value)));
-      return;
-    } else if (is_int32(value)) {
-      movq(dst, Immediate(static_cast<int32_t>(value)));
-      return;
-    }
+  ASSERT(RelocInfo::IsNone(rmode));
+  if (is_uint32(value)) {
+    movl(dst, Immediate(static_cast<int32_t>(value)));
+  } else if (is_int32(value)) {
+    movq(dst, Immediate(static_cast<int32_t>(value)));
+  } else {
     // Value cannot be represented by 32 bits, so do a full 64 bit immediate
     // value.
+    EnsureSpace ensure_space(this);
+    emit_rex_64(dst);
+    emit(0xB8 | dst.low_bits());
+    emitq(value);
   }
-  EnsureSpace ensure_space(this);
-  emit_rex_64(dst);
-  emit(0xB8 | dst.low_bits());
-  emitq(value, rmode);
 }
 
 
 void Assembler::movq(Register dst, ExternalReference ref) {
-  int64_t value = reinterpret_cast<int64_t>(ref.address());
+  Address value = reinterpret_cast<Address>(ref.address());
   movq(dst, value, RelocInfo::EXTERNAL_REFERENCE);
 }
 
@@ -1899,7 +1897,7 @@
 }
 
 
-void Assembler::xchg(Register dst, Register src) {
+void Assembler::xchgq(Register dst, Register src) {
   EnsureSpace ensure_space(this);
   if (src.is(rax) || dst.is(rax)) {  // Single-byte encoding
     Register other = src.is(rax) ? dst : src;
@@ -1917,6 +1915,24 @@
 }
 
 
+void Assembler::xchgl(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  if (src.is(rax) || dst.is(rax)) {  // Single-byte encoding
+    Register other = src.is(rax) ? dst : src;
+    emit_optional_rex_32(other);
+    emit(0x90 | other.low_bits());
+  } else if (dst.low_bits() == 4) {
+    emit_optional_rex_32(dst, src);
+    emit(0x87);
+    emit_modrm(dst, src);
+  } else {
+    emit_optional_rex_32(src, dst);
+    emit(0x87);
+    emit_modrm(src, dst);
+  }
+}
+
+
 void Assembler::store_rax(void* dst, RelocInfo::Mode mode) {
   EnsureSpace ensure_space(this);
   emit(0x48);  // REX.W
@@ -2035,6 +2051,14 @@
 }
 
 
+void Assembler::testl(const Operand& op, Register reg) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(reg, op);
+  emit(0x85);
+  emit_operand(reg, op);
+}
+
+
 void Assembler::testq(const Operand& op, Register reg) {
   EnsureSpace ensure_space(this);
   emit_rex_64(reg, op);
@@ -3004,8 +3028,8 @@
 
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
   ASSERT(!RelocInfo::IsNone(rmode));
-  // Don't record external references unless the heap will be serialized.
   if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+    // Don't record external references unless the heap will be serialized.
 #ifdef DEBUG
     if (!Serializer::enabled()) {
       Serializer::TooLateToEnableNow();
@@ -3014,6 +3038,9 @@
     if (!Serializer::enabled() && !emit_debug_code()) {
       return;
     }
+  } else if (rmode == RelocInfo::CODE_AGE_SEQUENCE) {
+    // Don't record pseudo relocation info for code age sequence mode.
+    return;
   }
   RelocInfo rinfo(pc_, rmode, data, NULL);
   reloc_info_writer.Write(&rinfo);
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index fd1e1e7..c715bce 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -723,7 +723,6 @@
   // All 64-bit immediates must have a relocation mode.
   void movq(Register dst, void* ptr, RelocInfo::Mode rmode);
   void movq(Register dst, int64_t value, RelocInfo::Mode rmode);
-  void movq(Register dst, const char* s, RelocInfo::Mode rmode);
   // Moves the address of the external reference into the register.
   void movq(Register dst, ExternalReference ext);
   void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);
@@ -756,7 +755,8 @@
   void cmovl(Condition cc, Register dst, const Operand& src);
 
   // Exchange two registers
-  void xchg(Register dst, Register src);
+  void xchgq(Register dst, Register src);
+  void xchgl(Register dst, Register src);
 
   // Arithmetics
   void addl(Register dst, Register src) {
@@ -991,6 +991,10 @@
     arithmetic_op(0x09, src, dst);
   }
 
+  void orl(const Operand& dst, Register src) {
+    arithmetic_op_32(0x09, src, dst);
+  }
+
   void or_(Register dst, Immediate src) {
     immediate_arithmetic_op(0x1, dst, src);
   }
@@ -1016,6 +1020,10 @@
     shift(dst, imm8, 0x0);
   }
 
+  void roll(Register dst, Immediate imm8) {
+    shift_32(dst, imm8, 0x0);
+  }
+
   void rcr(Register dst, Immediate imm8) {
     shift(dst, imm8, 0x3);
   }
@@ -1123,6 +1131,10 @@
     arithmetic_op_32(0x2B, dst, src);
   }
 
+  void subl(const Operand& dst, Register src) {
+    arithmetic_op_32(0x29, src, dst);
+  }
+
   void subl(const Operand& dst, Immediate src) {
     immediate_arithmetic_op_32(0x5, dst, src);
   }
@@ -1141,6 +1153,7 @@
   void testb(const Operand& op, Register reg);
   void testl(Register dst, Register src);
   void testl(Register reg, Immediate mask);
+  void testl(const Operand& op, Register reg);
   void testl(const Operand& op, Immediate mask);
   void testq(const Operand& op, Register reg);
   void testq(Register dst, Register src);
@@ -1166,6 +1179,10 @@
     immediate_arithmetic_op_32(0x6, dst, src);
   }
 
+  void xorl(const Operand& dst, Register src) {
+    arithmetic_op_32(0x31, src, dst);
+  }
+
   void xorl(const Operand& dst, Immediate src) {
     immediate_arithmetic_op_32(0x6, dst, src);
   }
@@ -1474,7 +1491,7 @@
   void emit(byte x) { *pc_++ = x; }
   inline void emitl(uint32_t x);
   inline void emitp(void* x, RelocInfo::Mode rmode);
-  inline void emitq(uint64_t x, RelocInfo::Mode rmode);
+  inline void emitq(uint64_t x);
   inline void emitw(uint16_t x);
   inline void emit_code_target(Handle<Code> target,
                                RelocInfo::Mode rmode,
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index df6acb6..2f5e0c1 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -627,6 +627,42 @@
 #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
 
 
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+  // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+  // that make_code_young doesn't do any garbage collection which allows us to
+  // save/restore the registers without worrying about which of them contain
+  // pointers.
+  __ Pushad();
+  __ movq(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
+  __ movq(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
+  __ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
+  {  // NOLINT
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ PrepareCallCFunction(1);
+    __ CallCFunction(
+        ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+        1);
+  }
+  __ Popad();
+
+  // Perform prologue operations usually performed by the young code stub.
+  __ pop(r10);   // Pop return address into scratch register.
+  __ push(rbp);  // Caller's frame pointer.
+  __ movq(rbp, rsp);
+  __ push(rsi);  // Callee's context.
+  __ push(rdi);  // Callee's JS Function.
+  __ push(r10);  // Push return address after frame prologue.
+
+  // Jump to point after the code-age stub.
+  __ ret(0);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+  GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
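
The tail of Generate_MarkCodeAsExecutedOnce is easiest to read as a stack diagram; the following is a hedged reconstruction (comments only, not assembler output):

    // On entry the stack holds just the return address, which points past the
    // patched 6-byte prologue region of the caller:
    //   [rsp] -> return address into the aged function's body
    // The stub then builds the frame the young prologue would have built and
    // restores the return address on top, so ret(0) resumes the function body
    // with a complete frame:
    //   pop r10 / push rbp / movq rbp, rsp / push rsi / push rdi / push r10 / ret
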
 void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
   // Enter an internal frame.
   {
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 5373827..2002608 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -675,8 +675,6 @@
 #undef __
 
 
-static const int kNoCodeAgeSequenceLength = 6;
-
 static byte* GetNoCodeAgeSequence(uint32_t* length) {
   static bool initialized = false;
   static byte sequence[kNoCodeAgeSequenceLength];
@@ -733,11 +731,8 @@
     Code* stub = GetCodeAgeStub(isolate, age, parity);
     CodePatcher patcher(sequence, young_length);
     patcher.masm()->call(stub->instruction_start());
-    for (int i = 0;
-         i < kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength;
-         i++) {
-      patcher.masm()->nop();
-    }
+    patcher.masm()->Nop(
+        kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
   }
 }
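
For context on kNoCodeAgeSequenceLength = 6 and the single Nop above: the young x64 prologue and its aged replacement occupy the same six bytes. A hedged decoding based on the constants in this patch (not generated output):

    // Young sequence emitted by Prologue(BUILD_FUNCTION_FRAME), 6 bytes:
    static const unsigned char kYoungSequenceSketch[6] = {
        0x55,              // push rbp
        0x48, 0x89, 0xE5,  // movq rbp, rsp
        0x56,              // push rsi (callee's context)
        0x57               // push rdi (callee's JS function)
    };
    // Aged form: a 5-byte short call (E8 + rel32, kShortCallInstructionLength)
    // to the age stub, padded with a single one-byte nop.
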
 
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 20c5622..02ba67b 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -152,10 +152,7 @@
   FrameScope frame_scope(masm_, StackFrame::MANUAL);
 
   info->set_prologue_offset(masm_->pc_offset());
-  __ push(rbp);  // Caller's frame pointer.
-  __ movq(rbp, rsp);
-  __ push(rsi);  // Callee's context.
-  __ push(rdi);  // Callee's JS Function.
+  __ Prologue(BUILD_FUNCTION_FRAME);
   info->AddNoFrameRange(0, masm_->pc_offset());
 
   { Comment cmnt(masm_, "[ Allocate locals");
@@ -1129,7 +1126,7 @@
       Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
                      isolate()));
   RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
-  __ LoadHeapObject(rbx, cell);
+  __ Move(rbx, cell);
   __ Move(FieldOperand(rbx, Cell::kValueOffset),
           Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker));
 
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index c82ef8d..6c8e377 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -143,14 +143,7 @@
   if (NeedsEagerFrame()) {
     ASSERT(!frame_is_built_);
     frame_is_built_ = true;
-    __ push(rbp);  // Caller's frame pointer.
-    __ movq(rbp, rsp);
-    __ push(rsi);  // Callee's context.
-    if (info()->IsStub()) {
-      __ Push(Smi::FromInt(StackFrame::STUB));
-    } else {
-      __ push(rdi);  // Callee's JS function.
-    }
+    __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
     info()->AddNoFrameRange(0, masm_->pc_offset());
   }
 
@@ -1556,8 +1549,7 @@
 
 void LCodeGen::DoConstantT(LConstantT* instr) {
   Handle<Object> value = instr->value(isolate());
-  AllowDeferredHandleDereference smi_check;
-  __ LoadObject(ToRegister(instr->result()), value);
+  __ Move(ToRegister(instr->result()), value);
 }
 
 
@@ -2129,7 +2121,7 @@
 
   if (instr->right()->IsConstantOperand()) {
     Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
-    __ CmpObject(left, right);
+    __ Cmp(left, right);
   } else {
     Register right = ToRegister(instr->right());
     __ cmpq(left, right);
@@ -2497,7 +2489,7 @@
     InstanceofStub stub(flags);
 
     __ push(ToRegister(instr->value()));
-    __ PushHeapObject(instr->function());
+    __ Push(instr->function());
 
     static const int kAdditionalDelta = 10;
     int delta =
@@ -3208,7 +3200,7 @@
 
 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
   __ push(rsi);  // The context is the first argument.
-  __ PushHeapObject(instr->hydrogen()->pairs());
+  __ Push(instr->hydrogen()->pairs());
   __ Push(Smi::FromInt(instr->hydrogen()->flags()));
   CallRuntime(Runtime::kDeclareGlobals, 3, instr);
 }
@@ -3242,7 +3234,7 @@
 
   if (can_invoke_directly) {
     if (rdi_state == RDI_UNINITIALIZED) {
-      __ LoadHeapObject(rdi, function);
+      __ Move(rdi, function);
     }
 
     // Change context.
@@ -4838,8 +4830,7 @@
 
 void LCodeGen::DoCheckValue(LCheckValue* instr) {
   Register reg = ToRegister(instr->value());
-  Handle<HeapObject> object = instr->hydrogen()->object().handle();
-  __ CmpHeapObject(reg, object);
+  __ Cmp(reg, instr->hydrogen()->object().handle());
   DeoptimizeIf(not_equal, instr->environment());
 }
 
@@ -5066,7 +5057,7 @@
   // rax = regexp literal clone.
   int literal_offset =
       FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
-  __ LoadHeapObject(rcx, instr->hydrogen()->literals());
+  __ Move(rcx, instr->hydrogen()->literals());
   __ movq(rbx, FieldOperand(rcx, literal_offset));
   __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
   __ j(not_equal, &materialized, Label::kNear);
@@ -5137,13 +5128,7 @@
 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
   ASSERT(!operand->IsDoubleRegister());
   if (operand->IsConstantOperand()) {
-    Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
-    AllowDeferredHandleDereference smi_check;
-    if (object->IsSmi()) {
-      __ Push(Handle<Smi>::cast(object));
-    } else {
-      __ PushHeapObject(Handle<HeapObject>::cast(object));
-    }
+    __ Push(ToHandle(LConstantOperand::cast(operand)));
   } else if (operand->IsRegister()) {
     __ push(ToRegister(operand));
   } else {
@@ -5257,7 +5242,7 @@
   __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
          Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ j(not_equal, &check_frame_marker, Label::kNear);
-  __ movq(temp, Operand(rax, StandardFrameConstants::kCallerFPOffset));
+  __ movq(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
 
   // Check the marker in the calling frame.
   __ bind(&check_frame_marker);
diff --git a/src/x64/lithium-gap-resolver-x64.cc b/src/x64/lithium-gap-resolver-x64.cc
index 71db17c..8d1c2a2 100644
--- a/src/x64/lithium-gap-resolver-x64.cc
+++ b/src/x64/lithium-gap-resolver-x64.cc
@@ -200,7 +200,7 @@
       } else if (cgen_->IsInteger32Constant(constant_source)) {
         __ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
       } else {
-        __ LoadObject(dst, cgen_->ToHandle(constant_source));
+        __ Move(dst, cgen_->ToHandle(constant_source));
       }
     } else if (destination->IsDoubleRegister()) {
       double v = cgen_->ToDouble(constant_source);
@@ -222,7 +222,7 @@
         // value.
         __ movq(dst, Immediate(cgen_->ToInteger32(constant_source)));
       } else {
-        __ LoadObject(kScratchRegister, cgen_->ToHandle(constant_source));
+        __ Move(kScratchRegister, cgen_->ToHandle(constant_source));
         __ movq(dst, kScratchRegister);
       }
     }
@@ -262,7 +262,7 @@
     // Swap two general-purpose registers.
     Register src = cgen_->ToRegister(source);
     Register dst = cgen_->ToRegister(destination);
-    __ xchg(dst, src);
+    __ xchgq(dst, src);
 
   } else if ((source->IsRegister() && destination->IsStackSlot()) ||
              (source->IsStackSlot() && destination->IsRegister())) {
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 9dcb9d1..075f07c 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -740,7 +740,7 @@
 
   bind(&profiler_disabled);
   // Call the api function!
-  movq(rax, reinterpret_cast<int64_t>(function_address),
+  movq(rax, reinterpret_cast<Address>(function_address),
        RelocInfo::EXTERNAL_REFERENCE);
 
   bind(&end_profiler_check);
@@ -2492,8 +2492,7 @@
   if (source->IsSmi()) {
     Move(dst, Smi::cast(*source));
   } else {
-    ASSERT(source->IsHeapObject());
-    movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
+    MoveHeapObject(dst, source);
   }
 }
 
@@ -2503,8 +2502,7 @@
   if (source->IsSmi()) {
     Move(dst, Smi::cast(*source));
   } else {
-    ASSERT(source->IsHeapObject());
-    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
+    MoveHeapObject(kScratchRegister, source);
     movq(dst, kScratchRegister);
   }
 }
@@ -2515,8 +2513,7 @@
   if (source->IsSmi()) {
     Cmp(dst, Smi::cast(*source));
   } else {
-    ASSERT(source->IsHeapObject());
-    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
+    MoveHeapObject(kScratchRegister, source);
     cmpq(dst, kScratchRegister);
   }
 }
@@ -2527,8 +2524,7 @@
   if (source->IsSmi()) {
     Cmp(dst, Smi::cast(*source));
   } else {
-    ASSERT(source->IsHeapObject());
-    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
+    MoveHeapObject(kScratchRegister, source);
     cmpq(dst, kScratchRegister);
   }
 }
@@ -2539,47 +2535,22 @@
   if (source->IsSmi()) {
     Push(Smi::cast(*source));
   } else {
-    ASSERT(source->IsHeapObject());
-    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
+    MoveHeapObject(kScratchRegister, source);
     push(kScratchRegister);
   }
 }
 
 
-void MacroAssembler::LoadHeapObject(Register result,
-                                    Handle<HeapObject> object) {
+void MacroAssembler::MoveHeapObject(Register result,
+                                    Handle<Object> object) {
   AllowDeferredHandleDereference using_raw_address;
+  ASSERT(object->IsHeapObject());
   if (isolate()->heap()->InNewSpace(*object)) {
     Handle<Cell> cell = isolate()->factory()->NewCell(object);
     movq(result, cell, RelocInfo::CELL);
     movq(result, Operand(result, 0));
   } else {
-    Move(result, object);
-  }
-}
-
-
-void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
-  AllowDeferredHandleDereference using_raw_address;
-  if (isolate()->heap()->InNewSpace(*object)) {
-    Handle<Cell> cell = isolate()->factory()->NewCell(object);
-    movq(kScratchRegister, cell, RelocInfo::CELL);
-    cmpq(reg, Operand(kScratchRegister, 0));
-  } else {
-    Cmp(reg, object);
-  }
-}
-
-
-void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
-  AllowDeferredHandleDereference using_raw_address;
-  if (isolate()->heap()->InNewSpace(*object)) {
-    Handle<Cell> cell = isolate()->factory()->NewCell(object);
-    movq(kScratchRegister, cell, RelocInfo::CELL);
-    movq(kScratchRegister, Operand(kScratchRegister, 0));
-    push(kScratchRegister);
-  } else {
-    Push(object);
+    movq(result, object, RelocInfo::EMBEDDED_OBJECT);
   }
 }
 
@@ -2664,7 +2635,8 @@
 #ifdef DEBUG
   int end_position = pc_offset() + CallSize(code_object);
 #endif
-  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  ASSERT(RelocInfo::IsCodeTarget(rmode) ||
+      rmode == RelocInfo::CODE_AGE_SEQUENCE);
   call(code_object, rmode, ast_id);
 #ifdef DEBUG
   CHECK_EQ(end_position, pc_offset());
@@ -3591,7 +3563,7 @@
   ASSERT(flag == JUMP_FUNCTION || has_frame());
 
   // Get the function and setup the context.
-  LoadHeapObject(rdi, function);
+  Move(rdi, function);
   movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
 
   // We call indirectly through the code field in the function to
@@ -3678,6 +3650,30 @@
 }
 
 
+void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
+  if (frame_mode == BUILD_STUB_FRAME) {
+    push(rbp);  // Caller's frame pointer.
+    movq(rbp, rsp);
+    push(rsi);  // Callee's context.
+    Push(Smi::FromInt(StackFrame::STUB));
+  } else {
+    PredictableCodeSizeScope predictable_code_size_scope(this,
+        kNoCodeAgeSequenceLength);
+    if (FLAG_optimize_for_size && FLAG_age_code) {
+      // Pre-age the code.
+      Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
+           RelocInfo::CODE_AGE_SEQUENCE);
+      Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
+    } else {
+      push(rbp);  // Caller's frame pointer.
+      movq(rbp, rsp);
+      push(rsi);  // Callee's context.
+      push(rdi);  // Callee's JS function.
+    }
+  }
+}
+
+
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   push(rbp);
   movq(rbp, rsp);
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index d1c2434..df0ac5d 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -282,6 +282,9 @@
   void DebugBreak();
 #endif
 
+  // Generates function and stub prologue code.
+  void Prologue(PrologueFrameMode frame_mode);
+
   // Enter specific kind of exit frame; either in normal or
   // debug mode. Expects the number of arguments in register rax and
   // sets up the number of arguments in register rdi and the pointer
@@ -812,27 +815,7 @@
 
   // Load a heap object and handle the case of new-space objects by
   // indirecting via a global cell.
-  void LoadHeapObject(Register result, Handle<HeapObject> object);
-  void CmpHeapObject(Register reg, Handle<HeapObject> object);
-  void PushHeapObject(Handle<HeapObject> object);
-
-  void LoadObject(Register result, Handle<Object> object) {
-    AllowDeferredHandleDereference heap_object_check;
-    if (object->IsHeapObject()) {
-      LoadHeapObject(result, Handle<HeapObject>::cast(object));
-    } else {
-      Move(result, object);
-    }
-  }
-
-  void CmpObject(Register reg, Handle<Object> object) {
-    AllowDeferredHandleDereference heap_object_check;
-    if (object->IsHeapObject()) {
-      CmpHeapObject(reg, Handle<HeapObject>::cast(object));
-    } else {
-      Cmp(reg, object);
-    }
-  }
+  void MoveHeapObject(Register result, Handle<Object> object);
 
   // Load a global cell into a register.
   void LoadGlobalCell(Register dst, Handle<Cell> cell);
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 94650fd..5c31133 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -464,7 +464,7 @@
 
   // Get the function and setup the context.
   Handle<JSFunction> function = optimization.constant_function();
-  __ LoadHeapObject(rdi, function);
+  __ Move(rdi, function);
   __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
   // Construct the FunctionCallbackInfo on the stack.
   __ movq(args.GetArgumentOperand(offset - FCA::kCalleeIndex), rdi);
@@ -834,7 +834,7 @@
 
   if (details.type() == CONSTANT) {
     Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
-    __ CmpObject(value_reg, constant);
+    __ Cmp(value_reg, constant);
     __ j(not_equal, miss_label);
   } else if (FLAG_track_fields && representation.IsSmi()) {
     __ JumpIfNotSmi(value_reg, miss_label);
@@ -1411,7 +1411,7 @@
 
 void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
   // Return the constant value.
-  __ LoadObject(rax, value);
+  __ Move(rax, value);
   __ ret(0);
 }
 
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 38ca9f5..d5e838e 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -17504,6 +17504,70 @@
 }
 
 
+THREADED_TEST(FunctionGetDisplayName) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+  const char* code = "var error = false;"
+                     "function a() { this.x = 1; };"
+                     "a.displayName = 'display_a';"
+                     "var b = (function() {"
+                     "  var f = function() { this.x = 2; };"
+                     "  f.displayName = 'display_b';"
+                     "  return f;"
+                     "})();"
+                     "var c = function() {};"
+                     "c.__defineGetter__('displayName', function() {"
+                     "  error = true;"
+                     "  throw new Error();"
+                     "});"
+                     "function d() {};"
+                     "d.__defineGetter__('displayName', function() {"
+                     "  error = true;"
+                     "  return 'wrong_display_name';"
+                     "});"
+                     "function e() {};"
+                     "e.displayName = 'wrong_display_name';"
+                     "e.__defineSetter__('displayName', function() {"
+                     "  error = true;"
+                     "  throw new Error();"
+                     "});"
+                     "function f() {};"
+                     "f.displayName = { 'foo': 6, toString: function() {"
+                     "  error = true;"
+                     "  return 'wrong_display_name';"
+                     "}};"
+                     "var g = function() {"
+                     "  arguments.callee.displayName = 'set_in_runtime';"
+                     "}; g();"
+                     ;
+  v8::ScriptOrigin origin = v8::ScriptOrigin(v8::String::New("test"));
+  v8::Script::Compile(v8::String::New(code), &origin)->Run();
+  v8::Local<v8::Value> error = env->Global()->Get(v8::String::New("error"));
+  v8::Local<v8::Function> a = v8::Local<v8::Function>::Cast(
+      env->Global()->Get(v8::String::New("a")));
+  v8::Local<v8::Function> b = v8::Local<v8::Function>::Cast(
+      env->Global()->Get(v8::String::New("b")));
+  v8::Local<v8::Function> c = v8::Local<v8::Function>::Cast(
+      env->Global()->Get(v8::String::New("c")));
+  v8::Local<v8::Function> d = v8::Local<v8::Function>::Cast(
+      env->Global()->Get(v8::String::New("d")));
+  v8::Local<v8::Function> e = v8::Local<v8::Function>::Cast(
+      env->Global()->Get(v8::String::New("e")));
+  v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
+      env->Global()->Get(v8::String::New("f")));
+  v8::Local<v8::Function> g = v8::Local<v8::Function>::Cast(
+      env->Global()->Get(v8::String::New("g")));
+  CHECK_EQ(false, error->BooleanValue());
+  CHECK_EQ("display_a", *v8::String::Utf8Value(a->GetDisplayName()));
+  CHECK_EQ("display_b", *v8::String::Utf8Value(b->GetDisplayName()));
+  CHECK(c->GetDisplayName()->IsUndefined());
+  CHECK(d->GetDisplayName()->IsUndefined());
+  CHECK(e->GetDisplayName()->IsUndefined());
+  CHECK(f->GetDisplayName()->IsUndefined());
+  CHECK_EQ("set_in_runtime", *v8::String::Utf8Value(g->GetDisplayName()));
+}
+
+
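
For embedders, the corresponding API usage is short; a minimal sketch (hypothetical helper, assumes #include <cstdio> and <v8.h>):

    // Sketch, not V8 source: prefer displayName, fall back to the inferred name.
    void PrintFunctionName(v8::Handle<v8::Function> fn) {
      v8::Handle<v8::Value> name = fn->GetDisplayName();
      if (name->IsUndefined()) name = fn->GetInferredName();
      v8::String::Utf8Value utf8(name);
      printf("function: %s\n", *utf8 ? *utf8 : "(unknown)");
    }
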
 THREADED_TEST(ScriptLineNumber) {
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
diff --git a/test/cctest/test-assembler-x64.cc b/test/cctest/test-assembler-x64.cc
index ce258ba..cd1ed28 100644
--- a/test/cctest/test-assembler-x64.cc
+++ b/test/cctest/test-assembler-x64.cc
@@ -51,6 +51,8 @@
 typedef int (*F1)(int64_t x);
 typedef int (*F2)(int64_t x, int64_t y);
 typedef int (*F3)(double x);
+typedef int64_t (*F4)(int64_t* x, int64_t* y);
+typedef int64_t (*F5)(int64_t x);
 
 #ifdef _WIN64
 static const Register arg1 = rcx;
@@ -167,6 +169,157 @@
 }
 
 
+TEST(AssemblerX64XchglOperations) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                                 &actual_size,
+                                                 true));
+  CHECK(buffer);
+  Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+
+  __ movq(rax, Operand(arg1, 0));
+  __ movq(rbx, Operand(arg2, 0));
+  __ xchgl(rax, rbx);
+  __ movq(Operand(arg1, 0), rax);
+  __ movq(Operand(arg2, 0), rbx);
+  __ ret(0);
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  // Call the function from C++.
+  int64_t left   = V8_2PART_UINT64_C(0x10000000, 20000000);
+  int64_t right  = V8_2PART_UINT64_C(0x30000000, 40000000);
+  int64_t result = FUNCTION_CAST<F4>(buffer)(&left, &right);
+  CHECK_EQ(V8_2PART_UINT64_C(0x00000000, 40000000), left);
+  CHECK_EQ(V8_2PART_UINT64_C(0x00000000, 20000000), right);
+  USE(result);
+}
+
+
+TEST(AssemblerX64OrlOperations) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                                 &actual_size,
+                                                 true));
+  CHECK(buffer);
+  Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+
+  __ movq(rax, Operand(arg2, 0));
+  __ orl(Operand(arg1, 0), rax);
+  __ ret(0);
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  // Call the function from C++.
+  int64_t left   = V8_2PART_UINT64_C(0x10000000, 20000000);
+  int64_t right  = V8_2PART_UINT64_C(0x30000000, 40000000);
+  int64_t result = FUNCTION_CAST<F4>(buffer)(&left, &right);
+  CHECK_EQ(V8_2PART_UINT64_C(0x10000000, 60000000), left);
+  USE(result);
+}
+
+
+TEST(AssemblerX64RollOperations) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                                 &actual_size,
+                                                 true));
+  CHECK(buffer);
+  Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+
+  __ movq(rax, arg1);
+  __ roll(rax, Immediate(1));
+  __ ret(0);
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  // Call the function from C++.
+  int64_t src    = V8_2PART_UINT64_C(0x10000000, C0000000);
+  int64_t result = FUNCTION_CAST<F5>(buffer)(src);
+  CHECK_EQ(V8_2PART_UINT64_C(0x00000000, 80000001), result);
+}
+
+
+TEST(AssemblerX64SublOperations) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                                 &actual_size,
+                                                 true));
+  CHECK(buffer);
+  Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+
+  __ movq(rax, Operand(arg2, 0));
+  __ subl(Operand(arg1, 0), rax);
+  __ ret(0);
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  // Call the function from C++.
+  int64_t left   = V8_2PART_UINT64_C(0x10000000, 20000000);
+  int64_t right  = V8_2PART_UINT64_C(0x30000000, 40000000);
+  int64_t result = FUNCTION_CAST<F4>(buffer)(&left, &right);
+  CHECK_EQ(V8_2PART_UINT64_C(0x10000000, e0000000), left);
+  USE(result);
+}
+
+
+TEST(AssemblerX64TestlOperations) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                                 &actual_size,
+                                                 true));
+  CHECK(buffer);
+  Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+
+  // Set rax according to the ZF flag of the testl instruction.
+  Label done;
+  __ movq(rax, Immediate(1));
+  __ movq(rbx, Operand(arg2, 0));
+  __ testl(Operand(arg1, 0), rbx);
+  __ j(zero, &done, Label::kNear);
+  __ movq(rax, Immediate(0));
+  __ bind(&done);
+  __ ret(0);
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  // Call the function from C++.
+  int64_t left   = V8_2PART_UINT64_C(0x10000000, 20000000);
+  int64_t right  = V8_2PART_UINT64_C(0x30000000, 00000000);
+  int64_t result = FUNCTION_CAST<F4>(buffer)(&left, &right);
+  CHECK_EQ(static_cast<int64_t>(1), result);
+}
+
+
+TEST(AssemblerX64XorlOperations) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                                 &actual_size,
+                                                 true));
+  CHECK(buffer);
+  Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+
+  __ movq(rax, Operand(arg2, 0));
+  __ xorl(Operand(arg1, 0), rax);
+  __ ret(0);
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  // Call the function from C++.
+  int64_t left   = V8_2PART_UINT64_C(0x10000000, 20000000);
+  int64_t right  = V8_2PART_UINT64_C(0x30000000, 60000000);
+  int64_t result = FUNCTION_CAST<F4>(buffer)(&left, &right);
+  CHECK_EQ(V8_2PART_UINT64_C(0x10000000, 40000000), left);
+  USE(result);
+}
+
+
 TEST(AssemblerX64MemoryOperands) {
   // Allocate an executable page of memory.
   size_t actual_size;
diff --git a/test/cctest/test-compiler.cc b/test/cctest/test-compiler.cc
index 1561155..9fd68e5 100644
--- a/test/cctest/test-compiler.cc
+++ b/test/cctest/test-compiler.cc
@@ -377,8 +377,10 @@
         *v8::Local<v8::Function>::Cast(env->Global()->Get(v8_str("closure1"))));
     Handle<JSFunction> fun2 = v8::Utils::OpenHandle(
         *v8::Local<v8::Function>::Cast(env->Global()->Get(v8_str("closure2"))));
-    CHECK(fun1->IsOptimized() || !fun1->IsOptimizable());
-    CHECK(fun2->IsOptimized() || !fun2->IsOptimizable());
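+    // With Crankshaft disabled the closures can never become optimized, so
+    // only insist on optimized code when Crankshaft is actually in use.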
+    CHECK(fun1->IsOptimized() ||
+          !CcTest::i_isolate()->use_crankshaft() || !fun1->IsOptimizable());
+    CHECK(fun2->IsOptimized() ||
+          !CcTest::i_isolate()->use_crankshaft() || !fun2->IsOptimizable());
     CHECK_EQ(fun1->code(), fun2->code());
   }
 }
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index 9f4ad46..74c2b75 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -1031,6 +1031,7 @@
   // If we do not flush code this test is invalid.
   if (!FLAG_flush_code) return;
   i::FLAG_allow_natives_syntax = true;
+  i::FLAG_optimize_for_size = false;
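+  // Keep code out of the pre-aged state; this test expects freshly compiled
+  // code to survive a couple of GCs before it is flushed.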
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
   Factory* factory = isolate->factory();
@@ -1076,10 +1077,76 @@
 }
 
 
+TEST(TestCodeFlushingPreAged) {
+  // If we do not flush code this test is invalid.
+  if (!FLAG_flush_code) return;
+  i::FLAG_allow_natives_syntax = true;
+  i::FLAG_optimize_for_size = true;
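+  // With --optimize-for-size, freshly compiled code starts out pre-aged and
+  // becomes a candidate for flushing after a single GC.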
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  Factory* factory = isolate->factory();
+  v8::HandleScope scope(CcTest::isolate());
+  const char* source = "function foo() {"
+                       "  var x = 42;"
+                       "  var y = 42;"
+                       "  var z = x + y;"
+                       "};"
+                       "foo()";
+  Handle<String> foo_name = factory->InternalizeUtf8String("foo");
+
+  // Compile the script; this runs foo() exactly once.
+  { v8::HandleScope scope(CcTest::isolate());
+    CompileRun(source);
+  }
+
+  // Check function is compiled.
+  Object* func_value = isolate->context()->global_object()->
+      GetProperty(*foo_name)->ToObjectChecked();
+  CHECK(func_value->IsJSFunction());
+  Handle<JSFunction> function(JSFunction::cast(func_value));
+  CHECK(function->shared()->is_compiled());
+
+  // The code has been run, so it will survive at least one GC.
+  CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+  CHECK(function->shared()->is_compiled());
+
+  // The code was only run once, so it should be pre-aged and collected on the
+  // next GC.
+  CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+  CHECK(!function->shared()->is_compiled() || function->IsOptimized());
+
+  // Execute the function twice more to reset its code to the young age.
+  { v8::HandleScope scope(CcTest::isolate());
+    CompileRun("foo();"
+               "foo();");
+  }
+
+  // The code will survive at least two GCs now that it is young again.
+  CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+  CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+  CHECK(function->shared()->is_compiled());
+
+  // Simulate several GCs that use full marking.
+  const int kAgingThreshold = 6;
+  for (int i = 0; i < kAgingThreshold; i++) {
+    CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+  }
+
+  // foo should no longer be in the compilation cache.
+  CHECK(!function->shared()->is_compiled() || function->IsOptimized());
+  CHECK(!function->is_compiled() || function->IsOptimized());
+  // Call foo to get it recompiled.
+  CompileRun("foo()");
+  CHECK(function->shared()->is_compiled());
+  CHECK(function->is_compiled());
+}
+
+
 TEST(TestCodeFlushingIncremental) {
   // If we do not flush code this test is invalid.
   if (!FLAG_flush_code || !FLAG_flush_code_incrementally) return;
   i::FLAG_allow_natives_syntax = true;
+  i::FLAG_optimize_for_size = false;
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
   Factory* factory = isolate->factory();
@@ -1148,6 +1215,7 @@
   // If we do not flush code this test is invalid.
   if (!FLAG_flush_code || !FLAG_flush_code_incrementally) return;
   i::FLAG_allow_natives_syntax = true;
+  i::FLAG_optimize_for_size = false;
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
   Factory* factory = isolate->factory();
@@ -1216,6 +1284,7 @@
   // If we do not flush code this test is invalid.
   if (!FLAG_flush_code || !FLAG_flush_code_incrementally) return;
   i::FLAG_allow_natives_syntax = true;
+  i::FLAG_optimize_for_size = false;
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
   Factory* factory = isolate->factory();
diff --git a/test/message/paren_in_arg_string.out b/test/message/paren_in_arg_string.out
index 3bc978b..0ed59ba 100644
--- a/test/message/paren_in_arg_string.out
+++ b/test/message/paren_in_arg_string.out
@@ -2,5 +2,5 @@
 var paren_in_arg_string_bad = new Function(')', 'return;');
                               ^
 SyntaxError: Function arg string contains parenthesis
-    at Function (<anonymous>)
-    at *%(basename)s:29:31
\ No newline at end of file
+    at Function (native)
+    at *%(basename)s:29:31
diff --git a/test/mjsunit/regress/regress-array-pop-nonconfigurable.js b/test/mjsunit/regress/regress-array-pop-nonconfigurable.js
new file mode 100644
index 0000000..129e198
--- /dev/null
+++ b/test/mjsunit/regress/regress-array-pop-nonconfigurable.js
@@ -0,0 +1,31 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var a = [];
+Object.defineProperty(a, 0, {});
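+// The empty descriptor leaves element 0 non-configurable, so pop() cannot
+// delete it and must throw.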
+assertThrows(function() { a.pop(); });
+
diff --git a/test/mjsunit/regress/regress-crbug-305309.js b/test/mjsunit/regress/regress-crbug-305309.js
new file mode 100644
index 0000000..cd89bed
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-305309.js
@@ -0,0 +1,49 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function BadProto() {
+  this.constant_function = function() {};
+  this.one = 1;
+  this.two = 2;
+}
+var b1 = new BadProto();
+var b2 = new BadProto();
+
+function Ctor() {}
+Ctor.prototype = b1;
+var a = new Ctor();
+
+function Two(x) {
+  return x.two;
+}
+assertEquals(2, Two(a));
+assertEquals(2, Two(a));
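+// Overwriting the "constant" function should deprecate the map b2 shares
+// with b1, the prototype of 'a', so the optimized load below must cope with
+// a migrating prototype.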
+b2.constant_function = "no longer constant!";
+%OptimizeFunctionOnNextCall(Two);
+assertEquals(2, Two(a));
diff --git a/test/mjsunit/unbox-double-arrays.js b/test/mjsunit/unbox-double-arrays.js
index 4e8718e..5ed4040 100644
--- a/test/mjsunit/unbox-double-arrays.js
+++ b/test/mjsunit/unbox-double-arrays.js
@@ -345,8 +345,6 @@
                       -Infinity,
                       expected_array_value(7));
 
-  assertOptimized(test_various_stores);
-
   // Make sure that we haven't converted from fast double.
   assertTrue(%HasFastDoubleElements(large_array));
 }
diff --git a/tools/presubmit.py b/tools/presubmit.py
index 780dab9..1ab6347 100755
--- a/tools/presubmit.py
+++ b/tools/presubmit.py
@@ -282,8 +282,8 @@
   Check that all files include a copyright notice and no trailing whitespaces.
   """
 
-  RELEVANT_EXTENSIONS = ['.js', '.cc', '.h', '.py', '.c', 'SConscript',
-      'SConstruct', '.status', '.gyp', '.gypi']
+  RELEVANT_EXTENSIONS = ['.js', '.cc', '.h', '.py', '.c',
+                         '.status', '.gyp', '.gypi']
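+  # SConscript/SConstruct files are no longer in the tree, so there is no
+  # need to check them.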
 
   # Overwriting the one in the parent class.
   def FindFilesIn(self, path):
@@ -292,7 +292,7 @@
                                 stdout=PIPE, cwd=path, shell=True)
       result = []
       for file in output.stdout.read().split():
-        for dir_part in os.path.dirname(file).split(os.sep):
+        for dir_part in os.path.dirname(file).replace(os.sep, '/').split('/'):
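+          # Normalize os-specific separators to '/' so the path splits into
+          # its real components on Windows as well.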
           if self.IgnoreDir(dir_part):
             break
         else: