Version 3.21.16

Every place where an AllocationMemento is initialized with an AllocationSite is now checked to ensure that a valid AllocationSite goes in. This is temporary code to diagnose Chromium bug 284577.
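
For reference, the runtime guard added at each of these places follows the
pattern below; it is a simplified excerpt of the heap.cc hunks in this patch,
and the hydrogen.cc hunk adds the equivalent guard to generated code via
HDebugBreak:

    alloc_memento->set_map_no_write_barrier(allocation_memento_map());
    // Temporary diagnostic for chromium bug 284577: the object being stored
    // into the memento must really be an AllocationSite.
    CHECK(allocation_site->map() == allocation_site_map());
    alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);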

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@16723 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 8084cc5..85ec681 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,12 @@
+2013-09-16: Version 3.21.16
+
+        Every place where an AllocationMemento is initialized with an
+        AllocationSite is now checked to ensure that a valid AllocationSite
+        goes in. This is temporary code to diagnose Chromium bug 284577.
+
+        Performance and stability improvements on all platforms.
+
+
 2013-09-13: Version 3.21.15
 
         Non-JSObject heap objects are now handled using slow-path IC stub
diff --git a/src/api.cc b/src/api.cc
index 71a8f4a..5b41f92 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -3813,7 +3813,7 @@
   ENTER_V8(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
   EXCEPTION_PREAMBLE(isolate);
-  i::Handle<i::JSObject> result = i::Copy(self);
+  i::Handle<i::JSObject> result = i::JSObject::Copy(self);
   has_pending_exception = result.is_null();
   EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
   return Utils::ToLocal(result);
@@ -6212,7 +6212,7 @@
   i::Handle<i::JSObject> paragon_handle(i::JSObject::cast(paragon));
   EXCEPTION_PREAMBLE(isolate);
   ENTER_V8(isolate);
-  i::Handle<i::JSObject> result = i::Copy(paragon_handle);
+  i::Handle<i::JSObject> result = i::JSObject::Copy(paragon_handle);
   has_pending_exception = result.is_null();
   EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
   return Utils::ToLocal(result);
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index f60e1f8..b7a343f 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -807,12 +807,13 @@
   // The following registers must be saved and restored when calling through to
   // the runtime:
   //   r0 - contains return address (beginning of patch sequence)
-  //   r1 - function object
+  //   r1 - isolate
   FrameScope scope(masm, StackFrame::MANUAL);
   __ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
-  __ PrepareCallCFunction(1, 0, r1);
+  __ PrepareCallCFunction(1, 0, r2);
+  __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
   __ CallCFunction(
-      ExternalReference::get_make_code_young_function(masm->isolate()), 1);
+      ExternalReference::get_make_code_young_function(masm->isolate()), 2);
   __ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
   __ mov(pc, r0);
 }
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index cd1809f..39b2dc5 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -2765,9 +2765,10 @@
 
   if (do_gc) {
     // Passing r0.
-    __ PrepareCallCFunction(1, 0, r1);
+    __ PrepareCallCFunction(2, 0, r1);
+    __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
     __ CallCFunction(ExternalReference::perform_gc_function(isolate),
-        1, 0);
+        2, 0);
   }
 
   ExternalReference scope_depth =
@@ -3375,8 +3376,7 @@
     receiver = r0;
   }
 
-  StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss,
-                                         support_wrapper_);
+  StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss);
 
   __ bind(&miss);
   StubCompiler::TailCallBuiltin(
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 1bcf3e3..732d9f3 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -870,7 +870,8 @@
 }
 
 
-void Code::PatchPlatformCodeAge(byte* sequence,
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+                                byte* sequence,
                                 Code::Age age,
                                 MarkingParity parity) {
   uint32_t young_length;
@@ -879,7 +880,7 @@
     CopyBytes(sequence, young_sequence, young_length);
     CPU::FlushICache(sequence, young_length);
   } else {
-    Code* stub = GetCodeAgeStub(age, parity);
+    Code* stub = GetCodeAgeStub(isolate, age, parity);
     CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
     patcher.masm()->add(r0, pc, Operand(-8));
     patcher.masm()->ldr(pc, MemOperand(pc, -4));
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 59a8818..7f504ea 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -1883,11 +1883,9 @@
 
 LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
   LOperand* string = UseRegister(instr->string());
-  LOperand* index = UseRegister(instr->index());
-  LOperand* value = UseTempRegister(instr->value());
-  LSeqStringSetChar* result =
-      new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
-  return DefineAsRegister(result);
+  LOperand* index = UseRegisterOrConstant(instr->index());
+  LOperand* value = UseRegister(instr->value());
+  return new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
 }
 
 
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 7f65023..b7fc3b6 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -1975,32 +1975,42 @@
 
 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
   Register string = ToRegister(instr->string());
-  Register index = ToRegister(instr->index());
+  LOperand* index_op = instr->index();
   Register value = ToRegister(instr->value());
+  Register scratch = scratch0();
   String::Encoding encoding = instr->encoding();
 
   if (FLAG_debug_code) {
-    __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
-    __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
+    __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+    __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
 
-    __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
+    __ and_(scratch, scratch,
+            Operand(kStringRepresentationMask | kStringEncodingMask));
     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
-    __ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING
-                           ? one_byte_seq_type : two_byte_seq_type));
+    __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING
+                            ? one_byte_seq_type : two_byte_seq_type));
     __ Check(eq, kUnexpectedStringType);
   }
 
-  __ add(ip,
-         string,
-         Operand(SeqString::kHeaderSize - kHeapObjectTag));
-  if (encoding == String::ONE_BYTE_ENCODING) {
-    __ strb(value, MemOperand(ip, index));
+  if (index_op->IsConstantOperand()) {
+    int constant_index = ToInteger32(LConstantOperand::cast(index_op));
+    if (encoding == String::ONE_BYTE_ENCODING) {
+      __ strb(value,
+              FieldMemOperand(string, SeqString::kHeaderSize + constant_index));
+    } else {
+      __ strh(value,
+          FieldMemOperand(string, SeqString::kHeaderSize + constant_index * 2));
+    }
   } else {
-    // MemOperand with ip as the base register is not allowed for strh, so
-    // we do the address calculation explicitly.
-    __ add(ip, ip, Operand(index, LSL, 1));
-    __ strh(value, MemOperand(ip));
+    Register index = ToRegister(index_op);
+    if (encoding == String::ONE_BYTE_ENCODING) {
+      __ add(scratch, string, Operand(index));
+      __ strb(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
+    } else {
+      __ add(scratch, string, Operand(index, LSL, 1));
+      __ strh(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
+    }
   }
 }
 
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 085af3f..681299e 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -380,31 +380,27 @@
                                             Register receiver,
                                             Register scratch1,
                                             Register scratch2,
-                                            Label* miss,
-                                            bool support_wrappers) {
+                                            Label* miss) {
   Label check_wrapper;
 
   // Check if the object is a string leaving the instance type in the
   // scratch1 register.
-  GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
-                      support_wrappers ? &check_wrapper : miss);
+  GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
 
   // Load length directly from the string.
   __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
   __ Ret();
 
-  if (support_wrappers) {
-    // Check if the object is a JSValue wrapper.
-    __ bind(&check_wrapper);
-    __ cmp(scratch1, Operand(JS_VALUE_TYPE));
-    __ b(ne, miss);
+  // Check if the object is a JSValue wrapper.
+  __ bind(&check_wrapper);
+  __ cmp(scratch1, Operand(JS_VALUE_TYPE));
+  __ b(ne, miss);
 
-    // Unwrap the value and check if the wrapped value is a string.
-    __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
-    GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
-    __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
-    __ Ret();
-  }
+  // Unwrap the value and check if the wrapped value is a string.
+  __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
+  GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
+  __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
+  __ Ret();
 }
 
 
diff --git a/src/array.js b/src/array.js
index 5f89ebb..4a7aea5 100644
--- a/src/array.js
+++ b/src/array.js
@@ -399,14 +399,13 @@
   n--;
   var value = this[n];
 
-  EnqueueSpliceRecord(this, n, [value], 0);
-
   try {
     BeginPerformSplice(this);
     delete this[n];
     this.length = n;
   } finally {
     EndPerformSplice(this);
+    EnqueueSpliceRecord(this, n, [value], 0);
   }
 
   return value;
@@ -441,8 +440,6 @@
   var n = TO_UINT32(this.length);
   var m = %_ArgumentsLength();
 
-  EnqueueSpliceRecord(this, n, [], m);
-
   try {
     BeginPerformSplice(this);
     for (var i = 0; i < m; i++) {
@@ -451,6 +448,7 @@
     this.length = n + m;
   } finally {
     EndPerformSplice(this);
+    EnqueueSpliceRecord(this, n, [], m);
   }
 
   return this.length;
@@ -581,14 +579,13 @@
 function ObservedArrayShift(len) {
   var first = this[0];
 
-  EnqueueSpliceRecord(this, 0, [first], 0);
-
   try {
     BeginPerformSplice(this);
     SimpleMove(this, 0, 1, len, 0);
     this.length = len - 1;
   } finally {
     EndPerformSplice(this);
+    EnqueueSpliceRecord(this, 0, [first], 0);
   }
 
   return first;
@@ -627,8 +624,6 @@
   var len = TO_UINT32(this.length);
   var num_arguments = %_ArgumentsLength();
 
-  EnqueueSpliceRecord(this, 0, [], num_arguments);
-
   try {
     BeginPerformSplice(this);
     SimpleMove(this, 0, 0, len, num_arguments);
@@ -638,6 +633,7 @@
     this.length = len + num_arguments;
   } finally {
     EndPerformSplice(this);
+    EnqueueSpliceRecord(this, 0, [], num_arguments);
   }
 
   return len + num_arguments;
diff --git a/src/ast.cc b/src/ast.cc
index 823dede..5f085d3 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -460,10 +460,7 @@
   receiver_types_.Clear();
   if (key()->IsPropertyName()) {
     FunctionPrototypeStub proto_stub(Code::LOAD_IC);
-    StringLengthStub string_stub(Code::LOAD_IC, false);
-    if (oracle->LoadIsStub(this, &string_stub)) {
-      is_string_length_ = true;
-    } else if (oracle->LoadIsStub(this, &proto_stub)) {
+    if (oracle->LoadIsStub(this, &proto_stub)) {
       is_function_prototype_ = true;
     } else {
       Literal* lit_key = key()->AsLiteral();
diff --git a/src/ast.h b/src/ast.h
index c630906..71a51ab 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1646,7 +1646,6 @@
 
   BailoutId LoadId() const { return load_id_; }
 
-  bool IsStringLength() const { return is_string_length_; }
   bool IsStringAccess() const { return is_string_access_; }
   bool IsFunctionPrototype() const { return is_function_prototype_; }
 
@@ -1674,7 +1673,6 @@
         load_id_(GetNextId(isolate)),
         is_monomorphic_(false),
         is_uninitialized_(false),
-        is_string_length_(false),
         is_string_access_(false),
         is_function_prototype_(false) { }
 
@@ -1687,7 +1685,6 @@
   SmallMapList receiver_types_;
   bool is_monomorphic_ : 1;
   bool is_uninitialized_ : 1;
-  bool is_string_length_ : 1;
   bool is_string_access_ : 1;
   bool is_function_prototype_ : 1;
 };
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index 23d4269..23d5442 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -357,40 +357,45 @@
 
   HObjectAccess access = HObjectAccess::ForAllocationSiteTransitionInfo();
   HInstruction* boilerplate = Add<HLoadNamedField>(allocation_site, access);
+  HValue* push_value;
   if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS) {
     HValue* elements = AddLoadElements(boilerplate);
 
     IfBuilder if_fixed_cow(this);
     if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map());
     if_fixed_cow.Then();
-    environment()->Push(BuildCloneShallowArray(boilerplate,
-                                               allocation_site,
-                                               alloc_site_mode,
-                                               FAST_ELEMENTS,
-                                               0/*copy-on-write*/));
+    push_value = BuildCloneShallowArray(boilerplate,
+                                        allocation_site,
+                                        alloc_site_mode,
+                                        FAST_ELEMENTS,
+                                        0/*copy-on-write*/);
+    environment()->Push(push_value);
     if_fixed_cow.Else();
 
     IfBuilder if_fixed(this);
     if_fixed.If<HCompareMap>(elements, factory->fixed_array_map());
     if_fixed.Then();
-    environment()->Push(BuildCloneShallowArray(boilerplate,
-                                               allocation_site,
-                                               alloc_site_mode,
-                                               FAST_ELEMENTS,
-                                               length));
+    push_value = BuildCloneShallowArray(boilerplate,
+                                        allocation_site,
+                                        alloc_site_mode,
+                                        FAST_ELEMENTS,
+                                        length);
+    environment()->Push(push_value);
     if_fixed.Else();
-    environment()->Push(BuildCloneShallowArray(boilerplate,
-                                               allocation_site,
-                                               alloc_site_mode,
-                                               FAST_DOUBLE_ELEMENTS,
-                                               length));
+    push_value = BuildCloneShallowArray(boilerplate,
+                                        allocation_site,
+                                        alloc_site_mode,
+                                        FAST_DOUBLE_ELEMENTS,
+                                        length);
+    environment()->Push(push_value);
   } else {
     ElementsKind elements_kind = casted_stub()->ComputeElementsKind();
-    environment()->Push(BuildCloneShallowArray(boilerplate,
-                                               allocation_site,
-                                               alloc_site_mode,
-                                               elements_kind,
-                                               length));
+    push_value = BuildCloneShallowArray(boilerplate,
+                                        allocation_site,
+                                        alloc_site_mode,
+                                        elements_kind,
+                                        length);
+    environment()->Push(push_value);
   }
 
   checker.ElseDeopt("Uninitialized boilerplate literals");
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 946eb76..30ec1c7 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -830,19 +830,12 @@
 
 class StringLengthStub: public ICStub {
  public:
-  StringLengthStub(Code::Kind kind, bool support_wrapper)
-      : ICStub(kind), support_wrapper_(support_wrapper) { }
+  explicit StringLengthStub(Code::Kind kind) : ICStub(kind) { }
   virtual void Generate(MacroAssembler* masm);
 
  private:
   STATIC_ASSERT(KindBits::kSize == 4);
-  class WrapperModeBits: public BitField<bool, 4, 1> {};
-  virtual CodeStub::Major MajorKey() { return StringLength; }
-  virtual int MinorKey() {
-    return KindBits::encode(kind()) | WrapperModeBits::encode(support_wrapper_);
-  }
-
-  bool support_wrapper_;
+  virtual CodeStub::Major MajorKey() { return StringLength; }
 };
 
 
diff --git a/src/handles.cc b/src/handles.cc
index b3704df..033fdab 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -294,21 +294,6 @@
 }
 
 
-Handle<JSObject> Copy(Handle<JSObject> obj) {
-  Isolate* isolate = obj->GetIsolate();
-  CALL_HEAP_FUNCTION(isolate,
-                     isolate->heap()->CopyJSObject(*obj), JSObject);
-}
-
-
-Handle<JSObject> DeepCopy(Handle<JSObject> obj) {
-  Isolate* isolate = obj->GetIsolate();
-  CALL_HEAP_FUNCTION(isolate,
-                     obj->DeepCopy(isolate),
-                     JSObject);
-}
-
-
 // Wrappers for scripts are kept alive and cached in weak global
 // handles referred from foreign objects held by the scripts as long as
 // they are used. When they are not used anymore, the garbage
diff --git a/src/handles.h b/src/handles.h
index c3e4dca..585f7b4 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -255,10 +255,6 @@
 Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate,
                                                    uint32_t index);
 
-Handle<JSObject> Copy(Handle<JSObject> obj);
-
-Handle<JSObject> DeepCopy(Handle<JSObject> obj);
-
 Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray>,
                                       Handle<JSArray> array);
 
diff --git a/src/heap.cc b/src/heap.cc
index 24e4039..108cfb3 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -4310,6 +4310,10 @@
   AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
       reinterpret_cast<Address>(result) + map->instance_size());
   alloc_memento->set_map_no_write_barrier(allocation_memento_map());
+
+  // TODO(mvstanton): Extra check to diagnose chromium bug 284577.
+  CHECK(allocation_site->map() == allocation_site_map());
+
   alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
   return result;
 }
@@ -5053,6 +5057,10 @@
       AllocationMemento* alloc_memento;
       if (maybe_alloc_memento->To(&alloc_memento)) {
         alloc_memento->set_map_no_write_barrier(allocation_memento_map());
+
+        // TODO(mvstanton): Extra check to diagnose chromium bug 284577.
+        CHECK(site->map() == allocation_site_map());
+
         alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
       }
     }
@@ -5075,6 +5083,10 @@
     AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
         reinterpret_cast<Address>(clone) + object_size);
     alloc_memento->set_map_no_write_barrier(allocation_memento_map());
+
+    // TODO(mvstanton): Extra check to diagnose chromium bug 284577.
+    CHECK(site->map() == allocation_site_map());
+
     alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
   }
 
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 7d33141..7d3b879 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -1303,6 +1303,8 @@
 // Inserts an int3/stop break instruction for debugging purposes.
 class HDebugBreak V8_FINAL : public HTemplateInstruction<0> {
  public:
+  DECLARE_INSTRUCTION_FACTORY_P0(HDebugBreak);
+
   virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
     return Representation::None();
   }
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 23c373f..e1b89a4 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -1831,6 +1831,19 @@
   Handle<Map> alloc_memento_map(
       isolate()->heap()->allocation_memento_map());
   AddStoreMapConstant(alloc_memento, alloc_memento_map);
+
+  {
+    // TODO(mvstanton): the code below is turned on to diagnose chromium bug
+    // 284577.
+    Handle<Map> alloc_site_map(isolate()->heap()->allocation_site_map());
+    IfBuilder builder(this);
+    builder.If<HCompareMap>(alloc_site, alloc_site_map);
+    builder.Then();
+    builder.Else();
+    Add<HDebugBreak>();
+    builder.End();
+  }
+
   HObjectAccess access = HObjectAccess::ForAllocationMementoSite();
   Add<HStoreNamedField>(alloc_memento, access, alloc_site);
   return alloc_memento;
@@ -4172,8 +4185,7 @@
       IsFastLiteral(Handle<JSObject>::cast(boilerplate),
                     kMaxFastLiteralDepth,
                     &max_properties)) {
-    Handle<JSObject> boilerplate_object =
-        Handle<JSObject>::cast(boilerplate);
+    Handle<JSObject> boilerplate_object = Handle<JSObject>::cast(boilerplate);
 
     literal = BuildFastLiteral(boilerplate_object,
                                Handle<Object>::null(),
@@ -5136,9 +5148,7 @@
     CHECK_ALIVE(VisitForValue(prop->obj()));
     HValue* object = Top();
     HValue* key = NULL;
-    if ((!prop->IsStringLength() &&
-         !prop->IsFunctionPrototype() &&
-         !prop->key()->IsPropertyName()) ||
+    if ((!prop->IsFunctionPrototype() && !prop->key()->IsPropertyName()) ||
         prop->IsStringAccess()) {
       CHECK_ALIVE(VisitForValue(prop->key()));
       key = Top();
@@ -5828,17 +5838,20 @@
 }
 
 
+static bool AreStringTypes(SmallMapList* types) {
+  if (types == NULL || types->length() == 0) return false;
+  for (int i = 0; i < types->length(); i++) {
+    if (types->at(i)->instance_type() >= FIRST_NONSTRING_TYPE) return false;
+  }
+  return true;
+}
+
+
 void HOptimizedGraphBuilder::BuildLoad(Property* expr,
                                        int position,
                                        BailoutId ast_id) {
   HInstruction* instr = NULL;
-  if (expr->IsStringLength()) {
-    HValue* string = Pop();
-    BuildCheckHeapObject(string);
-    HInstruction* checkstring =
-        AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
-    instr = BuildLoadStringLength(string, checkstring);
-  } else if (expr->IsStringAccess()) {
+  if (expr->IsStringAccess()) {
     HValue* index = Pop();
     HValue* string = Pop();
     HValue* context = environment()->context();
@@ -5874,6 +5887,12 @@
       } else {
         instr = BuildLoadNamedMonomorphic(Pop(), name, map);
       }
+    } else if (AreStringTypes(types) &&
+               name->Equals(isolate()->heap()->length_string())) {
+      BuildCheckHeapObject(Pop());
+      HValue* checked_object =
+          AddInstruction(HCheckInstanceType::NewIsString(object, zone()));
+      instr = BuildLoadStringLength(object, checked_object);
     } else if (types != NULL && types->length() > 1) {
       return HandlePolymorphicLoadNamedField(
           position, ast_id, Pop(), types, name);
@@ -5914,9 +5933,7 @@
   if (TryArgumentsAccess(expr)) return;
 
   CHECK_ALIVE(VisitForValue(expr->obj()));
-  if ((!expr->IsStringLength() &&
-       !expr->IsFunctionPrototype() &&
-       !expr->key()->IsPropertyName()) ||
+  if ((!expr->IsFunctionPrototype() && !expr->key()->IsPropertyName()) ||
       expr->IsStringAccess()) {
     CHECK_ALIVE(VisitForValue(expr->key()));
   }
@@ -7567,9 +7584,7 @@
   HValue* object = Top();
 
   HValue* key = NULL;
-  if ((!prop->IsStringLength() &&
-       !prop->IsFunctionPrototype() &&
-       !prop->key()->IsPropertyName()) ||
+  if ((!prop->IsFunctionPrototype() && !prop->key()->IsPropertyName()) ||
       prop->IsStringAccess()) {
     CHECK_ALIVE(VisitForValue(prop->key()));
     key = Top();
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index a159748..91eba98 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -539,10 +539,12 @@
   __ mov(eax, Operand(esp, 8 * kPointerSize));
   {
     FrameScope scope(masm, StackFrame::MANUAL);
-    __ PrepareCallCFunction(1, ebx);
+    __ PrepareCallCFunction(2, ebx);
+    __ mov(Operand(esp, 1 * kPointerSize),
+           Immediate(ExternalReference::isolate_address(masm->isolate())));
     __ mov(Operand(esp, 0), eax);
     __ CallCFunction(
-        ExternalReference::get_make_code_young_function(masm->isolate()), 1);
+        ExternalReference::get_make_code_young_function(masm->isolate()), 2);
   }
   __ popad();
   __ ret(0);
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index a83c1ae..3ea749f 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -984,7 +984,7 @@
           ASSERT_EQ(Token::SHL, op);
           if (CpuFeatures::IsSupported(SSE2)) {
             CpuFeatureScope use_sse2(masm, SSE2);
-            __ cvtsi2sd(xmm0, left);
+            __ Cvtsi2sd(xmm0, left);
             __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
           } else {
             __ mov(Operand(esp, 1 * kPointerSize), left);
@@ -1370,7 +1370,7 @@
         // Store the result in the HeapNumber and return.
         if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatureScope use_sse2(masm, SSE2);
-          __ cvtsi2sd(xmm0, ebx);
+          __ Cvtsi2sd(xmm0, ebx);
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
         } else {
           __ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -1594,7 +1594,7 @@
         // Store the result in the HeapNumber and return.
         if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatureScope use_sse2(masm, SSE2);
-          __ cvtsi2sd(xmm0, ebx);
+          __ Cvtsi2sd(xmm0, ebx);
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
         } else {
           __ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -1782,7 +1782,7 @@
         // Store the result in the HeapNumber and return.
         if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatureScope use_sse2(masm, SSE2);
-          __ cvtsi2sd(xmm0, ebx);
+          __ Cvtsi2sd(xmm0, ebx);
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
         } else {
           __ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -2329,12 +2329,12 @@
   __ jmp(not_numbers);  // Argument in eax is not a number.
   __ bind(&load_smi_edx);
   __ SmiUntag(edx);  // Untag smi before converting to float.
-  __ cvtsi2sd(xmm0, edx);
+  __ Cvtsi2sd(xmm0, edx);
   __ SmiTag(edx);  // Retag smi for heap number overwriting test.
   __ jmp(&load_eax);
   __ bind(&load_smi_eax);
   __ SmiUntag(eax);  // Untag smi before converting to float.
-  __ cvtsi2sd(xmm1, eax);
+  __ Cvtsi2sd(xmm1, eax);
   __ SmiTag(eax);  // Retag smi for heap number overwriting test.
   __ jmp(&done, Label::kNear);
   __ bind(&load_float_eax);
@@ -2350,11 +2350,11 @@
   __ mov(scratch, left);
   ASSERT(!scratch.is(right));  // We're about to clobber scratch.
   __ SmiUntag(scratch);
-  __ cvtsi2sd(xmm0, scratch);
+  __ Cvtsi2sd(xmm0, scratch);
 
   __ mov(scratch, right);
   __ SmiUntag(scratch);
-  __ cvtsi2sd(xmm1, scratch);
+  __ Cvtsi2sd(xmm1, scratch);
 }
 
 
@@ -2365,7 +2365,7 @@
                                                   Register scratch,
                                                   XMMRegister xmm_scratch) {
   __ cvttsd2si(int32_result, Operand(operand));
-  __ cvtsi2sd(xmm_scratch, int32_result);
+  __ Cvtsi2sd(xmm_scratch, int32_result);
   __ pcmpeqd(xmm_scratch, operand);
   __ movmskps(scratch, xmm_scratch);
   // Two least significant bits should be both set.
@@ -2470,7 +2470,7 @@
 
   // Save 1 in double_result - we need this several times later on.
   __ mov(scratch, Immediate(1));
-  __ cvtsi2sd(double_result, scratch);
+  __ Cvtsi2sd(double_result, scratch);
 
   if (exponent_type_ == ON_STACK) {
     Label base_is_smi, unpack_exponent;
@@ -2490,7 +2490,7 @@
 
     __ bind(&base_is_smi);
     __ SmiUntag(base);
-    __ cvtsi2sd(double_base, base);
+    __ Cvtsi2sd(double_base, base);
 
     __ bind(&unpack_exponent);
     __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
@@ -2683,7 +2683,7 @@
   // and may not have contained the exponent value in the first place when the
   // exponent is a smi.  We reset it with exponent value before bailing out.
   __ j(not_equal, &done);
-  __ cvtsi2sd(double_exponent, exponent);
+  __ Cvtsi2sd(double_exponent, exponent);
 
   // Returning or bailing out.
   Counters* counters = masm->isolate()->counters();
@@ -2756,8 +2756,7 @@
     __ j(not_equal, &miss);
   }
 
-  StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss,
-                                         support_wrapper_);
+  StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss);
   __ bind(&miss);
   StubCompiler::TailCallBuiltin(
       masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
@@ -4508,6 +4507,8 @@
     // stack alignment is known to be correct. This function takes one argument
     // which is passed on the stack, and we know that the stack has been
     // prepared to pass at least one argument.
+    __ mov(Operand(esp, 1 * kPointerSize),
+           Immediate(ExternalReference::isolate_address(masm->isolate())));
     __ mov(Operand(esp, 0 * kPointerSize), eax);  // Result.
     __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
   }
@@ -6258,7 +6259,7 @@
     __ bind(&right_smi);
     __ mov(ecx, eax);  // Can't clobber eax because we can still jump away.
     __ SmiUntag(ecx);
-    __ cvtsi2sd(xmm1, ecx);
+    __ Cvtsi2sd(xmm1, ecx);
 
     __ bind(&left);
     __ JumpIfSmi(edx, &left_smi, Label::kNear);
@@ -6270,7 +6271,7 @@
     __ bind(&left_smi);
     __ mov(ecx, edx);  // Can't clobber edx because we can still jump away.
     __ SmiUntag(ecx);
-    __ cvtsi2sd(xmm0, ecx);
+    __ Cvtsi2sd(xmm0, ecx);
 
     __ bind(&done);
     // Compare operands.
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 84a4d23..9385423 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -768,7 +768,7 @@
   __ SmiUntag(ebx);
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatureScope fscope(masm, SSE2);
-    __ cvtsi2sd(xmm0, ebx);
+    __ Cvtsi2sd(xmm0, ebx);
     __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
               xmm0);
   } else {
@@ -1165,7 +1165,8 @@
 }
 
 
-void Code::PatchPlatformCodeAge(byte* sequence,
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+                                byte* sequence,
                                 Code::Age age,
                                 MarkingParity parity) {
   uint32_t young_length;
@@ -1174,7 +1175,7 @@
     CopyBytes(sequence, young_sequence, young_length);
     CPU::FlushICache(sequence, young_length);
   } else {
-    Code* stub = GetCodeAgeStub(age, parity);
+    Code* stub = GetCodeAgeStub(isolate, age, parity);
     CodePatcher patcher(sequence, young_length);
     patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
   }
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index d50b780..7d7b51d 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -3977,7 +3977,7 @@
     __ bind(&negative_sign);
     // Truncate, then compare and compensate.
     __ cvttsd2si(output_reg, Operand(input_reg));
-    __ cvtsi2sd(xmm_scratch, output_reg);
+    __ Cvtsi2sd(xmm_scratch, output_reg);
     __ ucomisd(input_reg, xmm_scratch);
     __ j(equal, &done, Label::kNear);
     __ sub(output_reg, Immediate(1));
@@ -4027,7 +4027,7 @@
   __ RecordComment("D2I conversion overflow");
   DeoptimizeIf(equal, instr->environment());
 
-  __ cvtsi2sd(xmm_scratch, output_reg);
+  __ Cvtsi2sd(xmm_scratch, output_reg);
   __ ucomisd(xmm_scratch, input_temp);
   __ j(equal, &done);
   __ sub(output_reg, Immediate(1));
@@ -4978,7 +4978,7 @@
   ASSERT(output->IsDoubleRegister());
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatureScope scope(masm(), SSE2);
-    __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
+    __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
   } else if (input->IsRegister()) {
     Register input_reg = ToRegister(input);
     __ push(input_reg);
@@ -5087,7 +5087,7 @@
     __ xor_(reg, 0x80000000);
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope feature_scope(masm(), SSE2);
-      __ cvtsi2sd(xmm0, Operand(reg));
+      __ Cvtsi2sd(xmm0, Operand(reg));
     } else {
       __ push(reg);
       __ fild_s(Operand(esp, 0));
@@ -5308,7 +5308,7 @@
                                 bool deoptimize_on_minus_zero,
                                 LEnvironment* env,
                                 NumberUntagDMode mode) {
-  Label load_smi, done;
+  Label convert, load_smi, done;
 
   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
     // Smi check.
@@ -5317,26 +5317,15 @@
     // Heap number map check.
     __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
            factory()->heap_number_map());
-    if (!can_convert_undefined_to_nan) {
-      DeoptimizeIf(not_equal, env);
+    if (can_convert_undefined_to_nan) {
+      __ j(not_equal, &convert, Label::kNear);
     } else {
-      Label heap_number, convert;
-      __ j(equal, &heap_number, Label::kNear);
-
-      // Convert undefined (and hole) to NaN.
-      __ cmp(input_reg, factory()->undefined_value());
       DeoptimizeIf(not_equal, env);
-
-      __ bind(&convert);
-      ExternalReference nan =
-          ExternalReference::address_of_canonical_non_hole_nan();
-      __ movdbl(result_reg, Operand::StaticVariable(nan));
-      __ jmp(&done, Label::kNear);
-
-      __ bind(&heap_number);
     }
+
     // Heap number to XMM conversion.
     __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+
     if (deoptimize_on_minus_zero) {
       XMMRegister xmm_scratch = xmm0;
       __ xorps(xmm_scratch, xmm_scratch);
@@ -5347,6 +5336,19 @@
       DeoptimizeIf(not_zero, env);
     }
     __ jmp(&done, Label::kNear);
+
+    if (can_convert_undefined_to_nan) {
+      __ bind(&convert);
+
+      // Convert undefined (and hole) to NaN.
+      __ cmp(input_reg, factory()->undefined_value());
+      DeoptimizeIf(not_equal, env);
+
+      ExternalReference nan =
+          ExternalReference::address_of_canonical_non_hole_nan();
+      __ movdbl(result_reg, Operand::StaticVariable(nan));
+      __ jmp(&done, Label::kNear);
+    }
   } else {
     ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
   }
@@ -5356,7 +5358,7 @@
   // input register since we avoid dependencies.
   __ mov(temp_reg, input_reg);
   __ SmiUntag(temp_reg);  // Untag smi before converting to float.
-  __ cvtsi2sd(result_reg, Operand(temp_reg));
+  __ Cvtsi2sd(result_reg, Operand(temp_reg));
   __ bind(&done);
 }
 
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index b65d328..d8a475c 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -283,7 +283,7 @@
                                Label::Distance dst) {
   ASSERT(!input_reg.is(scratch));
   cvttsd2si(result_reg, Operand(input_reg));
-  cvtsi2sd(scratch, Operand(result_reg));
+  Cvtsi2sd(scratch, Operand(result_reg));
   ucomisd(scratch, input_reg);
   j(not_equal, conversion_failed, dst);
   j(parity_even, conversion_failed, dst);  // NaN.
@@ -392,7 +392,7 @@
 
     movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
     cvttsd2si(result_reg, Operand(xmm0));
-    cvtsi2sd(temp, Operand(result_reg));
+    Cvtsi2sd(temp, Operand(result_reg));
     ucomisd(xmm0, temp);
     RecordComment("Deferred TaggedToI: lost precision");
     j(not_equal, lost_precision, Label::kNear);
@@ -457,7 +457,7 @@
   cmp(src, Immediate(0));
   movdbl(scratch,
          Operand(reinterpret_cast<int32_t>(&kUint32Bias), RelocInfo::NONE32));
-  cvtsi2sd(dst, src);
+  Cvtsi2sd(dst, src);
   j(not_sign, &done, Label::kNear);
   addsd(dst, scratch);
   bind(&done);
@@ -676,6 +676,12 @@
 #endif
 
 
+void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
+  xorps(dst, dst);
+  cvtsi2sd(dst, src);
+}
+
+
 void MacroAssembler::Set(Register dst, const Immediate& x) {
   if (x.is_zero()) {
     xor_(dst, dst);  // Shorter than mov.
@@ -834,7 +840,7 @@
   SmiUntag(scratch1);
   if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
     CpuFeatureScope fscope(this, SSE2);
-    cvtsi2sd(scratch2, scratch1);
+    Cvtsi2sd(scratch2, scratch1);
     movdbl(FieldOperand(elements, key, times_4,
                         FixedDoubleArray::kHeaderSize - elements_offset),
            scratch2);
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index e4e4533..d813692 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -366,6 +366,12 @@
   void Set(Register dst, const Immediate& x);
   void Set(const Operand& dst, const Immediate& x);
 
+  // The cvtsi2sd instruction only writes to the low 64 bits of the dst
+  // register, which hinders register renaming and lengthens dependence
+  // chains, so we use xorps to clear dst before cvtsi2sd to avoid this.
+  void Cvtsi2sd(XMMRegister dst, Register src) { Cvtsi2sd(dst, Operand(src)); }
+  void Cvtsi2sd(XMMRegister dst, const Operand& src);
+
   // Support for constant splitting.
   bool IsUnsafeImmediate(const Immediate& x);
   void SafeSet(Register dst, const Immediate& x);
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 354c2fd..89ea6be 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -329,32 +329,28 @@
                                             Register receiver,
                                             Register scratch1,
                                             Register scratch2,
-                                            Label* miss,
-                                            bool support_wrappers) {
+                                            Label* miss) {
   Label check_wrapper;
 
   // Check if the object is a string leaving the instance type in the
   // scratch register.
-  GenerateStringCheck(masm, receiver, scratch1, miss,
-                      support_wrappers ? &check_wrapper : miss);
+  GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
 
   // Load length from the string and convert to a smi.
   __ mov(eax, FieldOperand(receiver, String::kLengthOffset));
   __ ret(0);
 
-  if (support_wrappers) {
-    // Check if the object is a JSValue wrapper.
-    __ bind(&check_wrapper);
-    __ cmp(scratch1, JS_VALUE_TYPE);
-    __ j(not_equal, miss);
+  // Check if the object is a JSValue wrapper.
+  __ bind(&check_wrapper);
+  __ cmp(scratch1, JS_VALUE_TYPE);
+  __ j(not_equal, miss);
 
-    // Check if the wrapped value is a string and load the length
-    // directly if it is.
-    __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
-    GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
-    __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
-    __ ret(0);
-  }
+  // Check if the wrapped value is a string and load the length
+  // directly if it is.
+  __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
+  GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
+  __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
+  __ ret(0);
 }
 
 
@@ -862,7 +858,7 @@
     __ SmiUntag(value_reg);
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope use_sse2(masm, SSE2);
-      __ cvtsi2sd(xmm0, value_reg);
+      __ Cvtsi2sd(xmm0, value_reg);
     } else {
       __ push(value_reg);
       __ fild_s(Operand(esp, 0));
@@ -1041,7 +1037,7 @@
     __ SmiUntag(value_reg);
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope use_sse2(masm, SSE2);
-      __ cvtsi2sd(xmm0, value_reg);
+      __ Cvtsi2sd(xmm0, value_reg);
     } else {
       __ push(value_reg);
       __ fild_s(Operand(esp, 0));
diff --git a/src/ic.cc b/src/ic.cc
index 5518751..5267af1 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -549,9 +549,11 @@
                                       Code::ExtraICState extra_ic_state,
                                       Handle<Object> object,
                                       Handle<String> name) {
+  bool use_ic = FLAG_use_ic;
   if (object->IsJSObject()) {
     Handle<JSObject> receiver = Handle<JSObject>::cast(object);
     if (receiver->map()->is_deprecated()) {
+      use_ic = false;
       JSObject::MigrateInstance(receiver);
     }
   }
@@ -590,9 +592,7 @@
   }
 
   // Lookup is valid: Update inline cache and stub cache.
-  if (FLAG_use_ic) {
-    UpdateCaches(&lookup, state, extra_ic_state, object, name);
-  }
+  if (use_ic) UpdateCaches(&lookup, state, extra_ic_state, object, name);
 
   // Get the property.
   PropertyAttributes attr;
@@ -819,9 +819,11 @@
                                     Handle<String>::cast(key));
   }
 
+  bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
   if (object->IsJSObject()) {
     Handle<JSObject> receiver = Handle<JSObject>::cast(object);
     if (receiver->map()->is_deprecated()) {
+      use_ic = false;
       JSObject::MigrateInstance(receiver);
     }
   }
@@ -830,7 +832,6 @@
     return TypeError("non_object_property_call", object, key);
   }
 
-  bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
   ASSERT(!(use_ic && object->IsJSGlobalProxy()));
 
   if (use_ic && state != MEGAMORPHIC) {
@@ -874,21 +875,20 @@
     return TypeError("non_object_property_load", object, name);
   }
 
-  if (FLAG_use_ic) {
+  bool use_ic = FLAG_use_ic;
+
+  if (use_ic) {
     // Use specialized code for getting the length of strings and
     // string wrapper objects.  The length property of string wrapper
     // objects is read-only and therefore always returns the length of
     // the underlying string value.  See ECMA-262 15.5.5.1.
-    if ((object->IsString() || object->IsStringWrapper()) &&
+    if (object->IsStringWrapper() &&
         name->Equals(isolate()->heap()->length_string())) {
       Handle<Code> stub;
       if (state == UNINITIALIZED) {
         stub = pre_monomorphic_stub();
-      } else if (state == PREMONOMORPHIC) {
-        StringLengthStub string_length_stub(kind(), !object->IsString());
-        stub = string_length_stub.GetCode(isolate());
-      } else if (state == MONOMORPHIC && object->IsStringWrapper()) {
-        StringLengthStub string_length_stub(kind(), true);
+      } else if (state == PREMONOMORPHIC || state == MONOMORPHIC) {
+        StringLengthStub string_length_stub(kind());
         stub = string_length_stub.GetCode(isolate());
       } else if (state != MEGAMORPHIC) {
         ASSERT(state != GENERIC);
@@ -897,14 +897,12 @@
       if (!stub.is_null()) {
         set_target(*stub);
 #ifdef DEBUG
-        if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
+        if (FLAG_trace_ic) PrintF("[LoadIC : +#length /stringwrapper]\n");
 #endif
       }
       // Get the string if we have a string wrapper object.
-      Handle<Object> string = object->IsJSValue()
-          ? Handle<Object>(Handle<JSValue>::cast(object)->value(), isolate())
-          : object;
-      return Smi::FromInt(String::cast(*string)->length());
+      String* string = String::cast(JSValue::cast(*object)->value());
+      return Smi::FromInt(string->length());
     }
 
     // Use specialized code for getting prototype of functions.
@@ -936,13 +934,14 @@
   uint32_t index;
   if (kind() == Code::KEYED_LOAD_IC && name->AsArrayIndex(&index)) {
     // Rewrite to the generic keyed load stub.
-    if (FLAG_use_ic) set_target(*generic_stub());
+    if (use_ic) set_target(*generic_stub());
     return Runtime::GetElementOrCharAtOrFail(isolate(), object, index);
   }
 
   if (object->IsJSObject()) {
     Handle<JSObject> receiver = Handle<JSObject>::cast(object);
     if (receiver->map()->is_deprecated()) {
+      use_ic = false;
       JSObject::MigrateInstance(receiver);
     }
   }
@@ -960,7 +959,7 @@
   }
 
   // Update inline cache and stub cache.
-  if (FLAG_use_ic) UpdateCaches(&lookup, state, object, name);
+  if (use_ic) UpdateCaches(&lookup, state, object, name);
 
   PropertyAttributes attr;
   if (lookup.IsInterceptor() || lookup.IsHandler()) {
@@ -1265,6 +1264,8 @@
                           State state,
                           Handle<Object> object,
                           Handle<String> name) {
+  // TODO(verwaest): It would be nice to support loading fields from smis as
+  // well. For now just fail to update the cache.
   if (!object->IsHeapObject()) return;
 
   Handle<HeapObject> receiver = Handle<HeapObject>::cast(object);
@@ -1278,6 +1279,16 @@
   } else if (!lookup->IsCacheable()) {
     // Bail out if the result is not cacheable.
     code = slow_stub();
+  } else if (object->IsString() &&
+             name->Equals(isolate()->heap()->length_string())) {
+    int length_index = String::kLengthOffset / kPointerSize;
+    if (target()->is_load_stub()) {
+      LoadFieldStub stub(true, length_index, Representation::Tagged());
+      code = stub.GetCode(isolate());
+    } else {
+      KeyedLoadFieldStub stub(true, length_index, Representation::Tagged());
+      code = stub.GetCode(isolate());
+    }
   } else if (!object->IsJSObject()) {
     // TODO(jkummerow): It would be nice to support non-JSObjects in
     // ComputeLoadHandler, then we wouldn't need to go generic here.
@@ -1362,9 +1373,9 @@
         return isolate()->stub_cache()->ComputeLoadViaGetter(
             name, receiver, holder, function);
       } else if (receiver->IsJSArray() &&
-          name->Equals(isolate()->heap()->length_string())) {
-        PropertyIndex lengthIndex =
-          PropertyIndex::NewHeaderIndex(JSArray::kLengthOffset / kPointerSize);
+                 name->Equals(isolate()->heap()->length_string())) {
+        PropertyIndex lengthIndex = PropertyIndex::NewHeaderIndex(
+            JSArray::kLengthOffset / kPointerSize);
         return isolate()->stub_cache()->ComputeLoadField(
             name, receiver, holder, lengthIndex, Representation::Tagged());
       }
@@ -1496,6 +1507,7 @@
       } else if (object->IsJSObject()) {
         Handle<JSObject> receiver = Handle<JSObject>::cast(object);
         if (receiver->map()->is_deprecated()) {
+          use_ic = false;
           JSObject::MigrateInstance(receiver);
         }
 
@@ -1512,9 +1524,11 @@
     } else {
       TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "force generic");
     }
-    ASSERT(!stub.is_null());
-    set_target(*stub);
-    TRACE_IC("KeyedLoadIC", key, state, target());
+    if (use_ic) {
+      ASSERT(!stub.is_null());
+      set_target(*stub);
+      TRACE_IC("KeyedLoadIC", key, state, target());
+    }
   }
 
 
@@ -1678,7 +1692,9 @@
 
   Handle<JSObject> receiver = Handle<JSObject>::cast(object);
 
+  bool use_ic = FLAG_use_ic;
   if (receiver->map()->is_deprecated()) {
+    use_ic = false;
     JSObject::MigrateInstance(receiver);
   }
 
@@ -1701,7 +1717,7 @@
   // properties. Slow properties might indicate redefinition of the length
   // property. Note that when redefined using Object.freeze, it's possible
   // to have fast properties but a read-only length.
-  if (FLAG_use_ic &&
+  if (use_ic &&
       receiver->IsJSArray() &&
       name->Equals(isolate()->heap()->length_string()) &&
       Handle<JSArray>::cast(receiver)->AllowsSetElementsLength() &&
@@ -1716,7 +1732,7 @@
   }
 
   if (receiver->IsJSGlobalProxy()) {
-    if (FLAG_use_ic && kind() != Code::KEYED_STORE_IC) {
+    if (use_ic && kind() != Code::KEYED_STORE_IC) {
       // Generate a generic stub that goes to the runtime when we see a global
       // proxy as receiver.
       Handle<Code> stub = (strict_mode == kStrictMode)
@@ -1738,7 +1754,7 @@
     // Strict mode doesn't allow setting non-existent global property.
     return ReferenceError("not_defined", name);
   }
-  if (FLAG_use_ic) {
+  if (use_ic) {
     if (state == UNINITIALIZED) {
       Handle<Code> stub = (strict_mode == kStrictMode)
           ? pre_monomorphic_stub_strict()
diff --git a/src/isolate.cc b/src/isolate.cc
index 6fa496a..ceb8809 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -1087,7 +1087,7 @@
   Handle<String> key = factory()->stack_overflow_string();
   Handle<JSObject> boilerplate =
       Handle<JSObject>::cast(GetProperty(this, js_builtins_object(), key));
-  Handle<JSObject> exception = Copy(boilerplate);
+  Handle<JSObject> exception = JSObject::Copy(boilerplate);
   DoThrow(*exception, NULL);
 
   // Get stack trace limit.
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 3aabd97..400292e 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -833,14 +833,15 @@
   // The following registers must be saved and restored when calling through to
   // the runtime:
   //   a0 - contains return address (beginning of patch sequence)
-  //   a1 - function object
+  //   a1 - isolate
   RegList saved_regs =
       (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
   FrameScope scope(masm, StackFrame::MANUAL);
   __ MultiPush(saved_regs);
-  __ PrepareCallCFunction(1, 0, a1);
+  __ PrepareCallCFunction(1, 0, a2);
+  __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
   __ CallCFunction(
-      ExternalReference::get_make_code_young_function(masm->isolate()), 1);
+      ExternalReference::get_make_code_young_function(masm->isolate()), 2);
   __ MultiPop(saved_regs);
   __ Jump(a0);
 }
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 0589bf0..e4f68f9 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -2795,8 +2795,9 @@
   if (do_gc) {
     // Move result passed in v0 into a0 to call PerformGC.
     __ mov(a0, v0);
-    __ PrepareCallCFunction(1, 0, a1);
-    __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1, 0);
+    __ PrepareCallCFunction(2, 0, a1);
+    __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
+    __ CallCFunction(ExternalReference::perform_gc_function(isolate), 2, 0);
   }
 
   ExternalReference scope_depth =
@@ -3408,8 +3409,7 @@
     receiver = a0;
   }
 
-  StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss,
-                                         support_wrapper_);
+  StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss);
 
   __ bind(&miss);
   StubCompiler::TailCallBuiltin(
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index 5c847fc..a12faee 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -635,7 +635,8 @@
 }
 
 
-void Code::PatchPlatformCodeAge(byte* sequence,
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+                                byte* sequence,
                                 Code::Age age,
                                 MarkingParity parity) {
   uint32_t young_length;
@@ -644,7 +645,7 @@
     CopyBytes(sequence, young_sequence, young_length);
     CPU::FlushICache(sequence, young_length);
   } else {
-    Code* stub = GetCodeAgeStub(age, parity);
+    Code* stub = GetCodeAgeStub(isolate, age, parity);
     CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
     // Mark this code sequence for FindPlatformCodeAgeSequence()
     patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index b37c7e0..6659b6c 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -1789,33 +1789,43 @@
 
 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
   Register string = ToRegister(instr->string());
-  Register index = ToRegister(instr->index());
+  LOperand* index_op = instr->index();
   Register value = ToRegister(instr->value());
   Register scratch = scratch0();
   String::Encoding encoding = instr->encoding();
 
   if (FLAG_debug_code) {
-    __ lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
-    __ lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
+    __ lw(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+    __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
 
-    __ And(at, at, Operand(kStringRepresentationMask | kStringEncodingMask));
+    __ And(scratch, scratch,
+           Operand(kStringRepresentationMask | kStringEncodingMask));
     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
-    __ Subu(at, at, Operand(encoding == String::ONE_BYTE_ENCODING
+    __ Subu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
                                 ? one_byte_seq_type : two_byte_seq_type));
     __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
   }
 
-  __ Addu(scratch,
-          string,
-          Operand(SeqString::kHeaderSize - kHeapObjectTag));
-  if (encoding == String::ONE_BYTE_ENCODING) {
-    __ Addu(at, scratch, index);
-    __ sb(value, MemOperand(at));
+  if (index_op->IsConstantOperand()) {
+    int constant_index = ToInteger32(LConstantOperand::cast(index_op));
+    if (encoding == String::ONE_BYTE_ENCODING) {
+      __ sb(value,
+          FieldMemOperand(string, SeqString::kHeaderSize + constant_index));
+    } else {
+      __ sh(value,
+          FieldMemOperand(string, SeqString::kHeaderSize + constant_index * 2));
+    }
   } else {
-    __ sll(at, index, 1);
-    __ Addu(at, scratch, at);
-    __ sh(value, MemOperand(at));
+    Register index = ToRegister(index_op);
+    if (encoding == String::ONE_BYTE_ENCODING) {
+      __ Addu(scratch, string, Operand(index));
+      __ sb(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
+    } else {
+      __ sll(scratch, index, 1);
+      __ Addu(scratch, string, scratch);
+      __ sh(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
+    }
   }
 }
 
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index 4dc8022..38134f4 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -1783,11 +1783,9 @@
 
 LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
   LOperand* string = UseRegister(instr->string());
-  LOperand* index = UseRegister(instr->index());
-  LOperand* value = UseTempRegister(instr->value());
-  LSeqStringSetChar* result =
-      new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
-  return DefineAsRegister(result);
+  LOperand* index = UseRegisterOrConstant(instr->index());
+  LOperand* value = UseRegister(instr->value());
+  return new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
 }
 
 
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 58452ca..97bd872 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -374,30 +374,26 @@
                                             Register receiver,
                                             Register scratch1,
                                             Register scratch2,
-                                            Label* miss,
-                                            bool support_wrappers) {
+                                            Label* miss) {
   Label check_wrapper;
 
   // Check if the object is a string leaving the instance type in the
   // scratch1 register.
-  GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
-                      support_wrappers ? &check_wrapper : miss);
+  GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
 
   // Load length directly from the string.
   __ Ret(USE_DELAY_SLOT);
   __ lw(v0, FieldMemOperand(receiver, String::kLengthOffset));
 
-  if (support_wrappers) {
-    // Check if the object is a JSValue wrapper.
-    __ bind(&check_wrapper);
-    __ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE));
+  // Check if the object is a JSValue wrapper.
+  __ bind(&check_wrapper);
+  __ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE));
 
-    // Unwrap the value and check if the wrapped value is a string.
-    __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
-    GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
-    __ Ret(USE_DELAY_SLOT);
-    __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
-  }
+  // Unwrap the value and check if the wrapped value is a string.
+  __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
+  GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
+  __ Ret(USE_DELAY_SLOT);
+  __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
 }
 
 
diff --git a/src/object-observe.js b/src/object-observe.js
index 1035792..b09c42d 100644
--- a/src/object-observe.js
+++ b/src/object-observe.js
@@ -284,11 +284,6 @@
       arg.length < 0)
     return false;
 
-  var length = arg.length;
-  for (var i = 0; i < length; i++) {
-    if (!IS_STRING(arg[i]))
-      return false;
-  }
   return true;
 }
 
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 3716df1..acb00da 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -330,10 +330,11 @@
     }
   }
 
-  // TODO(hpayer): deal gracefully with partially constructed JSObjects, when
-  // allocation folding is turned off.
-  if (reinterpret_cast<Map*>(elements()) !=
-      GetHeap()->one_pointer_filler_map()) {
+  // If a GC was caused while constructing this object, the elements
+  // pointer may point to a one pointer filler map.
+  if ((FLAG_use_gvn && FLAG_use_allocation_folding) ||
+      (reinterpret_cast<Map*>(elements()) !=
+      GetHeap()->one_pointer_filler_map())) {
     CHECK_EQ((map()->has_fast_smi_or_object_elements() ||
               (elements() == GetHeap()->empty_fixed_array())),
              (elements()->map() == GetHeap()->fixed_array_map() ||
@@ -683,10 +684,11 @@
 void JSArray::JSArrayVerify() {
   JSObjectVerify();
   CHECK(length()->IsNumber() || length()->IsUndefined());
-  // TODO(hpayer): deal gracefully with partially constructed JSObjects, when
-  // allocation folding is turned off.
-  if (reinterpret_cast<Map*>(elements()) !=
-      GetHeap()->one_pointer_filler_map()) {
+  // If a GC was caused while constructing this array, the elements
+  // pointer may point to a one pointer filler map.
+  if ((FLAG_use_gvn && FLAG_use_allocation_folding) ||
+      (reinterpret_cast<Map*>(elements()) !=
+      GetHeap()->one_pointer_filler_map())) {
     CHECK(elements()->IsUndefined() ||
           elements()->IsFixedArray() ||
           elements()->IsFixedDoubleArray());
diff --git a/src/objects.cc b/src/objects.cc
index e37f6d1..bb78a8e 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -5635,71 +5635,78 @@
 }
 
 
-MUST_USE_RESULT MaybeObject* JSObject::DeepCopy(Isolate* isolate) {
-  StackLimitCheck check(isolate);
-  if (check.HasOverflowed()) return isolate->StackOverflow();
-
-  if (map()->is_deprecated()) {
-    MaybeObject* maybe_failure = MigrateInstance();
-    if (maybe_failure->IsFailure()) return maybe_failure;
-  }
-
+// TODO(mstarzinger): Temporary wrapper until handlified.
+static Handle<Object> NewStorageFor(Isolate* isolate,
+                                    Handle<Object> object,
+                                    Representation representation) {
   Heap* heap = isolate->heap();
-  Object* result;
-  { MaybeObject* maybe_result = heap->CopyJSObject(this);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
+  CALL_HEAP_FUNCTION(isolate,
+                     object->AllocateNewStorageFor(heap, representation),
+                     Object);
+}
+
+
+Handle<JSObject> JSObject::Copy(Handle<JSObject> object) {
+  Isolate* isolate = object->GetIsolate();
+  CALL_HEAP_FUNCTION(isolate,
+                     isolate->heap()->CopyJSObject(*object), JSObject);
+}
+
+
+Handle<JSObject> JSObject::DeepCopy(Handle<JSObject> object) {
+  Isolate* isolate = object->GetIsolate();
+  StackLimitCheck check(isolate);
+  if (check.HasOverflowed()) {
+    isolate->StackOverflow();
+    return Handle<JSObject>::null();
   }
-  JSObject* copy = JSObject::cast(result);
+
+  if (object->map()->is_deprecated()) {
+    MigrateInstance(object);
+  }
+
+  Handle<JSObject> copy = Copy(object);
 
   // Deep copy local properties.
   if (copy->HasFastProperties()) {
-    DescriptorArray* descriptors = copy->map()->instance_descriptors();
+    Handle<DescriptorArray> descriptors(copy->map()->instance_descriptors());
     int limit = copy->map()->NumberOfOwnDescriptors();
     for (int i = 0; i < limit; i++) {
       PropertyDetails details = descriptors->GetDetails(i);
       if (details.type() != FIELD) continue;
       int index = descriptors->GetFieldIndex(i);
-      Object* value = RawFastPropertyAt(index);
+      Handle<Object> value(object->RawFastPropertyAt(index), isolate);
       if (value->IsJSObject()) {
-        JSObject* js_object = JSObject::cast(value);
-        MaybeObject* maybe_copy = js_object->DeepCopy(isolate);
-        if (!maybe_copy->To(&value)) return maybe_copy;
+        value = DeepCopy(Handle<JSObject>::cast(value));
+        RETURN_IF_EMPTY_HANDLE_VALUE(isolate, value, Handle<JSObject>());
       } else {
         Representation representation = details.representation();
-        MaybeObject* maybe_storage =
-            value->AllocateNewStorageFor(heap, representation);
-        if (!maybe_storage->To(&value)) return maybe_storage;
+        value = NewStorageFor(isolate, value, representation);
       }
-      copy->FastPropertyAtPut(index, value);
+      copy->FastPropertyAtPut(index, *value);
     }
   } else {
-    { MaybeObject* maybe_result =
-          heap->AllocateFixedArray(copy->NumberOfLocalProperties());
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-    FixedArray* names = FixedArray::cast(result);
-    copy->GetLocalPropertyNames(names, 0);
+    Handle<FixedArray> names =
+        isolate->factory()->NewFixedArray(copy->NumberOfLocalProperties());
+    copy->GetLocalPropertyNames(*names, 0);
     for (int i = 0; i < names->length(); i++) {
       ASSERT(names->get(i)->IsString());
-      String* key_string = String::cast(names->get(i));
+      Handle<String> key_string(String::cast(names->get(i)));
       PropertyAttributes attributes =
-          copy->GetLocalPropertyAttribute(key_string);
+          copy->GetLocalPropertyAttribute(*key_string);
       // Only deep copy fields from the object literal expression.
       // In particular, don't try to copy the length attribute of
       // an array.
       if (attributes != NONE) continue;
-      Object* value =
-          copy->GetProperty(key_string, &attributes)->ToObjectUnchecked();
+      Handle<Object> value(
+          copy->GetProperty(*key_string, &attributes)->ToObjectUnchecked(),
+          isolate);
       if (value->IsJSObject()) {
-        JSObject* js_object = JSObject::cast(value);
-        { MaybeObject* maybe_result = js_object->DeepCopy(isolate);
-          if (!maybe_result->ToObject(&result)) return maybe_result;
-        }
-        { MaybeObject* maybe_result =
-              // Creating object copy for literals. No strict mode needed.
-              copy->SetProperty(key_string, result, NONE, kNonStrictMode);
-          if (!maybe_result->ToObject(&result)) return maybe_result;
-        }
+        Handle<Object> result = DeepCopy(Handle<JSObject>::cast(value));
+        RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>());
+        // Creating object copy for literals. No strict mode needed.
+        CHECK_NOT_EMPTY_HANDLE(isolate, SetProperty(
+            copy, key_string, result, NONE, kNonStrictMode));
       }
     }
   }
@@ -5712,8 +5719,8 @@
     case FAST_ELEMENTS:
     case FAST_HOLEY_SMI_ELEMENTS:
     case FAST_HOLEY_ELEMENTS: {
-      FixedArray* elements = FixedArray::cast(copy->elements());
-      if (elements->map() == heap->fixed_cow_array_map()) {
+      Handle<FixedArray> elements(FixedArray::cast(copy->elements()));
+      if (elements->map() == isolate->heap()->fixed_cow_array_map()) {
         isolate->counters()->cow_arrays_created_runtime()->Increment();
 #ifdef DEBUG
         for (int i = 0; i < elements->length(); i++) {
@@ -5722,34 +5729,31 @@
 #endif
       } else {
         for (int i = 0; i < elements->length(); i++) {
-          Object* value = elements->get(i);
+          Handle<Object> value(elements->get(i), isolate);
           ASSERT(value->IsSmi() ||
                  value->IsTheHole() ||
                  (IsFastObjectElementsKind(copy->GetElementsKind())));
           if (value->IsJSObject()) {
-            JSObject* js_object = JSObject::cast(value);
-            { MaybeObject* maybe_result = js_object->DeepCopy(isolate);
-              if (!maybe_result->ToObject(&result)) return maybe_result;
-            }
-            elements->set(i, result);
+            Handle<Object> result = DeepCopy(Handle<JSObject>::cast(value));
+            RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>());
+            elements->set(i, *result);
           }
         }
       }
       break;
     }
     case DICTIONARY_ELEMENTS: {
-      SeededNumberDictionary* element_dictionary = copy->element_dictionary();
+      Handle<SeededNumberDictionary> element_dictionary(
+          copy->element_dictionary());
       int capacity = element_dictionary->Capacity();
       for (int i = 0; i < capacity; i++) {
         Object* k = element_dictionary->KeyAt(i);
         if (element_dictionary->IsKey(k)) {
-          Object* value = element_dictionary->ValueAt(i);
+          Handle<Object> value(element_dictionary->ValueAt(i), isolate);
           if (value->IsJSObject()) {
-            JSObject* js_object = JSObject::cast(value);
-            { MaybeObject* maybe_result = js_object->DeepCopy(isolate);
-              if (!maybe_result->ToObject(&result)) return maybe_result;
-            }
-            element_dictionary->ValueAtPut(i, result);
+            Handle<Object> result = DeepCopy(Handle<JSObject>::cast(value));
+            RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>());
+            element_dictionary->ValueAtPut(i, *result);
           }
         }
       }
@@ -10432,8 +10436,8 @@
 }
 
 
-void Code::MakeCodeAgeSequenceYoung(byte* sequence) {
-  PatchPlatformCodeAge(sequence, kNoAge, NO_MARKING_PARITY);
+void Code::MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate) {
+  PatchPlatformCodeAge(isolate, sequence, kNoAge, NO_MARKING_PARITY);
 }
 
 
@@ -10444,7 +10448,9 @@
     MarkingParity code_parity;
     GetCodeAgeAndParity(sequence, &age, &code_parity);
     if (age != kLastCodeAge && code_parity != current_parity) {
-      PatchPlatformCodeAge(sequence, static_cast<Age>(age + 1),
+      PatchPlatformCodeAge(GetIsolate(),
+                           sequence,
+                           static_cast<Age>(age + 1),
                            current_parity);
     }
   }
@@ -10507,8 +10513,7 @@
 }
 
 
-Code* Code::GetCodeAgeStub(Age age, MarkingParity parity) {
-  Isolate* isolate = Isolate::Current();
+Code* Code::GetCodeAgeStub(Isolate* isolate, Age age, MarkingParity parity) {
   Builtins* builtins = isolate->builtins();
   switch (age) {
 #define HANDLE_CODE_AGE(AGE)                                            \
diff --git a/src/objects.h b/src/objects.h
index d3593b6..9ddf10f 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -2630,8 +2630,9 @@
   // Called the first time an object is observed with ES7 Object.observe.
   MUST_USE_RESULT MaybeObject* SetObserved(Isolate* isolate);
 
-  // Copy object
-  MUST_USE_RESULT MaybeObject* DeepCopy(Isolate* isolate);
+  // Copy object.
+  static Handle<JSObject> Copy(Handle<JSObject> object);
+  static Handle<JSObject> DeepCopy(Handle<JSObject> object);
 
   // Dispatched behavior.
   void JSObjectShortPrint(StringStream* accumulator);
@@ -5154,7 +5155,7 @@
   // being entered through the prologue.  Used to determine when it is
   // relatively safe to flush this code object and replace it with the lazy
   // compilation stub.
-  static void MakeCodeAgeSequenceYoung(byte* sequence);
+  static void MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate);
   void MakeOlder(MarkingParity);
   static bool IsYoungSequence(byte* sequence);
   bool IsOld();
@@ -5300,10 +5301,11 @@
                                   MarkingParity* parity);
   static void GetCodeAgeAndParity(byte* sequence, Age* age,
                                   MarkingParity* parity);
-  static Code* GetCodeAgeStub(Age age, MarkingParity parity);
+  static Code* GetCodeAgeStub(Isolate* isolate, Age age, MarkingParity parity);
 
   // Code aging -- platform-specific
-  static void PatchPlatformCodeAge(byte* sequence, Age age,
+  static void PatchPlatformCodeAge(Isolate* isolate,
+                                   byte* sequence, Age age,
                                    MarkingParity parity);
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
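For orientation, a hedged usage sketch of the handle-based JSObject::DeepCopy declared above. It mirrors the runtime.cc hunks later in this patch and assumes the usual v8::internal headers and namespace; the caller name is hypothetical:

  // Hypothetical caller, not part of this patch. DeepCopy now reports stack
  // overflow by returning an empty handle, so the result must be checked
  // before it is dereferenced.
  MaybeObject* CopyLiteralBoilerplate(Isolate* isolate,
                                      Handle<JSObject> boilerplate) {
    Handle<JSObject> copy = JSObject::DeepCopy(boilerplate);
    RETURN_IF_EMPTY_HANDLE(isolate, copy);  // returns the pending exception
    return *copy;
  }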
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
index 4d3b1e3..5903438 100644
--- a/src/platform-cygwin.cc
+++ b/src/platform-cygwin.cc
@@ -205,12 +205,6 @@
 }
 
 
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
-  // Not supported on Cygwin.
-  return 0;
-}
-
-
 // The VirtualMemory implementation is taken from platform-win32.cc.
 // The mmap-based virtual memory implementation as it is used on most posix
 // platforms does not work well because Cygwin does not support MAP_FIXED.
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index d818278..518ad31 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -199,10 +199,6 @@
 }
 
 
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
-  return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
-}
-
 
 // Constants used for mmap.
 static const int kMmapFd = -1;
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index b8b9602..74d473f 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -313,16 +313,6 @@
 }
 
 
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
-  // backtrace is a glibc extension.
-#if defined(__GLIBC__) && !defined(__UCLIBC__)
-  return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
-#else
-  return 0;
-#endif
-}
-
-
 // Constants used for mmap.
 static const int kMmapFd = -1;
 static const int kMmapFdOffset = 0;
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 67cc96f..a58bc1a 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -220,14 +220,6 @@
 }
 
 
-int OS::StackWalk(Vector<StackFrame> frames) {
-  // If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
-  if (backtrace == NULL) return 0;
-
-  return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
-}
-
-
 VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
 
 
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index 30a484f..4f5420e 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -231,34 +231,6 @@
 }
 
 
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
-  // backtrace is a glibc extension.
-  int frames_size = frames.length();
-  ScopedVector<void*> addresses(frames_size);
-
-  int frames_count = backtrace(addresses.start(), frames_size);
-
-  char** symbols = backtrace_symbols(addresses.start(), frames_count);
-  if (symbols == NULL) {
-    return kStackWalkError;
-  }
-
-  for (int i = 0; i < frames_count; i++) {
-    frames[i].address = addresses[i];
-    // Format a text representation of the frame based on the information
-    // available.
-    SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
-             "%s",
-             symbols[i]);
-    // Make sure line termination is in place.
-    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
-  }
-
-  free(symbols);
-
-  return frames_count;
-}
-
 
 // Constants used for mmap.
 static const int kMmapFd = -1;
diff --git a/src/platform-posix.h b/src/platform-posix.h
index 6b73387..e0fbc0c 100644
--- a/src/platform-posix.h
+++ b/src/platform-posix.h
@@ -39,7 +39,6 @@
 namespace internal {
 
 // Used by platform implementation files during OS::DumpBacktrace()
-// and OS::StackWalk().
 template<int (*backtrace)(void**, int),
          char** (*backtrace_symbols)(void* const*, int)>
 struct POSIXBacktraceHelper {
@@ -73,32 +72,6 @@
     fflush(stderr);
     free(symbols);
   }
-
-  static int StackWalk(Vector<OS::StackFrame> frames) {
-    int frames_size = frames.length();
-    ScopedVector<void*> addresses(frames_size);
-
-    int frames_count = backtrace(addresses.start(), frames_size);
-
-    char** symbols = backtrace_symbols(addresses.start(), frames_count);
-    if (symbols == NULL) {
-      return OS::kStackWalkError;
-    }
-
-    for (int i = 0; i < frames_count; i++) {
-      frames[i].address = addresses[i];
-      // Format a text representation of the frame based on the information
-      // available.
-      OS::SNPrintF(MutableCStrVector(frames[i].text, OS::kStackWalkMaxTextLen),
-                   "%s", symbols[i]);
-      // Make sure line termination is in place.
-      frames[i].text[OS::kStackWalkMaxTextLen - 1] = '\0';
-    }
-
-    free(symbols);
-
-    return frames_count;
-  }
 };
 
 } }  // namespace v8::internal
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index f082af1..df81c3a 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -211,20 +211,6 @@
 }
 
 
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
-  ucontext_t ctx;
-  struct StackWalker walker = { frames, 0 };
-
-  if (getcontext(&ctx) < 0) return kStackWalkError;
-
-  if (!walkcontext(&ctx, StackWalkCallback, &walker)) {
-    return kStackWalkError;
-  }
-
-  return walker.index;
-}
-
-
 // Constants used for mmap.
 static const int kMmapFd = -1;
 static const int kMmapFdOffset = 0;
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index ea4f7ea..073b21a 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -1208,133 +1208,9 @@
 }
 
 
-// Walk the stack using the facilities in dbghelp.dll and tlhelp32.dll
-
-// Switch off warning 4748 (/GS can not protect parameters and local variables
-// from local buffer overrun because optimizations are disabled in function) as
-// it is triggered by the use of inline assembler.
-#pragma warning(push)
-#pragma warning(disable : 4748)
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
-  BOOL ok;
-
-  // Load the required functions from DLL's.
-  if (!LoadDbgHelpAndTlHelp32()) return kStackWalkError;
-
-  // Get the process and thread handles.
-  HANDLE process_handle = GetCurrentProcess();
-  HANDLE thread_handle = GetCurrentThread();
-
-  // Read the symbols.
-  if (!LoadSymbols(Isolate::Current(), process_handle)) return kStackWalkError;
-
-  // Capture current context.
-  CONTEXT context;
-  RtlCaptureContext(&context);
-
-  // Initialize the stack walking
-  STACKFRAME64 stack_frame;
-  memset(&stack_frame, 0, sizeof(stack_frame));
-#ifdef  _WIN64
-  stack_frame.AddrPC.Offset = context.Rip;
-  stack_frame.AddrFrame.Offset = context.Rbp;
-  stack_frame.AddrStack.Offset = context.Rsp;
-#else
-  stack_frame.AddrPC.Offset = context.Eip;
-  stack_frame.AddrFrame.Offset = context.Ebp;
-  stack_frame.AddrStack.Offset = context.Esp;
-#endif
-  stack_frame.AddrPC.Mode = AddrModeFlat;
-  stack_frame.AddrFrame.Mode = AddrModeFlat;
-  stack_frame.AddrStack.Mode = AddrModeFlat;
-  int frames_count = 0;
-
-  // Collect stack frames.
-  int frames_size = frames.length();
-  while (frames_count < frames_size) {
-    ok = _StackWalk64(
-        IMAGE_FILE_MACHINE_I386,    // MachineType
-        process_handle,             // hProcess
-        thread_handle,              // hThread
-        &stack_frame,               // StackFrame
-        &context,                   // ContextRecord
-        NULL,                       // ReadMemoryRoutine
-        _SymFunctionTableAccess64,  // FunctionTableAccessRoutine
-        _SymGetModuleBase64,        // GetModuleBaseRoutine
-        NULL);                      // TranslateAddress
-    if (!ok) break;
-
-    // Store the address.
-    ASSERT((stack_frame.AddrPC.Offset >> 32) == 0);  // 32-bit address.
-    frames[frames_count].address =
-        reinterpret_cast<void*>(stack_frame.AddrPC.Offset);
-
-    // Try to locate a symbol for this frame.
-    DWORD64 symbol_displacement;
-    SmartArrayPointer<IMAGEHLP_SYMBOL64> symbol(
-        NewArray<IMAGEHLP_SYMBOL64>(kStackWalkMaxNameLen));
-    if (symbol.is_empty()) return kStackWalkError;  // Out of memory.
-    memset(*symbol, 0, sizeof(IMAGEHLP_SYMBOL64) + kStackWalkMaxNameLen);
-    (*symbol)->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL64);
-    (*symbol)->MaxNameLength = kStackWalkMaxNameLen;
-    ok = _SymGetSymFromAddr64(process_handle,             // hProcess
-                              stack_frame.AddrPC.Offset,  // Address
-                              &symbol_displacement,       // Displacement
-                              *symbol);                   // Symbol
-    if (ok) {
-      // Try to locate more source information for the symbol.
-      IMAGEHLP_LINE64 Line;
-      memset(&Line, 0, sizeof(Line));
-      Line.SizeOfStruct = sizeof(Line);
-      DWORD line_displacement;
-      ok = _SymGetLineFromAddr64(
-          process_handle,             // hProcess
-          stack_frame.AddrPC.Offset,  // dwAddr
-          &line_displacement,         // pdwDisplacement
-          &Line);                     // Line
-      // Format a text representation of the frame based on the information
-      // available.
-      if (ok) {
-        SNPrintF(MutableCStrVector(frames[frames_count].text,
-                                   kStackWalkMaxTextLen),
-                 "%s %s:%d:%d",
-                 (*symbol)->Name, Line.FileName, Line.LineNumber,
-                 line_displacement);
-      } else {
-        SNPrintF(MutableCStrVector(frames[frames_count].text,
-                                   kStackWalkMaxTextLen),
-                 "%s",
-                 (*symbol)->Name);
-      }
-      // Make sure line termination is in place.
-      frames[frames_count].text[kStackWalkMaxTextLen - 1] = '\0';
-    } else {
-      // No text representation of this frame
-      frames[frames_count].text[0] = '\0';
-
-      // Continue if we are just missing a module (for non C/C++ frames a
-      // module will never be found).
-      int err = GetLastError();
-      if (err != ERROR_MOD_NOT_FOUND) {
-        break;
-      }
-    }
-
-    frames_count++;
-  }
-
-  // Return the number of frames filled in.
-  return frames_count;
-}
-
-
-// Restore warnings to previous settings.
-#pragma warning(pop)
-
 #else  // __MINGW32__
 void OS::LogSharedLibraryAddresses(Isolate* isolate) { }
 void OS::SignalCodeMovingGC() { }
-int OS::StackWalk(Vector<OS::StackFrame> frames) { return 0; }
 #endif  // __MINGW32__
 
 
diff --git a/src/platform.h b/src/platform.h
index ee8fb92..aa50cb4 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -264,8 +264,6 @@
     char text[kStackWalkMaxTextLen];
   };
 
-  static int StackWalk(Vector<StackFrame> frames);
-
   class MemoryMappedFile {
    public:
     static MemoryMappedFile* open(const char* name);
diff --git a/src/runtime.cc b/src/runtime.cc
index c09fb1d..a698445 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -499,7 +499,10 @@
     // Update the functions literal and return the boilerplate.
     literals->set(literals_index, *boilerplate);
   }
-  return JSObject::cast(*boilerplate)->DeepCopy(isolate);
+
+  Handle<Object> copy = JSObject::DeepCopy(Handle<JSObject>::cast(boilerplate));
+  RETURN_IF_EMPTY_HANDLE(isolate, copy);
+  return *copy;
 }
 
 
@@ -564,8 +567,10 @@
       literals_index, elements);
   RETURN_IF_EMPTY_HANDLE(isolate, site);
 
-  JSObject* boilerplate = JSObject::cast(site->transition_info());
-  return boilerplate->DeepCopy(isolate);
+  Handle<JSObject> boilerplate(JSObject::cast(site->transition_info()));
+  Handle<JSObject> copy = JSObject::DeepCopy(boilerplate);
+  RETURN_IF_EMPTY_HANDLE(isolate, copy);
+  return *copy;
 }
 
 
@@ -14786,8 +14791,7 @@
 }
 
 
-void Runtime::PerformGC(Object* result) {
-  Isolate* isolate = Isolate::Current();
+void Runtime::PerformGC(Object* result, Isolate* isolate) {
   Failure* failure = Failure::cast(result);
   if (failure->IsRetryAfterGC()) {
     if (isolate->heap()->new_space()->AddFreshPage()) {
diff --git a/src/runtime.h b/src/runtime.h
index 60c6677..959d13f 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -838,7 +838,7 @@
       JSArrayBuffer* phantom_array_buffer);
 
   // Helper functions used by stubs.
-  static void PerformGC(Object* result);
+  static void PerformGC(Object* result, Isolate* isolate);
 
   // Used in runtime.cc and hydrogen's VisitArrayLiteral.
   static Handle<Object> CreateArrayLiteralBoilerplate(
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 63cb42b..16028d8 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -572,8 +572,7 @@
                                        Register receiver,
                                        Register scratch1,
                                        Register scratch2,
-                                       Label* miss_label,
-                                       bool support_wrappers);
+                                       Label* miss_label);
 
   static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
                                             Register receiver,
diff --git a/src/unique.h b/src/unique.h
index 7ae704a..38cc336 100644
--- a/src/unique.h
+++ b/src/unique.h
@@ -64,6 +64,10 @@
     handle_ = handle;
   }
 
+  // TODO(titzer): this is a hack to migrate to Unique<T> incrementally.
+  Unique(Address raw_address, Handle<T> handle)
+    : raw_address_(raw_address), handle_(handle) { }
+
   // Constructor for handling automatic up casting.
   // Ex. Unique<JSFunction> can be passed when Unique<Object> is expected.
   template <class S> Unique(Unique<S> uniq) {
@@ -138,7 +142,7 @@
   }
 
   // Compare this set against another set. O(|this|).
-  bool Equals(UniqueSet<T>* that) {
+  bool Equals(UniqueSet<T>* that) const {
     if (that->size_ != this->size_) return false;
     for (int i = 0; i < this->size_; i++) {
       if (this->array_[i] != that->array_[i]) return false;
@@ -146,8 +150,17 @@
     return true;
   }
 
+  template <typename U>
+  bool Contains(Unique<U> elem) const {
+    // TODO(titzer): use binary search for larger sets.
+    for (int i = 0; i < size_; i++) {
+      if (this->array_[i] == elem) return true;
+    }
+    return false;
+  }
+
   // Check if this set is a subset of the given set. O(|this| + |that|).
-  bool IsSubset(UniqueSet<T>* that) {
+  bool IsSubset(UniqueSet<T>* that) const {
     if (that->size_ < this->size_) return false;
     int j = 0;
     for (int i = 0; i < this->size_; i++) {
@@ -163,7 +176,7 @@
 
   // Returns a new set representing the intersection of this set and the other.
   // O(|this| + |that|).
-  UniqueSet<T>* Intersect(UniqueSet<T>* that, Zone* zone) {
+  UniqueSet<T>* Intersect(UniqueSet<T>* that, Zone* zone) const {
     if (that->size_ == 0 || this->size_ == 0) return new(zone) UniqueSet<T>();
 
     UniqueSet<T>* out = new(zone) UniqueSet<T>();
@@ -190,7 +203,7 @@
 
   // Returns a new set representing the union of this set and the other.
   // O(|this| + |that|).
-  UniqueSet<T>* Union(UniqueSet<T>* that, Zone* zone) {
+  UniqueSet<T>* Union(UniqueSet<T>* that, Zone* zone) const {
     if (that->size_ == 0) return this->Copy(zone);
     if (this->size_ == 0) return that->Copy(zone);
 
@@ -222,7 +235,7 @@
   }
 
   // Makes an exact copy of this set. O(|this| + |that|).
-  UniqueSet<T>* Copy(Zone* zone) {
+  UniqueSet<T>* Copy(Zone* zone) const {
     UniqueSet<T>* copy = new(zone) UniqueSet<T>();
     copy->size_ = this->size_;
     copy->capacity_ = this->size_;
@@ -231,10 +244,15 @@
     return copy;
   }
 
-  inline int size() {
+  inline int size() const {
     return size_;
   }
 
+  inline Unique<T> at(int index) const {
+    ASSERT(index >= 0 && index < size_);
+    return array_[index];
+  }
+
  private:
   // These sets should be small, since operations are implemented with simple
   // linear algorithms. Enforce a maximum size.
diff --git a/src/version.cc b/src/version.cc
index da82885..5d04816 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     21
-#define BUILD_NUMBER      15
+#define BUILD_NUMBER      16
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 81721c2..20895f9 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -600,6 +600,8 @@
   // the stub returns.
   __ subq(Operand(rsp, 0), Immediate(5));
   __ Pushad();
+  __ movq(arg_reg_2,
+          ExternalReference::isolate_address(masm->isolate()));
   __ movq(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
   {  // NOLINT
     FrameScope scope(masm, StackFrame::MANUAL);
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 51e1a53..d1130ad 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -1009,7 +1009,7 @@
   __ movsd(xmm0, FieldOperand(input, HeapNumber::kValueOffset));
   // Convert, convert back, and compare the two doubles' bits.
   __ cvttsd2siq(scratch2, xmm0);
-  __ cvtlsi2sd(xmm1, scratch2);
+  __ Cvtlsi2sd(xmm1, scratch2);
   __ movq(scratch1, xmm0);
   __ movq(scratch2, xmm1);
   __ cmpq(scratch1, scratch2);
@@ -1145,7 +1145,7 @@
     // Then load the bits of the double into rbx.
     __ SmiToInteger32(rax, rax);
     __ subq(rsp, Immediate(kDoubleSize));
-    __ cvtlsi2sd(xmm1, rax);
+    __ Cvtlsi2sd(xmm1, rax);
     __ movsd(Operand(rsp, 0), xmm1);
     __ movq(rbx, xmm1);
     __ movq(rdx, xmm1);
@@ -1477,9 +1477,9 @@
 
 void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
   __ SmiToInteger32(kScratchRegister, rdx);
-  __ cvtlsi2sd(xmm0, kScratchRegister);
+  __ Cvtlsi2sd(xmm0, kScratchRegister);
   __ SmiToInteger32(kScratchRegister, rax);
-  __ cvtlsi2sd(xmm1, kScratchRegister);
+  __ Cvtlsi2sd(xmm1, kScratchRegister);
 }
 
 
@@ -1503,12 +1503,12 @@
 
   __ bind(&load_smi_rdx);
   __ SmiToInteger32(kScratchRegister, rdx);
-  __ cvtlsi2sd(xmm0, kScratchRegister);
+  __ Cvtlsi2sd(xmm0, kScratchRegister);
   __ JumpIfNotSmi(rax, &load_nonsmi_rax);
 
   __ bind(&load_smi_rax);
   __ SmiToInteger32(kScratchRegister, rax);
-  __ cvtlsi2sd(xmm1, kScratchRegister);
+  __ Cvtlsi2sd(xmm1, kScratchRegister);
   __ bind(&done);
 }
 
@@ -1541,7 +1541,7 @@
   __ cvttsd2siq(smi_result, xmm0);
   // Check if conversion was successful by converting back and
   // comparing to the original double's bits.
-  __ cvtlsi2sd(xmm1, smi_result);
+  __ Cvtlsi2sd(xmm1, smi_result);
   __ movq(kScratchRegister, xmm1);
   __ cmpq(scratch2, kScratchRegister);
   __ j(not_equal, on_not_smis);
@@ -1560,7 +1560,7 @@
   __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
   __ movq(scratch2, xmm0);
   __ cvttsd2siq(smi_result, xmm0);
-  __ cvtlsi2sd(xmm1, smi_result);
+  __ Cvtlsi2sd(xmm1, smi_result);
   __ movq(kScratchRegister, xmm1);
   __ cmpq(scratch2, kScratchRegister);
   __ j(not_equal, on_not_smis);
@@ -1603,7 +1603,7 @@
 
   // Save 1 in double_result - we need this several times later on.
   __ movq(scratch, Immediate(1));
-  __ cvtlsi2sd(double_result, scratch);
+  __ Cvtlsi2sd(double_result, scratch);
 
   if (exponent_type_ == ON_STACK) {
     Label base_is_smi, unpack_exponent;
@@ -1623,7 +1623,7 @@
 
     __ bind(&base_is_smi);
     __ SmiToInteger32(base, base);
-    __ cvtlsi2sd(double_base, base);
+    __ Cvtlsi2sd(double_base, base);
     __ bind(&unpack_exponent);
 
     __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
@@ -1812,7 +1812,7 @@
   // and may not have contained the exponent value in the first place when the
   // input was a smi.  We reset it with exponent value before bailing out.
   __ j(not_equal, &done);
-  __ cvtlsi2sd(double_exponent, exponent);
+  __ Cvtlsi2sd(double_exponent, exponent);
 
   // Returning or bailing out.
   Counters* counters = masm->isolate()->counters();
@@ -1902,8 +1902,7 @@
     receiver = rax;
   }
 
-  StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss,
-                                         support_wrapper_);
+  StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss);
   __ bind(&miss);
   StubCompiler::TailCallBuiltin(
       masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
@@ -3619,6 +3618,7 @@
     // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
     // stack is known to be aligned. This function takes one argument which is
     // passed in register.
+    __ movq(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
     __ movq(arg_reg_1, rax);
     __ movq(kScratchRegister,
             ExternalReference::perform_gc_function(masm->isolate()));
@@ -5376,7 +5376,7 @@
   __ jmp(&left, Label::kNear);
   __ bind(&right_smi);
   __ SmiToInteger32(rcx, rax);  // Can't clobber rax yet.
-  __ cvtlsi2sd(xmm1, rcx);
+  __ Cvtlsi2sd(xmm1, rcx);
 
   __ bind(&left);
   __ JumpIfSmi(rdx, &left_smi, Label::kNear);
@@ -5386,7 +5386,7 @@
   __ jmp(&done);
   __ bind(&left_smi);
   __ SmiToInteger32(rcx, rdx);  // Can't clobber rdx yet.
-  __ cvtlsi2sd(xmm0, rcx);
+  __ Cvtlsi2sd(xmm0, rcx);
 
   __ bind(&done);
   // Compare operands
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 24773c2..b3f4eaf 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -386,7 +386,7 @@
   // rbx: current element (smi-tagged)
   __ JumpIfNotSmi(rbx, &convert_hole);
   __ SmiToInteger32(rbx, rbx);
-  __ cvtlsi2sd(xmm0, rbx);
+  __ Cvtlsi2sd(xmm0, rbx);
   __ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
            xmm0);
   __ jmp(&entry);
@@ -723,7 +723,8 @@
 }
 
 
-void Code::PatchPlatformCodeAge(byte* sequence,
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+                                byte* sequence,
                                 Code::Age age,
                                 MarkingParity parity) {
   uint32_t young_length;
@@ -732,7 +733,7 @@
     CopyBytes(sequence, young_sequence, young_length);
     CPU::FlushICache(sequence, young_length);
   } else {
-    Code* stub = GetCodeAgeStub(age, parity);
+    Code* stub = GetCodeAgeStub(isolate, age, parity);
     CodePatcher patcher(sequence, young_length);
     patcher.masm()->call(stub->instruction_start());
     for (int i = 0;
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 9dca6b3..6f4d3e3 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -3520,7 +3520,7 @@
     __ bind(&negative_sign);
     // Truncate, then compare and compensate.
     __ cvttsd2si(output_reg, input_reg);
-    __ cvtlsi2sd(xmm_scratch, output_reg);
+    __ Cvtlsi2sd(xmm_scratch, output_reg);
     __ ucomisd(input_reg, xmm_scratch);
     __ j(equal, &done, Label::kNear);
     __ subl(output_reg, Immediate(1));
@@ -3569,7 +3569,7 @@
   __ RecordComment("D2I conversion overflow");
   DeoptimizeIf(equal, instr->environment());
 
-  __ cvtlsi2sd(xmm_scratch, output_reg);
+  __ Cvtlsi2sd(xmm_scratch, output_reg);
   __ ucomisd(input_reg, xmm_scratch);
   __ j(equal, &restore, Label::kNear);
   __ subl(output_reg, Immediate(1));
@@ -4449,9 +4449,9 @@
   LOperand* output = instr->result();
   ASSERT(output->IsDoubleRegister());
   if (input->IsRegister()) {
-    __ cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
+    __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
   } else {
-    __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
+    __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
   }
 }
 
@@ -4623,7 +4623,7 @@
                                 bool deoptimize_on_minus_zero,
                                 LEnvironment* env,
                                 NumberUntagDMode mode) {
-  Label load_smi, done;
+  Label convert, load_smi, done;
 
   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
     // Smi check.
@@ -4632,25 +4632,17 @@
     // Heap number map check.
     __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                    Heap::kHeapNumberMapRootIndex);
-    if (!can_convert_undefined_to_nan) {
-      DeoptimizeIf(not_equal, env);
-    } else {
-      Label heap_number, convert;
-      __ j(equal, &heap_number, Label::kNear);
 
-      // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
-      __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
-      DeoptimizeIf(not_equal, env);
-
-      __ bind(&convert);
-      __ xorps(result_reg, result_reg);
-      __ divsd(result_reg, result_reg);
-      __ jmp(&done, Label::kNear);
-
-      __ bind(&heap_number);
-    }
-    // Heap number to XMM conversion.
+    // On x64 it is safe to load at heap number offset before evaluating the map
+    // check, since all heap objects are at least two words long.
     __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+
+    if (can_convert_undefined_to_nan) {
+      __ j(not_equal, &convert);
+    } else {
+      DeoptimizeIf(not_equal, env);
+    }
+
     if (deoptimize_on_minus_zero) {
       XMMRegister xmm_scratch = xmm0;
       __ xorps(xmm_scratch, xmm_scratch);
@@ -4661,6 +4653,18 @@
       DeoptimizeIf(not_zero, env);
     }
     __ jmp(&done, Label::kNear);
+
+    if (can_convert_undefined_to_nan) {
+      __ bind(&convert);
+
+      // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
+      __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
+      DeoptimizeIf(not_equal, env);
+
+      __ xorps(result_reg, result_reg);
+      __ divsd(result_reg, result_reg);
+      __ jmp(&done, Label::kNear);
+    }
   } else {
     ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
   }
@@ -4668,7 +4672,7 @@
   // Smi to XMM conversion
   __ bind(&load_smi);
   __ SmiToInteger32(kScratchRegister, input_reg);
-  __ cvtlsi2sd(result_reg, kScratchRegister);
+  __ Cvtlsi2sd(result_reg, kScratchRegister);
   __ bind(&done);
 }
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 69abc54..fa8cf18 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -936,6 +936,18 @@
 }
 
 
+void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
+  xorps(dst, dst);
+  cvtlsi2sd(dst, src);
+}
+
+
+void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
+  xorps(dst, dst);
+  cvtlsi2sd(dst, src);
+}
+
+
 void MacroAssembler::Set(Register dst, int64_t x) {
   if (x == 0) {
     xorl(dst, dst);
@@ -2917,7 +2929,7 @@
   // Value is a smi. convert to a double and store.
   // Preserve original value.
   SmiToInteger32(kScratchRegister, maybe_number);
-  cvtlsi2sd(xmm_scratch, kScratchRegister);
+  Cvtlsi2sd(xmm_scratch, kScratchRegister);
   movsd(FieldOperand(elements, index, times_8,
                      FixedDoubleArray::kHeaderSize - elements_offset),
         xmm_scratch);
@@ -3050,7 +3062,7 @@
                                Label* conversion_failed,
                                Label::Distance dst) {
   cvttsd2si(result_reg, input_reg);
-  cvtlsi2sd(xmm0, result_reg);
+  Cvtlsi2sd(xmm0, result_reg);
   ucomisd(xmm0, input_reg);
   j(not_equal, conversion_failed, dst);
   j(parity_even, conversion_failed, dst);  // NaN.
@@ -3087,7 +3099,7 @@
 
   movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
   cvttsd2si(result_reg, xmm0);
-  cvtlsi2sd(temp, result_reg);
+  Cvtlsi2sd(temp, result_reg);
   ucomisd(xmm0, temp);
   RecordComment("Deferred TaggedToI: lost precision");
   j(not_equal, lost_precision, dst);
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 09c8a80..a1b04d5 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -784,6 +784,12 @@
   void Set(Register dst, int64_t x);
   void Set(const Operand& dst, int64_t x);
 
+  // The cvtsi2sd instruction only writes to the low 64 bits of the dst
+  // register, which hinders register renaming and makes dependence chains
+  // longer. We therefore use xorps to clear the dst register before cvtsi2sd.
+  void Cvtlsi2sd(XMMRegister dst, Register src);
+  void Cvtlsi2sd(XMMRegister dst, const Operand& src);
+
   // Move if the registers are not identical.
   void Move(Register target, Register source);
 
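To make the rationale above concrete, a before/after sketch at one of the call sites converted in this patch (the register choices are only illustrative):

  // Before: cvtsi2sd merges its result into the low 64 bits of xmm1, so the
  // convert cannot start until the previous producer of xmm1 has finished.
  __ cvtlsi2sd(xmm1, rcx);

  // After: the macro zeroes xmm1 with xorps (a dependency-breaking idiom)
  // before converting, so the convert begins a fresh dependence chain.
  __ Cvtlsi2sd(xmm1, rcx);  // emits xorps xmm1, xmm1; then cvtsi2sd xmm1, rcx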
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 95276d5..6cd2487 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -304,32 +304,28 @@
                                             Register receiver,
                                             Register scratch1,
                                             Register scratch2,
-                                            Label* miss,
-                                            bool support_wrappers) {
+                                            Label* miss) {
   Label check_wrapper;
 
   // Check if the object is a string leaving the instance type in the
   // scratch register.
-  GenerateStringCheck(masm, receiver, scratch1, miss,
-                      support_wrappers ? &check_wrapper : miss);
+  GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
 
   // Load length directly from the string.
   __ movq(rax, FieldOperand(receiver, String::kLengthOffset));
   __ ret(0);
 
-  if (support_wrappers) {
-    // Check if the object is a JSValue wrapper.
-    __ bind(&check_wrapper);
-    __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
-    __ j(not_equal, miss);
+  // Check if the object is a JSValue wrapper.
+  __ bind(&check_wrapper);
+  __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
+  __ j(not_equal, miss);
 
-    // Check if the wrapped value is a string and load the length
-    // directly if it is.
-    __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
-    GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
-    __ movq(rax, FieldOperand(scratch2, String::kLengthOffset));
-    __ ret(0);
-  }
+  // Check if the wrapped value is a string and load the length
+  // directly if it is.
+  __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
+  GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
+  __ movq(rax, FieldOperand(scratch2, String::kLengthOffset));
+  __ ret(0);
 }
 
 
@@ -842,7 +838,7 @@
 
     __ JumpIfNotSmi(value_reg, &heap_number);
     __ SmiToInteger32(scratch1, value_reg);
-    __ cvtlsi2sd(xmm0, scratch1);
+    __ Cvtlsi2sd(xmm0, scratch1);
     __ jmp(&do_store);
 
     __ bind(&heap_number);
@@ -996,7 +992,7 @@
     Label do_store, heap_number;
     __ JumpIfNotSmi(value_reg, &heap_number);
     __ SmiToInteger32(scratch2, value_reg);
-    __ cvtlsi2sd(xmm0, scratch2);
+    __ Cvtlsi2sd(xmm0, scratch2);
     __ jmp(&do_store);
 
     __ bind(&heap_number);
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index 9d74011..9e4e907 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -814,7 +814,7 @@
   obj->SetElement(1, *second, NONE, kNonStrictMode)->ToObjectChecked();
 
   // Make the clone.
-  Handle<JSObject> clone = Copy(obj);
+  Handle<JSObject> clone = JSObject::Copy(obj);
   CHECK(!clone.is_identical_to(obj));
 
   CHECK_EQ(obj->GetElement(isolate, 0), clone->GetElement(isolate, 0));
diff --git a/test/cctest/test-unique.cc b/test/cctest/test-unique.cc
index 1d26858..d482a33 100644
--- a/test/cctest/test-unique.cc
+++ b/test/cctest/test-unique.cc
@@ -146,6 +146,74 @@
 }
 
 
+TEST(UniqueSet_Contains) {
+  CcTest::InitializeVM();
+  Isolate* isolate = Isolate::Current();
+  Factory* factory = isolate->factory();
+  HandleScope sc(isolate);
+
+  Unique<String> A(factory->InternalizeUtf8String("A"));
+  Unique<String> B(factory->InternalizeUtf8String("B"));
+  Unique<String> C(factory->InternalizeUtf8String("C"));
+
+  Zone zone(isolate);
+
+  UniqueSet<String>* set = new(&zone) UniqueSet<String>();
+
+  CHECK_EQ(0, set->size());
+  set->Add(A, &zone);
+  CHECK(set->Contains(A));
+  CHECK(!set->Contains(B));
+  CHECK(!set->Contains(C));
+
+  set->Add(A, &zone);
+  CHECK(set->Contains(A));
+  CHECK(!set->Contains(B));
+  CHECK(!set->Contains(C));
+
+  set->Add(B, &zone);
+  CHECK(set->Contains(A));
+  CHECK(set->Contains(B));
+
+  set->Add(C, &zone);
+  CHECK(set->Contains(A));
+  CHECK(set->Contains(B));
+  CHECK(set->Contains(C));
+}
+
+
+TEST(UniqueSet_At) {
+  CcTest::InitializeVM();
+  Isolate* isolate = Isolate::Current();
+  Factory* factory = isolate->factory();
+  HandleScope sc(isolate);
+
+  Unique<String> A(factory->InternalizeUtf8String("A"));
+  Unique<String> B(factory->InternalizeUtf8String("B"));
+  Unique<String> C(factory->InternalizeUtf8String("C"));
+
+  Zone zone(isolate);
+
+  UniqueSet<String>* set = new(&zone) UniqueSet<String>();
+
+  CHECK_EQ(0, set->size());
+  set->Add(A, &zone);
+  CHECK(A == set->at(0));
+
+  set->Add(A, &zone);
+  CHECK(A == set->at(0));
+
+  set->Add(B, &zone);
+  CHECK(A == set->at(0) || B == set->at(0));
+  CHECK(A == set->at(1) || B == set->at(1));
+
+  set->Add(C, &zone);
+  CHECK(A == set->at(0) || B == set->at(0) || C == set->at(0));
+  CHECK(A == set->at(1) || B == set->at(1) || C == set->at(1));
+  CHECK(A == set->at(2) || B == set->at(2) || C == set->at(2));
+}
+
+
 template <class T>
 static void CHECK_SETS(
     UniqueSet<T>* set1, UniqueSet<T>* set2, bool expected) {
diff --git a/test/mjsunit/harmony/object-observe.js b/test/mjsunit/harmony/object-observe.js
index 75f0ff8..f982a66 100644
--- a/test/mjsunit/harmony/object-observe.js
+++ b/test/mjsunit/harmony/object-observe.js
@@ -110,14 +110,16 @@
 
 
 // Object.observe
-assertThrows(function() { Object.observe("non-object", observer.callback); }, TypeError);
+assertThrows(function() { Object.observe("non-object", observer.callback); },
+             TypeError);
 assertThrows(function() { Object.observe(obj, nonFunction); }, TypeError);
 assertThrows(function() { Object.observe(obj, frozenFunction); }, TypeError);
-assertThrows(function() { Object.observe(obj, function() {}, 1); }, TypeError);
-assertThrows(function() { Object.observe(obj, function() {}, [undefined]); }, TypeError);
-assertThrows(function() { Object.observe(obj, function() {}, [1]); }, TypeError);
-assertThrows(function() { Object.observe(obj, function() {}, ['foo', null]); }, TypeError);
-assertEquals(obj, Object.observe(obj, observer.callback, ['foo', 'bar', 'baz']));
+assertEquals(obj, Object.observe(obj, observer.callback, [1]));
+assertEquals(obj, Object.observe(obj, observer.callback, [true]));
+assertEquals(obj, Object.observe(obj, observer.callback, ['foo', null]));
+assertEquals(obj, Object.observe(obj, observer.callback, [undefined]));
+assertEquals(obj, Object.observe(obj, observer.callback,
+             ['foo', 'bar', 'baz']));
 assertEquals(obj, Object.observe(obj, observer.callback, []));
 assertEquals(obj, Object.observe(obj, observer.callback, undefined));
 assertEquals(obj, Object.observe(obj, observer.callback));
@@ -202,6 +204,25 @@
   { object: obj, name: 'bar', type: 'deleted', expando2: 'str' }
 ]);
 
+// Non-string accept values are coerced to strings
+reset();
+Object.observe(obj, observer.callback, [true, 1, null, undefined]);
+notifier = Object.getNotifier(obj);
+notifier.notify({ type: 'true' });
+notifier.notify({ type: 'false' });
+notifier.notify({ type: '1' });
+notifier.notify({ type: '-1' });
+notifier.notify({ type: 'null' });
+notifier.notify({ type: 'nill' });
+notifier.notify({ type: 'undefined' });
+notifier.notify({ type: 'defined' });
+Object.deliverChangeRecords(observer.callback);
+observer.assertCallbackRecords([
+  { object: obj, type: 'true' },
+  { object: obj, type: '1' },
+  { object: obj, type: 'null' },
+  { object: obj, type: 'undefined' }
+]);
 
 // No delivery takes place if no records are pending
 reset();
@@ -307,7 +328,7 @@
 
 // Accept
 reset();
-Object.observe(obj, observer.callback, []);
+Object.observe(obj, observer.callback, ['somethingElse']);
 Object.getNotifier(obj).notify({
   type: 'new'
 });
@@ -1233,6 +1254,75 @@
   { object: array, name: '0', type: 'updated', oldValue: 2 },
 ]);
 
+// Splice emitted after Array mutation methods
+function MockArray(initial, observer) {
+  for (var i = 0; i < initial.length; i++)
+    this[i] = initial[i];
+
+  this.length_ = initial.length;
+  this.observer = observer;
+}
+MockArray.prototype = {
+  set length(length) {
+    Object.getNotifier(this).notify({ type: 'lengthChange' });
+    this.length_ = length;
+    Object.observe(this, this.observer.callback, ['splice']);
+  },
+  get length() {
+    return this.length_;
+  }
+}
+
+reset();
+var array = new MockArray([], observer);
+Object.observe(array, observer.callback, ['lengthChange']);
+Array.prototype.push.call(array, 1);
+Object.deliverChangeRecords(observer.callback);
+observer.assertCallbackRecords([
+  { object: array, type: 'lengthChange' },
+  { object: array, type: 'splice', index: 0, removed: [], addedCount: 1 },
+]);
+
+reset();
+var array = new MockArray([1], observer);
+Object.observe(array, observer.callback, ['lengthChange']);
+Array.prototype.pop.call(array);
+Object.deliverChangeRecords(observer.callback);
+observer.assertCallbackRecords([
+  { object: array, type: 'lengthChange' },
+  { object: array, type: 'splice', index: 0, removed: [1], addedCount: 0 },
+]);
+
+reset();
+var array = new MockArray([1], observer);
+Object.observe(array, observer.callback, ['lengthChange']);
+Array.prototype.shift.call(array);
+Object.deliverChangeRecords(observer.callback);
+observer.assertCallbackRecords([
+  { object: array, type: 'lengthChange' },
+  { object: array, type: 'splice', index: 0, removed: [1], addedCount: 0 },
+]);
+
+reset();
+var array = new MockArray([], observer);
+Object.observe(array, observer.callback, ['lengthChange']);
+Array.prototype.unshift.call(array, 1);
+Object.deliverChangeRecords(observer.callback);
+observer.assertCallbackRecords([
+  { object: array, type: 'lengthChange' },
+  { object: array, type: 'splice', index: 0, removed: [], addedCount: 1 },
+]);
+
+reset();
+var array = new MockArray([0, 1, 2], observer);
+Object.observe(array, observer.callback, ['lengthChange']);
+Array.prototype.splice.call(array, 1, 1);
+Object.deliverChangeRecords(observer.callback);
+observer.assertCallbackRecords([
+  { object: array, type: 'lengthChange' },
+  { object: array, type: 'splice', index: 1, removed: [1], addedCount: 0 },
+]);
+
 //
 // === PLAIN OBJECTS ===
 //
diff --git a/test/mjsunit/lithium/SeqStringSetChar.js b/test/mjsunit/lithium/SeqStringSetChar.js
new file mode 100644
index 0000000..3c890a8
--- /dev/null
+++ b/test/mjsunit/lithium/SeqStringSetChar.js
@@ -0,0 +1,46 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function MyStringFromCharCode(code, i) {
+  var one_byte = %NewString(3, true);
+  %_OneByteSeqStringSetChar(one_byte, 0, code);
+  %_OneByteSeqStringSetChar(one_byte, 1, code);
+  %_OneByteSeqStringSetChar(one_byte, i, code);
+  var two_byte = %NewString(3, false);
+  %_TwoByteSeqStringSetChar(two_byte, 0, code);
+  %_TwoByteSeqStringSetChar(two_byte, 1, code);
+  %_TwoByteSeqStringSetChar(two_byte, i, code);
+  return one_byte + two_byte;
+}
+
+MyStringFromCharCode(65, 2);
+var r1 = MyStringFromCharCode(65, 2);
+%OptimizeFunctionOnNextCall(MyStringFromCharCode);
+var r2 = MyStringFromCharCode(65, 2);
+assertEquals(r1, r2);