Roll back trunk to 3.21.15.

BUG=
R=mstarzinger@chromium.org

Review URL: https://codereview.chromium.org/23890030

git-svn-id: http://v8.googlecode.com/svn/trunk@16747 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/api.cc b/src/api.cc
index 5b41f92..71a8f4a 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -3813,7 +3813,7 @@
   ENTER_V8(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
   EXCEPTION_PREAMBLE(isolate);
-  i::Handle<i::JSObject> result = i::JSObject::Copy(self);
+  i::Handle<i::JSObject> result = i::Copy(self);
   has_pending_exception = result.is_null();
   EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
   return Utils::ToLocal(result);
@@ -6212,7 +6212,7 @@
   i::Handle<i::JSObject> paragon_handle(i::JSObject::cast(paragon));
   EXCEPTION_PREAMBLE(isolate);
   ENTER_V8(isolate);
-  i::Handle<i::JSObject> result = i::JSObject::Copy(paragon_handle);
+  i::Handle<i::JSObject> result = i::Copy(paragon_handle);
   has_pending_exception = result.is_null();
   EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
   return Utils::ToLocal(result);
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index b7a343f..f60e1f8 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -807,13 +807,12 @@
   // The following registers must be saved and restored when calling through to
   // the runtime:
   //   r0 - contains return address (beginning of patch sequence)
-  //   r1 - isolate
+  //   r1 - function object
   FrameScope scope(masm, StackFrame::MANUAL);
   __ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
-  __ PrepareCallCFunction(1, 0, r2);
-  __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
+  __ PrepareCallCFunction(1, 0, r1);
   __ CallCFunction(
-      ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+      ExternalReference::get_make_code_young_function(masm->isolate()), 1);
   __ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
   __ mov(pc, r0);
 }
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 39b2dc5..cd1809f 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -2765,10 +2765,9 @@
 
   if (do_gc) {
     // Passing r0.
-    __ PrepareCallCFunction(2, 0, r1);
-    __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
+    __ PrepareCallCFunction(1, 0, r1);
     __ CallCFunction(ExternalReference::perform_gc_function(isolate),
-        2, 0);
+        1, 0);
   }
 
   ExternalReference scope_depth =
@@ -3376,7 +3375,8 @@
     receiver = r0;
   }
 
-  StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss);
+  StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss,
+                                         support_wrapper_);
 
   __ bind(&miss);
   StubCompiler::TailCallBuiltin(
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 732d9f3..1bcf3e3 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -870,8 +870,7 @@
 }
 
 
-void Code::PatchPlatformCodeAge(Isolate* isolate,
-                                byte* sequence,
+void Code::PatchPlatformCodeAge(byte* sequence,
                                 Code::Age age,
                                 MarkingParity parity) {
   uint32_t young_length;
@@ -880,7 +879,7 @@
     CopyBytes(sequence, young_sequence, young_length);
     CPU::FlushICache(sequence, young_length);
   } else {
-    Code* stub = GetCodeAgeStub(isolate, age, parity);
+    Code* stub = GetCodeAgeStub(age, parity);
     CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
     patcher.masm()->add(r0, pc, Operand(-8));
     patcher.masm()->ldr(pc, MemOperand(pc, -4));
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 7f504ea..59a8818 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -1883,9 +1883,11 @@
 
 LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
   LOperand* string = UseRegister(instr->string());
-  LOperand* index = UseRegisterOrConstant(instr->index());
-  LOperand* value = UseRegister(instr->value());
-  return new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
+  LOperand* index = UseRegister(instr->index());
+  LOperand* value = UseTempRegister(instr->value());
+  LSeqStringSetChar* result =
+      new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
+  return DefineAsRegister(result);
 }
 
 
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index b7fc3b6..7f65023 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -1975,42 +1975,32 @@
 
 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
   Register string = ToRegister(instr->string());
-  LOperand* index_op = instr->index();
+  Register index = ToRegister(instr->index());
   Register value = ToRegister(instr->value());
-  Register scratch = scratch0();
   String::Encoding encoding = instr->encoding();
 
   if (FLAG_debug_code) {
-    __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
-    __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+    __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
+    __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
 
-    __ and_(scratch, scratch,
-            Operand(kStringRepresentationMask | kStringEncodingMask));
+    __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
-    __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING
-                            ? one_byte_seq_type : two_byte_seq_type));
+    __ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING
+                           ? one_byte_seq_type : two_byte_seq_type));
     __ Check(eq, kUnexpectedStringType);
   }
 
-  if (index_op->IsConstantOperand()) {
-    int constant_index = ToInteger32(LConstantOperand::cast(index_op));
-    if (encoding == String::ONE_BYTE_ENCODING) {
-      __ strb(value,
-              FieldMemOperand(string, SeqString::kHeaderSize + constant_index));
-    } else {
-      __ strh(value,
-          FieldMemOperand(string, SeqString::kHeaderSize + constant_index * 2));
-    }
+  __ add(ip,
+         string,
+         Operand(SeqString::kHeaderSize - kHeapObjectTag));
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ strb(value, MemOperand(ip, index));
   } else {
-    Register index = ToRegister(index_op);
-    if (encoding == String::ONE_BYTE_ENCODING) {
-      __ add(scratch, string, Operand(index));
-      __ strb(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
-    } else {
-      __ add(scratch, string, Operand(index, LSL, 1));
-      __ strh(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
-    }
+    // MemOperand with ip as the base register is not allowed for strh, so
+    // we do the address calculation explicitly.
+    __ add(ip, ip, Operand(index, LSL, 1));
+    __ strh(value, MemOperand(ip));
   }
 }
 
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 681299e..085af3f 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -380,27 +380,31 @@
                                             Register receiver,
                                             Register scratch1,
                                             Register scratch2,
-                                            Label* miss) {
+                                            Label* miss,
+                                            bool support_wrappers) {
   Label check_wrapper;
 
   // Check if the object is a string leaving the instance type in the
   // scratch1 register.
-  GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
+  GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
+                      support_wrappers ? &check_wrapper : miss);
 
   // Load length directly from the string.
   __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
   __ Ret();
 
-  // Check if the object is a JSValue wrapper.
-  __ bind(&check_wrapper);
-  __ cmp(scratch1, Operand(JS_VALUE_TYPE));
-  __ b(ne, miss);
+  if (support_wrappers) {
+    // Check if the object is a JSValue wrapper.
+    __ bind(&check_wrapper);
+    __ cmp(scratch1, Operand(JS_VALUE_TYPE));
+    __ b(ne, miss);
 
-  // Unwrap the value and check if the wrapped value is a string.
-  __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
-  GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
-  __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
-  __ Ret();
+    // Unwrap the value and check if the wrapped value is a string.
+    __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
+    GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
+    __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
+    __ Ret();
+  }
 }
 
 
diff --git a/src/array.js b/src/array.js
index 4a7aea5..5f89ebb 100644
--- a/src/array.js
+++ b/src/array.js
@@ -399,13 +399,14 @@
   n--;
   var value = this[n];
 
+  EnqueueSpliceRecord(this, n, [value], 0);
+
   try {
     BeginPerformSplice(this);
     delete this[n];
     this.length = n;
   } finally {
     EndPerformSplice(this);
-    EnqueueSpliceRecord(this, n, [value], 0);
   }
 
   return value;
@@ -440,6 +441,8 @@
   var n = TO_UINT32(this.length);
   var m = %_ArgumentsLength();
 
+  EnqueueSpliceRecord(this, n, [], m);
+
   try {
     BeginPerformSplice(this);
     for (var i = 0; i < m; i++) {
@@ -448,7 +451,6 @@
     this.length = n + m;
   } finally {
     EndPerformSplice(this);
-    EnqueueSpliceRecord(this, n, [], m);
   }
 
   return this.length;
@@ -579,13 +581,14 @@
 function ObservedArrayShift(len) {
   var first = this[0];
 
+  EnqueueSpliceRecord(this, 0, [first], 0);
+
   try {
     BeginPerformSplice(this);
     SimpleMove(this, 0, 1, len, 0);
     this.length = len - 1;
   } finally {
     EndPerformSplice(this);
-    EnqueueSpliceRecord(this, 0, [first], 0);
   }
 
   return first;
@@ -624,6 +627,8 @@
   var len = TO_UINT32(this.length);
   var num_arguments = %_ArgumentsLength();
 
+  EnqueueSpliceRecord(this, 0, [], num_arguments);
+
   try {
     BeginPerformSplice(this);
     SimpleMove(this, 0, 0, len, num_arguments);
@@ -633,7 +638,6 @@
     this.length = len + num_arguments;
   } finally {
     EndPerformSplice(this);
-    EnqueueSpliceRecord(this, 0, [], num_arguments);
   }
 
   return len + num_arguments;
diff --git a/src/ast.cc b/src/ast.cc
index 5f085d3..823dede 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -460,7 +460,10 @@
   receiver_types_.Clear();
   if (key()->IsPropertyName()) {
     FunctionPrototypeStub proto_stub(Code::LOAD_IC);
-    if (oracle->LoadIsStub(this, &proto_stub)) {
+    StringLengthStub string_stub(Code::LOAD_IC, false);
+    if (oracle->LoadIsStub(this, &string_stub)) {
+      is_string_length_ = true;
+    } else if (oracle->LoadIsStub(this, &proto_stub)) {
       is_function_prototype_ = true;
     } else {
       Literal* lit_key = key()->AsLiteral();
diff --git a/src/ast.h b/src/ast.h
index 71a51ab..c630906 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1646,6 +1646,7 @@
 
   BailoutId LoadId() const { return load_id_; }
 
+  bool IsStringLength() const { return is_string_length_; }
   bool IsStringAccess() const { return is_string_access_; }
   bool IsFunctionPrototype() const { return is_function_prototype_; }
 
@@ -1673,6 +1674,7 @@
         load_id_(GetNextId(isolate)),
         is_monomorphic_(false),
         is_uninitialized_(false),
+        is_string_length_(false),
         is_string_access_(false),
         is_function_prototype_(false) { }
 
@@ -1685,6 +1687,7 @@
   SmallMapList receiver_types_;
   bool is_monomorphic_ : 1;
   bool is_uninitialized_ : 1;
+  bool is_string_length_ : 1;
   bool is_string_access_ : 1;
   bool is_function_prototype_ : 1;
 };
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index 23d5442..23d4269 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -357,45 +357,40 @@
 
   HObjectAccess access = HObjectAccess::ForAllocationSiteTransitionInfo();
   HInstruction* boilerplate = Add<HLoadNamedField>(allocation_site, access);
-  HValue* push_value;
   if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS) {
     HValue* elements = AddLoadElements(boilerplate);
 
     IfBuilder if_fixed_cow(this);
     if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map());
     if_fixed_cow.Then();
-    push_value = BuildCloneShallowArray(boilerplate,
-                                        allocation_site,
-                                        alloc_site_mode,
-                                        FAST_ELEMENTS,
-                                        0/*copy-on-write*/);
-    environment()->Push(push_value);
+    environment()->Push(BuildCloneShallowArray(boilerplate,
+                                               allocation_site,
+                                               alloc_site_mode,
+                                               FAST_ELEMENTS,
+                                               0/*copy-on-write*/));
     if_fixed_cow.Else();
 
     IfBuilder if_fixed(this);
     if_fixed.If<HCompareMap>(elements, factory->fixed_array_map());
     if_fixed.Then();
-    push_value = BuildCloneShallowArray(boilerplate,
-                                        allocation_site,
-                                        alloc_site_mode,
-                                        FAST_ELEMENTS,
-                                        length);
-    environment()->Push(push_value);
+    environment()->Push(BuildCloneShallowArray(boilerplate,
+                                               allocation_site,
+                                               alloc_site_mode,
+                                               FAST_ELEMENTS,
+                                               length));
     if_fixed.Else();
-    push_value = BuildCloneShallowArray(boilerplate,
-                                        allocation_site,
-                                        alloc_site_mode,
-                                        FAST_DOUBLE_ELEMENTS,
-                                        length);
-    environment()->Push(push_value);
+    environment()->Push(BuildCloneShallowArray(boilerplate,
+                                               allocation_site,
+                                               alloc_site_mode,
+                                               FAST_DOUBLE_ELEMENTS,
+                                               length));
   } else {
     ElementsKind elements_kind = casted_stub()->ComputeElementsKind();
-    push_value = BuildCloneShallowArray(boilerplate,
-                                        allocation_site,
-                                        alloc_site_mode,
-                                        elements_kind,
-                                        length);
-    environment()->Push(push_value);
+    environment()->Push(BuildCloneShallowArray(boilerplate,
+                                               allocation_site,
+                                               alloc_site_mode,
+                                               elements_kind,
+                                               length));
   }
 
   checker.ElseDeopt("Uninitialized boilerplate literals");
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 30ec1c7..946eb76 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -830,12 +830,19 @@
 
 class StringLengthStub: public ICStub {
  public:
-  explicit StringLengthStub(Code::Kind kind) : ICStub(kind) { }
+  StringLengthStub(Code::Kind kind, bool support_wrapper)
+      : ICStub(kind), support_wrapper_(support_wrapper) { }
   virtual void Generate(MacroAssembler* masm);
 
  private:
   STATIC_ASSERT(KindBits::kSize == 4);
-    virtual CodeStub::Major MajorKey() { return StringLength; }
+  class WrapperModeBits: public BitField<bool, 4, 1> {};
+  virtual CodeStub::Major MajorKey() { return StringLength; }
+  virtual int MinorKey() {
+    return KindBits::encode(kind()) | WrapperModeBits::encode(support_wrapper_);
+  }
+
+  bool support_wrapper_;
 };
 
 
diff --git a/src/handles.cc b/src/handles.cc
index 033fdab..b3704df 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -294,6 +294,21 @@
 }
 
 
+Handle<JSObject> Copy(Handle<JSObject> obj) {
+  Isolate* isolate = obj->GetIsolate();
+  CALL_HEAP_FUNCTION(isolate,
+                     isolate->heap()->CopyJSObject(*obj), JSObject);
+}
+
+
+Handle<JSObject> DeepCopy(Handle<JSObject> obj) {
+  Isolate* isolate = obj->GetIsolate();
+  CALL_HEAP_FUNCTION(isolate,
+                     obj->DeepCopy(isolate),
+                     JSObject);
+}
+
+
 // Wrappers for scripts are kept alive and cached in weak global
 // handles referred from foreign objects held by the scripts as long as
 // they are used. When they are not used anymore, the garbage
diff --git a/src/handles.h b/src/handles.h
index 585f7b4..c3e4dca 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -255,6 +255,10 @@
 Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate,
                                                    uint32_t index);
 
+Handle<JSObject> Copy(Handle<JSObject> obj);
+
+Handle<JSObject> DeepCopy(Handle<JSObject> obj);
+
 Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray>,
                                       Handle<JSArray> array);
 
diff --git a/src/heap.cc b/src/heap.cc
index 108cfb3..24e4039 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -4310,10 +4310,6 @@
   AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
       reinterpret_cast<Address>(result) + map->instance_size());
   alloc_memento->set_map_no_write_barrier(allocation_memento_map());
-
-  // TODO(mvstanton): To diagnose bug 284577, some extra checks
-  CHECK(allocation_site->map() == allocation_site_map());
-
   alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
   return result;
 }
@@ -5057,10 +5053,6 @@
       AllocationMemento* alloc_memento;
       if (maybe_alloc_memento->To(&alloc_memento)) {
         alloc_memento->set_map_no_write_barrier(allocation_memento_map());
-
-        // TODO(mvstanton): To diagnose bug 284577, some extra checks
-        CHECK(site->map() == allocation_site_map());
-
         alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
       }
     }
@@ -5083,10 +5075,6 @@
     AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
         reinterpret_cast<Address>(clone) + object_size);
     alloc_memento->set_map_no_write_barrier(allocation_memento_map());
-
-    // TODO(mvstanton): To diagnose bug 284577, some extra checks
-    CHECK(site->map() == allocation_site_map());
-
     alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
   }
 
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 7d3b879..7d33141 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -1303,8 +1303,6 @@
 // Inserts an int3/stop break instruction for debugging purposes.
 class HDebugBreak V8_FINAL : public HTemplateInstruction<0> {
  public:
-  DECLARE_INSTRUCTION_FACTORY_P0(HDebugBreak);
-
   virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
     return Representation::None();
   }
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index e1b89a4..23c373f 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -1831,19 +1831,6 @@
   Handle<Map> alloc_memento_map(
       isolate()->heap()->allocation_memento_map());
   AddStoreMapConstant(alloc_memento, alloc_memento_map);
-
-  {
-    // TODO(mvstanton): the code below is turned on to diagnose chromium bug
-    // 284577.
-    Handle<Map> alloc_site_map(isolate()->heap()->allocation_site_map());
-    IfBuilder builder(this);
-    builder.If<HCompareMap>(alloc_site, alloc_site_map);
-    builder.Then();
-    builder.Else();
-    Add<HDebugBreak>();
-    builder.End();
-  }
-
   HObjectAccess access = HObjectAccess::ForAllocationMementoSite();
   Add<HStoreNamedField>(alloc_memento, access, alloc_site);
   return alloc_memento;
@@ -4185,7 +4172,8 @@
       IsFastLiteral(Handle<JSObject>::cast(boilerplate),
                     kMaxFastLiteralDepth,
                     &max_properties)) {
-    Handle<JSObject> boilerplate_object = Handle<JSObject>::cast(boilerplate);
+    Handle<JSObject> boilerplate_object =
+        Handle<JSObject>::cast(boilerplate);
 
     literal = BuildFastLiteral(boilerplate_object,
                                Handle<Object>::null(),
@@ -5148,7 +5136,9 @@
     CHECK_ALIVE(VisitForValue(prop->obj()));
     HValue* object = Top();
     HValue* key = NULL;
-    if ((!prop->IsFunctionPrototype() && !prop->key()->IsPropertyName()) ||
+    if ((!prop->IsStringLength() &&
+         !prop->IsFunctionPrototype() &&
+         !prop->key()->IsPropertyName()) ||
         prop->IsStringAccess()) {
       CHECK_ALIVE(VisitForValue(prop->key()));
       key = Top();
@@ -5838,20 +5828,17 @@
 }
 
 
-static bool AreStringTypes(SmallMapList* types) {
-  if (types == NULL || types->length() == 0) return false;
-  for (int i = 0; i < types->length(); i++) {
-    if (types->at(i)->instance_type() >= FIRST_NONSTRING_TYPE) return false;
-  }
-  return true;
-}
-
-
 void HOptimizedGraphBuilder::BuildLoad(Property* expr,
                                        int position,
                                        BailoutId ast_id) {
   HInstruction* instr = NULL;
-  if (expr->IsStringAccess()) {
+  if (expr->IsStringLength()) {
+    HValue* string = Pop();
+    BuildCheckHeapObject(string);
+    HInstruction* checkstring =
+        AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
+    instr = BuildLoadStringLength(string, checkstring);
+  } else if (expr->IsStringAccess()) {
     HValue* index = Pop();
     HValue* string = Pop();
     HValue* context = environment()->context();
@@ -5887,12 +5874,6 @@
       } else {
         instr = BuildLoadNamedMonomorphic(Pop(), name, map);
       }
-    } else if (AreStringTypes(types) &&
-               name->Equals(isolate()->heap()->length_string())) {
-      BuildCheckHeapObject(Pop());
-      HValue* checked_object =
-          AddInstruction(HCheckInstanceType::NewIsString(object, zone()));
-      instr = BuildLoadStringLength(object, checked_object);
     } else if (types != NULL && types->length() > 1) {
       return HandlePolymorphicLoadNamedField(
           position, ast_id, Pop(), types, name);
@@ -5933,7 +5914,9 @@
   if (TryArgumentsAccess(expr)) return;
 
   CHECK_ALIVE(VisitForValue(expr->obj()));
-  if ((!expr->IsFunctionPrototype() && !expr->key()->IsPropertyName()) ||
+  if ((!expr->IsStringLength() &&
+       !expr->IsFunctionPrototype() &&
+       !expr->key()->IsPropertyName()) ||
       expr->IsStringAccess()) {
     CHECK_ALIVE(VisitForValue(expr->key()));
   }
@@ -7584,7 +7567,9 @@
   HValue* object = Top();
 
   HValue* key = NULL;
-  if ((!prop->IsFunctionPrototype() && !prop->key()->IsPropertyName()) ||
+  if ((!prop->IsStringLength() &&
+       !prop->IsFunctionPrototype() &&
+       !prop->key()->IsPropertyName()) ||
       prop->IsStringAccess()) {
     CHECK_ALIVE(VisitForValue(prop->key()));
     key = Top();
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 91eba98..a159748 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -539,12 +539,10 @@
   __ mov(eax, Operand(esp, 8 * kPointerSize));
   {
     FrameScope scope(masm, StackFrame::MANUAL);
-    __ PrepareCallCFunction(2, ebx);
-    __ mov(Operand(esp, 1 * kPointerSize),
-           Immediate(ExternalReference::isolate_address(masm->isolate())));
+    __ PrepareCallCFunction(1, ebx);
     __ mov(Operand(esp, 0), eax);
     __ CallCFunction(
-        ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+        ExternalReference::get_make_code_young_function(masm->isolate()), 1);
   }
   __ popad();
   __ ret(0);
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 3ea749f..a83c1ae 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -984,7 +984,7 @@
           ASSERT_EQ(Token::SHL, op);
           if (CpuFeatures::IsSupported(SSE2)) {
             CpuFeatureScope use_sse2(masm, SSE2);
-            __ Cvtsi2sd(xmm0, left);
+            __ cvtsi2sd(xmm0, left);
             __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
           } else {
             __ mov(Operand(esp, 1 * kPointerSize), left);
@@ -1370,7 +1370,7 @@
         // Store the result in the HeapNumber and return.
         if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatureScope use_sse2(masm, SSE2);
-          __ Cvtsi2sd(xmm0, ebx);
+          __ cvtsi2sd(xmm0, ebx);
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
         } else {
           __ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -1594,7 +1594,7 @@
         // Store the result in the HeapNumber and return.
         if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatureScope use_sse2(masm, SSE2);
-          __ Cvtsi2sd(xmm0, ebx);
+          __ cvtsi2sd(xmm0, ebx);
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
         } else {
           __ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -1782,7 +1782,7 @@
         // Store the result in the HeapNumber and return.
         if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatureScope use_sse2(masm, SSE2);
-          __ Cvtsi2sd(xmm0, ebx);
+          __ cvtsi2sd(xmm0, ebx);
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
         } else {
           __ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -2329,12 +2329,12 @@
   __ jmp(not_numbers);  // Argument in eax is not a number.
   __ bind(&load_smi_edx);
   __ SmiUntag(edx);  // Untag smi before converting to float.
-  __ Cvtsi2sd(xmm0, edx);
+  __ cvtsi2sd(xmm0, edx);
   __ SmiTag(edx);  // Retag smi for heap number overwriting test.
   __ jmp(&load_eax);
   __ bind(&load_smi_eax);
   __ SmiUntag(eax);  // Untag smi before converting to float.
-  __ Cvtsi2sd(xmm1, eax);
+  __ cvtsi2sd(xmm1, eax);
   __ SmiTag(eax);  // Retag smi for heap number overwriting test.
   __ jmp(&done, Label::kNear);
   __ bind(&load_float_eax);
@@ -2350,11 +2350,11 @@
   __ mov(scratch, left);
   ASSERT(!scratch.is(right));  // We're about to clobber scratch.
   __ SmiUntag(scratch);
-  __ Cvtsi2sd(xmm0, scratch);
+  __ cvtsi2sd(xmm0, scratch);
 
   __ mov(scratch, right);
   __ SmiUntag(scratch);
-  __ Cvtsi2sd(xmm1, scratch);
+  __ cvtsi2sd(xmm1, scratch);
 }
 
 
@@ -2365,7 +2365,7 @@
                                                   Register scratch,
                                                   XMMRegister xmm_scratch) {
   __ cvttsd2si(int32_result, Operand(operand));
-  __ Cvtsi2sd(xmm_scratch, int32_result);
+  __ cvtsi2sd(xmm_scratch, int32_result);
   __ pcmpeqd(xmm_scratch, operand);
   __ movmskps(scratch, xmm_scratch);
   // Two least significant bits should be both set.
@@ -2470,7 +2470,7 @@
 
   // Save 1 in double_result - we need this several times later on.
   __ mov(scratch, Immediate(1));
-  __ Cvtsi2sd(double_result, scratch);
+  __ cvtsi2sd(double_result, scratch);
 
   if (exponent_type_ == ON_STACK) {
     Label base_is_smi, unpack_exponent;
@@ -2490,7 +2490,7 @@
 
     __ bind(&base_is_smi);
     __ SmiUntag(base);
-    __ Cvtsi2sd(double_base, base);
+    __ cvtsi2sd(double_base, base);
 
     __ bind(&unpack_exponent);
     __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
@@ -2683,7 +2683,7 @@
   // and may not have contained the exponent value in the first place when the
   // exponent is a smi.  We reset it with exponent value before bailing out.
   __ j(not_equal, &done);
-  __ Cvtsi2sd(double_exponent, exponent);
+  __ cvtsi2sd(double_exponent, exponent);
 
   // Returning or bailing out.
   Counters* counters = masm->isolate()->counters();
@@ -2756,7 +2756,8 @@
     __ j(not_equal, &miss);
   }
 
-  StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss);
+  StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss,
+                                         support_wrapper_);
   __ bind(&miss);
   StubCompiler::TailCallBuiltin(
       masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
@@ -4507,8 +4508,6 @@
     // stack alignment is known to be correct. This function takes one argument
     // which is passed on the stack, and we know that the stack has been
     // prepared to pass at least one argument.
-    __ mov(Operand(esp, 1 * kPointerSize),
-           Immediate(ExternalReference::isolate_address(masm->isolate())));
     __ mov(Operand(esp, 0 * kPointerSize), eax);  // Result.
     __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
   }
@@ -6259,7 +6258,7 @@
     __ bind(&right_smi);
     __ mov(ecx, eax);  // Can't clobber eax because we can still jump away.
     __ SmiUntag(ecx);
-    __ Cvtsi2sd(xmm1, ecx);
+    __ cvtsi2sd(xmm1, ecx);
 
     __ bind(&left);
     __ JumpIfSmi(edx, &left_smi, Label::kNear);
@@ -6271,7 +6270,7 @@
     __ bind(&left_smi);
     __ mov(ecx, edx);  // Can't clobber edx because we can still jump away.
     __ SmiUntag(ecx);
-    __ Cvtsi2sd(xmm0, ecx);
+    __ cvtsi2sd(xmm0, ecx);
 
     __ bind(&done);
     // Compare operands.
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 9385423..84a4d23 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -768,7 +768,7 @@
   __ SmiUntag(ebx);
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatureScope fscope(masm, SSE2);
-    __ Cvtsi2sd(xmm0, ebx);
+    __ cvtsi2sd(xmm0, ebx);
     __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
               xmm0);
   } else {
@@ -1165,8 +1165,7 @@
 }
 
 
-void Code::PatchPlatformCodeAge(Isolate* isolate,
-                                byte* sequence,
+void Code::PatchPlatformCodeAge(byte* sequence,
                                 Code::Age age,
                                 MarkingParity parity) {
   uint32_t young_length;
@@ -1175,7 +1174,7 @@
     CopyBytes(sequence, young_sequence, young_length);
     CPU::FlushICache(sequence, young_length);
   } else {
-    Code* stub = GetCodeAgeStub(isolate, age, parity);
+    Code* stub = GetCodeAgeStub(age, parity);
     CodePatcher patcher(sequence, young_length);
     patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
   }
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 7d7b51d..d50b780 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -3977,7 +3977,7 @@
     __ bind(&negative_sign);
     // Truncate, then compare and compensate.
     __ cvttsd2si(output_reg, Operand(input_reg));
-    __ Cvtsi2sd(xmm_scratch, output_reg);
+    __ cvtsi2sd(xmm_scratch, output_reg);
     __ ucomisd(input_reg, xmm_scratch);
     __ j(equal, &done, Label::kNear);
     __ sub(output_reg, Immediate(1));
@@ -4027,7 +4027,7 @@
   __ RecordComment("D2I conversion overflow");
   DeoptimizeIf(equal, instr->environment());
 
-  __ Cvtsi2sd(xmm_scratch, output_reg);
+  __ cvtsi2sd(xmm_scratch, output_reg);
   __ ucomisd(xmm_scratch, input_temp);
   __ j(equal, &done);
   __ sub(output_reg, Immediate(1));
@@ -4978,7 +4978,7 @@
   ASSERT(output->IsDoubleRegister());
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatureScope scope(masm(), SSE2);
-    __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
+    __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
   } else if (input->IsRegister()) {
     Register input_reg = ToRegister(input);
     __ push(input_reg);
@@ -5087,7 +5087,7 @@
     __ xor_(reg, 0x80000000);
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope feature_scope(masm(), SSE2);
-      __ Cvtsi2sd(xmm0, Operand(reg));
+      __ cvtsi2sd(xmm0, Operand(reg));
     } else {
       __ push(reg);
       __ fild_s(Operand(esp, 0));
@@ -5308,7 +5308,7 @@
                                 bool deoptimize_on_minus_zero,
                                 LEnvironment* env,
                                 NumberUntagDMode mode) {
-  Label convert, load_smi, done;
+  Label load_smi, done;
 
   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
     // Smi check.
@@ -5317,15 +5317,26 @@
     // Heap number map check.
     __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
            factory()->heap_number_map());
-    if (can_convert_undefined_to_nan) {
-      __ j(not_equal, &convert, Label::kNear);
-    } else {
+    if (!can_convert_undefined_to_nan) {
       DeoptimizeIf(not_equal, env);
-    }
+    } else {
+      Label heap_number, convert;
+      __ j(equal, &heap_number, Label::kNear);
 
+      // Convert undefined (and hole) to NaN.
+      __ cmp(input_reg, factory()->undefined_value());
+      DeoptimizeIf(not_equal, env);
+
+      __ bind(&convert);
+      ExternalReference nan =
+          ExternalReference::address_of_canonical_non_hole_nan();
+      __ movdbl(result_reg, Operand::StaticVariable(nan));
+      __ jmp(&done, Label::kNear);
+
+      __ bind(&heap_number);
+    }
     // Heap number to XMM conversion.
     __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
-
     if (deoptimize_on_minus_zero) {
       XMMRegister xmm_scratch = xmm0;
       __ xorps(xmm_scratch, xmm_scratch);
@@ -5336,19 +5347,6 @@
       DeoptimizeIf(not_zero, env);
     }
     __ jmp(&done, Label::kNear);
-
-    if (can_convert_undefined_to_nan) {
-      __ bind(&convert);
-
-      // Convert undefined (and hole) to NaN.
-      __ cmp(input_reg, factory()->undefined_value());
-      DeoptimizeIf(not_equal, env);
-
-      ExternalReference nan =
-          ExternalReference::address_of_canonical_non_hole_nan();
-      __ movdbl(result_reg, Operand::StaticVariable(nan));
-      __ jmp(&done, Label::kNear);
-    }
   } else {
     ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
   }
@@ -5358,7 +5356,7 @@
   // input register since we avoid dependencies.
   __ mov(temp_reg, input_reg);
   __ SmiUntag(temp_reg);  // Untag smi before converting to float.
-  __ Cvtsi2sd(result_reg, Operand(temp_reg));
+  __ cvtsi2sd(result_reg, Operand(temp_reg));
   __ bind(&done);
 }
 
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index d8a475c..b65d328 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -283,7 +283,7 @@
                                Label::Distance dst) {
   ASSERT(!input_reg.is(scratch));
   cvttsd2si(result_reg, Operand(input_reg));
-  Cvtsi2sd(scratch, Operand(result_reg));
+  cvtsi2sd(scratch, Operand(result_reg));
   ucomisd(scratch, input_reg);
   j(not_equal, conversion_failed, dst);
   j(parity_even, conversion_failed, dst);  // NaN.
@@ -392,7 +392,7 @@
 
     movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
     cvttsd2si(result_reg, Operand(xmm0));
-    Cvtsi2sd(temp, Operand(result_reg));
+    cvtsi2sd(temp, Operand(result_reg));
     ucomisd(xmm0, temp);
     RecordComment("Deferred TaggedToI: lost precision");
     j(not_equal, lost_precision, Label::kNear);
@@ -457,7 +457,7 @@
   cmp(src, Immediate(0));
   movdbl(scratch,
          Operand(reinterpret_cast<int32_t>(&kUint32Bias), RelocInfo::NONE32));
-  Cvtsi2sd(dst, src);
+  cvtsi2sd(dst, src);
   j(not_sign, &done, Label::kNear);
   addsd(dst, scratch);
   bind(&done);
@@ -676,12 +676,6 @@
 #endif
 
 
-void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
-  xorps(dst, dst);
-  cvtsi2sd(dst, src);
-}
-
-
 void MacroAssembler::Set(Register dst, const Immediate& x) {
   if (x.is_zero()) {
     xor_(dst, dst);  // Shorter than mov.
@@ -840,7 +834,7 @@
   SmiUntag(scratch1);
   if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
     CpuFeatureScope fscope(this, SSE2);
-    Cvtsi2sd(scratch2, scratch1);
+    cvtsi2sd(scratch2, scratch1);
     movdbl(FieldOperand(elements, key, times_4,
                         FixedDoubleArray::kHeaderSize - elements_offset),
            scratch2);
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index d813692..e4e4533 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -366,12 +366,6 @@
   void Set(Register dst, const Immediate& x);
   void Set(const Operand& dst, const Immediate& x);
 
-  // cvtsi2sd instruction only writes to the low 64-bit of dst register, which
-  // hinders register renaming and makes dependence chains longer. So we use
-  // xorps to clear the dst register before cvtsi2sd to solve this issue.
-  void Cvtsi2sd(XMMRegister dst, Register src) { Cvtsi2sd(dst, Operand(src)); }
-  void Cvtsi2sd(XMMRegister dst, const Operand& src);
-
   // Support for constant splitting.
   bool IsUnsafeImmediate(const Immediate& x);
   void SafeSet(Register dst, const Immediate& x);
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 89ea6be..354c2fd 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -329,28 +329,32 @@
                                             Register receiver,
                                             Register scratch1,
                                             Register scratch2,
-                                            Label* miss) {
+                                            Label* miss,
+                                            bool support_wrappers) {
   Label check_wrapper;
 
   // Check if the object is a string leaving the instance type in the
   // scratch register.
-  GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
+  GenerateStringCheck(masm, receiver, scratch1, miss,
+                      support_wrappers ? &check_wrapper : miss);
 
   // Load length from the string and convert to a smi.
   __ mov(eax, FieldOperand(receiver, String::kLengthOffset));
   __ ret(0);
 
-  // Check if the object is a JSValue wrapper.
-  __ bind(&check_wrapper);
-  __ cmp(scratch1, JS_VALUE_TYPE);
-  __ j(not_equal, miss);
+  if (support_wrappers) {
+    // Check if the object is a JSValue wrapper.
+    __ bind(&check_wrapper);
+    __ cmp(scratch1, JS_VALUE_TYPE);
+    __ j(not_equal, miss);
 
-  // Check if the wrapped value is a string and load the length
-  // directly if it is.
-  __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
-  GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
-  __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
-  __ ret(0);
+    // Check if the wrapped value is a string and load the length
+    // directly if it is.
+    __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
+    GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
+    __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
+    __ ret(0);
+  }
 }
 
 
@@ -858,7 +862,7 @@
     __ SmiUntag(value_reg);
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope use_sse2(masm, SSE2);
-      __ Cvtsi2sd(xmm0, value_reg);
+      __ cvtsi2sd(xmm0, value_reg);
     } else {
       __ push(value_reg);
       __ fild_s(Operand(esp, 0));
@@ -1037,7 +1041,7 @@
     __ SmiUntag(value_reg);
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope use_sse2(masm, SSE2);
-      __ Cvtsi2sd(xmm0, value_reg);
+      __ cvtsi2sd(xmm0, value_reg);
     } else {
       __ push(value_reg);
       __ fild_s(Operand(esp, 0));
diff --git a/src/ic.cc b/src/ic.cc
index 5267af1..5518751 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -549,11 +549,9 @@
                                       Code::ExtraICState extra_ic_state,
                                       Handle<Object> object,
                                       Handle<String> name) {
-  bool use_ic = FLAG_use_ic;
   if (object->IsJSObject()) {
     Handle<JSObject> receiver = Handle<JSObject>::cast(object);
     if (receiver->map()->is_deprecated()) {
-      use_ic = false;
       JSObject::MigrateInstance(receiver);
     }
   }
@@ -592,7 +590,9 @@
   }
 
   // Lookup is valid: Update inline cache and stub cache.
-  if (use_ic) UpdateCaches(&lookup, state, extra_ic_state, object, name);
+  if (FLAG_use_ic) {
+    UpdateCaches(&lookup, state, extra_ic_state, object, name);
+  }
 
   // Get the property.
   PropertyAttributes attr;
@@ -819,11 +819,9 @@
                                     Handle<String>::cast(key));
   }
 
-  bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
   if (object->IsJSObject()) {
     Handle<JSObject> receiver = Handle<JSObject>::cast(object);
     if (receiver->map()->is_deprecated()) {
-      use_ic = false;
       JSObject::MigrateInstance(receiver);
     }
   }
@@ -832,6 +830,7 @@
     return TypeError("non_object_property_call", object, key);
   }
 
+  bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
   ASSERT(!(use_ic && object->IsJSGlobalProxy()));
 
   if (use_ic && state != MEGAMORPHIC) {
@@ -875,20 +874,21 @@
     return TypeError("non_object_property_load", object, name);
   }
 
-  bool use_ic = FLAG_use_ic;
-
-  if (use_ic) {
+  if (FLAG_use_ic) {
     // Use specialized code for getting the length of strings and
     // string wrapper objects.  The length property of string wrapper
     // objects is read-only and therefore always returns the length of
     // the underlying string value.  See ECMA-262 15.5.5.1.
-    if (object->IsStringWrapper() &&
+    if ((object->IsString() || object->IsStringWrapper()) &&
         name->Equals(isolate()->heap()->length_string())) {
       Handle<Code> stub;
       if (state == UNINITIALIZED) {
         stub = pre_monomorphic_stub();
-      } else if (state == PREMONOMORPHIC || state == MONOMORPHIC) {
-        StringLengthStub string_length_stub(kind());
+      } else if (state == PREMONOMORPHIC) {
+        StringLengthStub string_length_stub(kind(), !object->IsString());
+        stub = string_length_stub.GetCode(isolate());
+      } else if (state == MONOMORPHIC && object->IsStringWrapper()) {
+        StringLengthStub string_length_stub(kind(), true);
         stub = string_length_stub.GetCode(isolate());
       } else if (state != MEGAMORPHIC) {
         ASSERT(state != GENERIC);
@@ -897,12 +897,14 @@
       if (!stub.is_null()) {
         set_target(*stub);
 #ifdef DEBUG
-        if (FLAG_trace_ic) PrintF("[LoadIC : +#length /stringwrapper]\n");
+        if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
 #endif
       }
       // Get the string if we have a string wrapper object.
-      String* string = String::cast(JSValue::cast(*object)->value());
-      return Smi::FromInt(string->length());
+      Handle<Object> string = object->IsJSValue()
+          ? Handle<Object>(Handle<JSValue>::cast(object)->value(), isolate())
+          : object;
+      return Smi::FromInt(String::cast(*string)->length());
     }
 
     // Use specialized code for getting prototype of functions.
@@ -934,14 +936,13 @@
   uint32_t index;
   if (kind() == Code::KEYED_LOAD_IC && name->AsArrayIndex(&index)) {
     // Rewrite to the generic keyed load stub.
-    if (use_ic) set_target(*generic_stub());
+    if (FLAG_use_ic) set_target(*generic_stub());
     return Runtime::GetElementOrCharAtOrFail(isolate(), object, index);
   }
 
   if (object->IsJSObject()) {
     Handle<JSObject> receiver = Handle<JSObject>::cast(object);
     if (receiver->map()->is_deprecated()) {
-      use_ic = false;
       JSObject::MigrateInstance(receiver);
     }
   }
@@ -959,7 +960,7 @@
   }
 
   // Update inline cache and stub cache.
-  if (use_ic) UpdateCaches(&lookup, state, object, name);
+  if (FLAG_use_ic) UpdateCaches(&lookup, state, object, name);
 
   PropertyAttributes attr;
   if (lookup.IsInterceptor() || lookup.IsHandler()) {
@@ -1264,8 +1265,6 @@
                           State state,
                           Handle<Object> object,
                           Handle<String> name) {
-  // TODO(verwaest): It would be nice to support loading fields from smis as
-  // well. For now just fail to update the cache.
   if (!object->IsHeapObject()) return;
 
   Handle<HeapObject> receiver = Handle<HeapObject>::cast(object);
@@ -1279,16 +1278,6 @@
   } else if (!lookup->IsCacheable()) {
     // Bail out if the result is not cacheable.
     code = slow_stub();
-  } else if (object->IsString() &&
-             name->Equals(isolate()->heap()->length_string())) {
-    int length_index = String::kLengthOffset / kPointerSize;
-    if (target()->is_load_stub()) {
-      LoadFieldStub stub(true, length_index, Representation::Tagged());
-      code = stub.GetCode(isolate());
-    } else {
-      KeyedLoadFieldStub stub(true, length_index, Representation::Tagged());
-      code = stub.GetCode(isolate());
-    }
   } else if (!object->IsJSObject()) {
     // TODO(jkummerow): It would be nice to support non-JSObjects in
     // ComputeLoadHandler, then we wouldn't need to go generic here.
@@ -1373,9 +1362,9 @@
         return isolate()->stub_cache()->ComputeLoadViaGetter(
             name, receiver, holder, function);
       } else if (receiver->IsJSArray() &&
-                 name->Equals(isolate()->heap()->length_string())) {
-        PropertyIndex lengthIndex = PropertyIndex::NewHeaderIndex(
-            JSArray::kLengthOffset / kPointerSize);
+          name->Equals(isolate()->heap()->length_string())) {
+        PropertyIndex lengthIndex =
+          PropertyIndex::NewHeaderIndex(JSArray::kLengthOffset / kPointerSize);
         return isolate()->stub_cache()->ComputeLoadField(
             name, receiver, holder, lengthIndex, Representation::Tagged());
       }
@@ -1507,7 +1496,6 @@
       } else if (object->IsJSObject()) {
         Handle<JSObject> receiver = Handle<JSObject>::cast(object);
         if (receiver->map()->is_deprecated()) {
-          use_ic = false;
           JSObject::MigrateInstance(receiver);
         }
 
@@ -1524,11 +1512,9 @@
     } else {
       TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "force generic");
     }
-    if (use_ic) {
-      ASSERT(!stub.is_null());
-      set_target(*stub);
-      TRACE_IC("KeyedLoadIC", key, state, target());
-    }
+    ASSERT(!stub.is_null());
+    set_target(*stub);
+    TRACE_IC("KeyedLoadIC", key, state, target());
   }
 
 
@@ -1692,9 +1678,7 @@
 
   Handle<JSObject> receiver = Handle<JSObject>::cast(object);
 
-  bool use_ic = FLAG_use_ic;
   if (receiver->map()->is_deprecated()) {
-    use_ic = false;
     JSObject::MigrateInstance(receiver);
   }
 
@@ -1717,7 +1701,7 @@
   // properties. Slow properties might indicate redefinition of the length
   // property. Note that when redefined using Object.freeze, it's possible
   // to have fast properties but a read-only length.
-  if (use_ic &&
+  if (FLAG_use_ic &&
       receiver->IsJSArray() &&
       name->Equals(isolate()->heap()->length_string()) &&
       Handle<JSArray>::cast(receiver)->AllowsSetElementsLength() &&
@@ -1732,7 +1716,7 @@
   }
 
   if (receiver->IsJSGlobalProxy()) {
-    if (use_ic && kind() != Code::KEYED_STORE_IC) {
+    if (FLAG_use_ic && kind() != Code::KEYED_STORE_IC) {
       // Generate a generic stub that goes to the runtime when we see a global
       // proxy as receiver.
       Handle<Code> stub = (strict_mode == kStrictMode)
@@ -1754,7 +1738,7 @@
     // Strict mode doesn't allow setting non-existent global property.
     return ReferenceError("not_defined", name);
   }
-  if (use_ic) {
+  if (FLAG_use_ic) {
     if (state == UNINITIALIZED) {
       Handle<Code> stub = (strict_mode == kStrictMode)
           ? pre_monomorphic_stub_strict()
diff --git a/src/isolate.cc b/src/isolate.cc
index ceb8809..6fa496a 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -1087,7 +1087,7 @@
   Handle<String> key = factory()->stack_overflow_string();
   Handle<JSObject> boilerplate =
       Handle<JSObject>::cast(GetProperty(this, js_builtins_object(), key));
-  Handle<JSObject> exception = JSObject::Copy(boilerplate);
+  Handle<JSObject> exception = Copy(boilerplate);
   DoThrow(*exception, NULL);
 
   // Get stack trace limit.
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 400292e..3aabd97 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -833,15 +833,14 @@
   // The following registers must be saved and restored when calling through to
   // the runtime:
   //   a0 - contains return address (beginning of patch sequence)
-  //   a1 - isolate
+  //   a1 - function object
   RegList saved_regs =
       (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
   FrameScope scope(masm, StackFrame::MANUAL);
   __ MultiPush(saved_regs);
-  __ PrepareCallCFunction(1, 0, a2);
-  __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
+  __ PrepareCallCFunction(1, 0, a1);
   __ CallCFunction(
-      ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+      ExternalReference::get_make_code_young_function(masm->isolate()), 1);
   __ MultiPop(saved_regs);
   __ Jump(a0);
 }
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index e4f68f9..0589bf0 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -2795,9 +2795,8 @@
   if (do_gc) {
     // Move result passed in v0 into a0 to call PerformGC.
     __ mov(a0, v0);
-    __ PrepareCallCFunction(2, 0, a1);
-    __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
-    __ CallCFunction(ExternalReference::perform_gc_function(isolate), 2, 0);
+    __ PrepareCallCFunction(1, 0, a1);
+    __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1, 0);
   }
 
   ExternalReference scope_depth =
@@ -3409,7 +3408,8 @@
     receiver = a0;
   }
 
-  StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss);
+  StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss,
+                                         support_wrapper_);
 
   __ bind(&miss);
   StubCompiler::TailCallBuiltin(
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index a12faee..5c847fc 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -635,8 +635,7 @@
 }
 
 
-void Code::PatchPlatformCodeAge(Isolate* isolate,
-                                byte* sequence,
+void Code::PatchPlatformCodeAge(byte* sequence,
                                 Code::Age age,
                                 MarkingParity parity) {
   uint32_t young_length;
@@ -645,7 +644,7 @@
     CopyBytes(sequence, young_sequence, young_length);
     CPU::FlushICache(sequence, young_length);
   } else {
-    Code* stub = GetCodeAgeStub(isolate, age, parity);
+    Code* stub = GetCodeAgeStub(age, parity);
     CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
     // Mark this code sequence for FindPlatformCodeAgeSequence()
     patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 6659b6c..b37c7e0 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -1789,43 +1789,33 @@
 
 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
   Register string = ToRegister(instr->string());
-  LOperand* index_op = instr->index();
+  Register index = ToRegister(instr->index());
   Register value = ToRegister(instr->value());
   Register scratch = scratch0();
   String::Encoding encoding = instr->encoding();
 
   if (FLAG_debug_code) {
-    __ lw(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
-    __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+    __ lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
+    __ lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
 
-    __ And(scratch, scratch,
-           Operand(kStringRepresentationMask | kStringEncodingMask));
+    __ And(at, at, Operand(kStringRepresentationMask | kStringEncodingMask));
     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
-    __ Subu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
+    __ Subu(at, at, Operand(encoding == String::ONE_BYTE_ENCODING
                                 ? one_byte_seq_type : two_byte_seq_type));
     __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
   }
 
-  if (index_op->IsConstantOperand()) {
-    int constant_index = ToInteger32(LConstantOperand::cast(index_op));
-    if (encoding == String::ONE_BYTE_ENCODING) {
-      __ sb(value,
-          FieldMemOperand(string, SeqString::kHeaderSize + constant_index));
-    } else {
-      __ sh(value,
-          FieldMemOperand(string, SeqString::kHeaderSize + constant_index * 2));
-    }
+  __ Addu(scratch,
+          string,
+          Operand(SeqString::kHeaderSize - kHeapObjectTag));
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ Addu(at, scratch, index);
+    __ sb(value, MemOperand(at));
   } else {
-    Register index = ToRegister(index_op);
-    if (encoding == String::ONE_BYTE_ENCODING) {
-      __ Addu(scratch, string, Operand(index));
-      __ sb(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
-    } else {
-      __ sll(scratch, index, 1);
-      __ Addu(scratch, string, scratch);
-      __ sh(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
-    }
+    __ sll(at, index, 1);
+    __ Addu(at, scratch, at);
+    __ sh(value, MemOperand(at));
   }
 }
 
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index 38134f4..4dc8022 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -1783,9 +1783,11 @@
 
 LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
   LOperand* string = UseRegister(instr->string());
-  LOperand* index = UseRegisterOrConstant(instr->index());
-  LOperand* value = UseRegister(instr->value());
-  return new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
+  LOperand* index = UseRegister(instr->index());
+  LOperand* value = UseTempRegister(instr->value());
+  LSeqStringSetChar* result =
+      new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
+  return DefineAsRegister(result);
 }
 
 
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 97bd872..58452ca 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -374,26 +374,30 @@
                                             Register receiver,
                                             Register scratch1,
                                             Register scratch2,
-                                            Label* miss) {
+                                            Label* miss,
+                                            bool support_wrappers) {
   Label check_wrapper;
 
   // Check if the object is a string leaving the instance type in the
   // scratch1 register.
-  GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
+  GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
+                      support_wrappers ? &check_wrapper : miss);
 
   // Load length directly from the string.
   __ Ret(USE_DELAY_SLOT);
   __ lw(v0, FieldMemOperand(receiver, String::kLengthOffset));
 
-  // Check if the object is a JSValue wrapper.
-  __ bind(&check_wrapper);
-  __ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE));
+  if (support_wrappers) {
+    // Check if the object is a JSValue wrapper.
+    __ bind(&check_wrapper);
+    __ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE));
 
-  // Unwrap the value and check if the wrapped value is a string.
-  __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
-  GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
-  __ Ret(USE_DELAY_SLOT);
-  __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
+    // Unwrap the value and check if the wrapped value is a string.
+    __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
+    GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
+    __ Ret(USE_DELAY_SLOT);
+    __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
+  }
 }
 
 
diff --git a/src/object-observe.js b/src/object-observe.js
index b09c42d..1035792 100644
--- a/src/object-observe.js
+++ b/src/object-observe.js
@@ -284,6 +284,11 @@
       arg.length < 0)
     return false;
 
+  var length = arg.length;
+  for (var i = 0; i < length; i++) {
+    if (!IS_STRING(arg[i]))
+      return false;
+  }
   return true;
 }
 
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index acb00da..3716df1 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -330,11 +330,10 @@
     }
   }
 
-  // If a GC was caused while constructing this object, the elements
-  // pointer may point to a one pointer filler map.
-  if ((FLAG_use_gvn && FLAG_use_allocation_folding) ||
-      (reinterpret_cast<Map*>(elements()) !=
-      GetHeap()->one_pointer_filler_map())) {
+  // TODO(hpayer): deal gracefully with partially constructed JSObjects, when
+  // allocation folding is turned off.
+  if (reinterpret_cast<Map*>(elements()) !=
+      GetHeap()->one_pointer_filler_map()) {
     CHECK_EQ((map()->has_fast_smi_or_object_elements() ||
               (elements() == GetHeap()->empty_fixed_array())),
              (elements()->map() == GetHeap()->fixed_array_map() ||
@@ -684,11 +683,10 @@
 void JSArray::JSArrayVerify() {
   JSObjectVerify();
   CHECK(length()->IsNumber() || length()->IsUndefined());
-  // If a GC was caused while constructing this array, the elements
-  // pointer may point to a one pointer filler map.
-  if ((FLAG_use_gvn && FLAG_use_allocation_folding) ||
-      (reinterpret_cast<Map*>(elements()) !=
-      GetHeap()->one_pointer_filler_map())) {
+  // TODO(hpayer): deal gracefully with partially constructed JSObjects, when
+  // allocation folding is turned off.
+  if (reinterpret_cast<Map*>(elements()) !=
+      GetHeap()->one_pointer_filler_map()) {
     CHECK(elements()->IsUndefined() ||
           elements()->IsFixedArray() ||
           elements()->IsFixedDoubleArray());
diff --git a/src/objects.cc b/src/objects.cc
index bb78a8e..e37f6d1 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -5635,78 +5635,71 @@
 }
 
 
-// TODO(mstarzinger): Temporary wrapper until handlified.
-static Handle<Object> NewStorageFor(Isolate* isolate,
-                                    Handle<Object> object,
-                                    Representation representation) {
-  Heap* heap = isolate->heap();
-  CALL_HEAP_FUNCTION(isolate,
-                     object->AllocateNewStorageFor(heap, representation),
-                     Object);
-}
-
-
-Handle<JSObject> JSObject::Copy(Handle<JSObject> object) {
-  Isolate* isolate = object->GetIsolate();
-  CALL_HEAP_FUNCTION(isolate,
-                     isolate->heap()->CopyJSObject(*object), JSObject);
-}
-
-
-Handle<JSObject> JSObject::DeepCopy(Handle<JSObject> object) {
-  Isolate* isolate = object->GetIsolate();
+MUST_USE_RESULT MaybeObject* JSObject::DeepCopy(Isolate* isolate) {
   StackLimitCheck check(isolate);
-  if (check.HasOverflowed()) {
-    isolate->StackOverflow();
-    return Handle<JSObject>::null();
+  if (check.HasOverflowed()) return isolate->StackOverflow();
+
+  if (map()->is_deprecated()) {
+    MaybeObject* maybe_failure = MigrateInstance();
+    if (maybe_failure->IsFailure()) return maybe_failure;
   }
 
-  if (object->map()->is_deprecated()) {
-    MigrateInstance(object);
+  Heap* heap = isolate->heap();
+  Object* result;
+  { MaybeObject* maybe_result = heap->CopyJSObject(this);
+    if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-
-  Handle<JSObject> copy = Copy(object);
+  JSObject* copy = JSObject::cast(result);
 
   // Deep copy local properties.
   if (copy->HasFastProperties()) {
-    Handle<DescriptorArray> descriptors(copy->map()->instance_descriptors());
+    DescriptorArray* descriptors = copy->map()->instance_descriptors();
     int limit = copy->map()->NumberOfOwnDescriptors();
     for (int i = 0; i < limit; i++) {
       PropertyDetails details = descriptors->GetDetails(i);
       if (details.type() != FIELD) continue;
       int index = descriptors->GetFieldIndex(i);
-      Handle<Object> value(object->RawFastPropertyAt(index), isolate);
+      Object* value = RawFastPropertyAt(index);
       if (value->IsJSObject()) {
-        value = DeepCopy(Handle<JSObject>::cast(value));
-        RETURN_IF_EMPTY_HANDLE_VALUE(isolate, value, Handle<JSObject>());
+        JSObject* js_object = JSObject::cast(value);
+        MaybeObject* maybe_copy = js_object->DeepCopy(isolate);
+        if (!maybe_copy->To(&value)) return maybe_copy;
       } else {
         Representation representation = details.representation();
-        value = NewStorageFor(isolate, value, representation);
+        MaybeObject* maybe_storage =
+            value->AllocateNewStorageFor(heap, representation);
+        if (!maybe_storage->To(&value)) return maybe_storage;
       }
-      copy->FastPropertyAtPut(index, *value);
+      copy->FastPropertyAtPut(index, value);
     }
   } else {
-    Handle<FixedArray> names =
-        isolate->factory()->NewFixedArray(copy->NumberOfLocalProperties());
-    copy->GetLocalPropertyNames(*names, 0);
+    { MaybeObject* maybe_result =
+          heap->AllocateFixedArray(copy->NumberOfLocalProperties());
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+    FixedArray* names = FixedArray::cast(result);
+    copy->GetLocalPropertyNames(names, 0);
     for (int i = 0; i < names->length(); i++) {
       ASSERT(names->get(i)->IsString());
-      Handle<String> key_string(String::cast(names->get(i)));
+      String* key_string = String::cast(names->get(i));
       PropertyAttributes attributes =
-          copy->GetLocalPropertyAttribute(*key_string);
+          copy->GetLocalPropertyAttribute(key_string);
       // Only deep copy fields from the object literal expression.
       // In particular, don't try to copy the length attribute of
       // an array.
       if (attributes != NONE) continue;
-      Handle<Object> value(
-          copy->GetProperty(*key_string, &attributes)->ToObjectUnchecked(),
-          isolate);
+      Object* value =
+          copy->GetProperty(key_string, &attributes)->ToObjectUnchecked();
       if (value->IsJSObject()) {
-        Handle<Object> result = DeepCopy(Handle<JSObject>::cast(value));
-        RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>());
-        // Creating object copy for literals. No strict mode needed.
-        CHECK_NOT_EMPTY_HANDLE(isolate, SetProperty(
-            copy, key_string, result, NONE, kNonStrictMode));
+        JSObject* js_object = JSObject::cast(value);
+        { MaybeObject* maybe_result = js_object->DeepCopy(isolate);
+          if (!maybe_result->ToObject(&result)) return maybe_result;
+        }
+        { MaybeObject* maybe_result =
+              // Creating object copy for literals. No strict mode needed.
+              copy->SetProperty(key_string, result, NONE, kNonStrictMode);
+          if (!maybe_result->ToObject(&result)) return maybe_result;
+        }
       }
     }
   }
@@ -5719,8 +5712,8 @@
     case FAST_ELEMENTS:
     case FAST_HOLEY_SMI_ELEMENTS:
     case FAST_HOLEY_ELEMENTS: {
-      Handle<FixedArray> elements(FixedArray::cast(copy->elements()));
-      if (elements->map() == isolate->heap()->fixed_cow_array_map()) {
+      FixedArray* elements = FixedArray::cast(copy->elements());
+      if (elements->map() == heap->fixed_cow_array_map()) {
         isolate->counters()->cow_arrays_created_runtime()->Increment();
 #ifdef DEBUG
         for (int i = 0; i < elements->length(); i++) {
@@ -5729,31 +5722,34 @@
 #endif
       } else {
         for (int i = 0; i < elements->length(); i++) {
-          Handle<Object> value(elements->get(i), isolate);
+          Object* value = elements->get(i);
           ASSERT(value->IsSmi() ||
                  value->IsTheHole() ||
                  (IsFastObjectElementsKind(copy->GetElementsKind())));
           if (value->IsJSObject()) {
-            Handle<Object> result = DeepCopy(Handle<JSObject>::cast(value));
-            RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>());
-            elements->set(i, *result);
+            JSObject* js_object = JSObject::cast(value);
+            { MaybeObject* maybe_result = js_object->DeepCopy(isolate);
+              if (!maybe_result->ToObject(&result)) return maybe_result;
+            }
+            elements->set(i, result);
           }
         }
       }
       break;
     }
     case DICTIONARY_ELEMENTS: {
-      Handle<SeededNumberDictionary> element_dictionary(
-          copy->element_dictionary());
+      SeededNumberDictionary* element_dictionary = copy->element_dictionary();
       int capacity = element_dictionary->Capacity();
       for (int i = 0; i < capacity; i++) {
         Object* k = element_dictionary->KeyAt(i);
         if (element_dictionary->IsKey(k)) {
-          Handle<Object> value(element_dictionary->ValueAt(i), isolate);
+          Object* value = element_dictionary->ValueAt(i);
           if (value->IsJSObject()) {
-            Handle<Object> result = DeepCopy(Handle<JSObject>::cast(value));
-            RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>());
-            element_dictionary->ValueAtPut(i, *result);
+            JSObject* js_object = JSObject::cast(value);
+            { MaybeObject* maybe_result = js_object->DeepCopy(isolate);
+              if (!maybe_result->ToObject(&result)) return maybe_result;
+            }
+            element_dictionary->ValueAtPut(i, result);
           }
         }
       }
@@ -10436,8 +10432,8 @@
 }
 
 
-void Code::MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate) {
-  PatchPlatformCodeAge(isolate, sequence, kNoAge, NO_MARKING_PARITY);
+void Code::MakeCodeAgeSequenceYoung(byte* sequence) {
+  PatchPlatformCodeAge(sequence, kNoAge, NO_MARKING_PARITY);
 }
 
 
@@ -10448,9 +10444,7 @@
     MarkingParity code_parity;
     GetCodeAgeAndParity(sequence, &age, &code_parity);
     if (age != kLastCodeAge && code_parity != current_parity) {
-      PatchPlatformCodeAge(GetIsolate(),
-                           sequence,
-                           static_cast<Age>(age + 1),
+      PatchPlatformCodeAge(sequence, static_cast<Age>(age + 1),
                            current_parity);
     }
   }
@@ -10513,7 +10507,8 @@
 }
 
 
-Code* Code::GetCodeAgeStub(Isolate* isolate, Age age, MarkingParity parity) {
+Code* Code::GetCodeAgeStub(Age age, MarkingParity parity) {
+  Isolate* isolate = Isolate::Current();
   Builtins* builtins = isolate->builtins();
   switch (age) {
 #define HANDLE_CODE_AGE(AGE)                                            \
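
For readers less familiar with the pre-handlified allocation style this rollback restores in JSObject::DeepCopy above: allocation paths return a MaybeObject*, which holds either the allocated object or a Failure, and callers unwrap it with ToObject()/To() and hand any Failure straight back to their own caller so the runtime can trigger a GC and retry. A minimal sketch of that pattern (the helper name is hypothetical and not part of this patch):

static MaybeObject* CloneElement(Isolate* isolate, JSObject* element) {
  MaybeObject* maybe_copy = element->DeepCopy(isolate);
  Object* copy;
  // On allocation failure, propagate the Failure unchanged; the runtime
  // (see Runtime::PerformGC below) turns it into a GC and a retry.
  if (!maybe_copy->ToObject(&copy)) return maybe_copy;
  return copy;  // Object* upcasts implicitly to MaybeObject*.
}
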
diff --git a/src/objects.h b/src/objects.h
index 9ddf10f..d3593b6 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -2630,9 +2630,8 @@
   // Called the first time an object is observed with ES7 Object.observe.
   MUST_USE_RESULT MaybeObject* SetObserved(Isolate* isolate);
 
-  // Copy object.
-  static Handle<JSObject> Copy(Handle<JSObject> object);
-  static Handle<JSObject> DeepCopy(Handle<JSObject> object);
+  // Copy object
+  MUST_USE_RESULT MaybeObject* DeepCopy(Isolate* isolate);
 
   // Dispatched behavior.
   void JSObjectShortPrint(StringStream* accumulator);
@@ -5155,7 +5154,7 @@
   // being entered through the prologue.  Used to determine when it is
   // relatively safe to flush this code object and replace it with the lazy
   // compilation stub.
-  static void MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate);
+  static void MakeCodeAgeSequenceYoung(byte* sequence);
   void MakeOlder(MarkingParity);
   static bool IsYoungSequence(byte* sequence);
   bool IsOld();
@@ -5301,11 +5300,10 @@
                                   MarkingParity* parity);
   static void GetCodeAgeAndParity(byte* sequence, Age* age,
                                   MarkingParity* parity);
-  static Code* GetCodeAgeStub(Isolate* isolate, Age age, MarkingParity parity);
+  static Code* GetCodeAgeStub(Age age, MarkingParity parity);
 
   // Code aging -- platform-specific
-  static void PatchPlatformCodeAge(Isolate* isolate,
-                                   byte* sequence, Age age,
+  static void PatchPlatformCodeAge(byte* sequence, Age age,
                                    MarkingParity parity);
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
index 5903438..4d3b1e3 100644
--- a/src/platform-cygwin.cc
+++ b/src/platform-cygwin.cc
@@ -205,6 +205,12 @@
 }
 
 
+int OS::StackWalk(Vector<OS::StackFrame> frames) {
+  // Not supported on Cygwin.
+  return 0;
+}
+
+
 // The VirtualMemory implementation is taken from platform-win32.cc.
 // The mmap-based virtual memory implementation as it is used on most posix
 // platforms does not work well because Cygwin does not support MAP_FIXED.
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 518ad31..d818278 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -199,6 +199,10 @@
 }
 
 
+int OS::StackWalk(Vector<OS::StackFrame> frames) {
+  return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
+}
+
 
 // Constants used for mmap.
 static const int kMmapFd = -1;
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 74d473f..b8b9602 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -313,6 +313,16 @@
 }
 
 
+int OS::StackWalk(Vector<OS::StackFrame> frames) {
+  // backtrace is a glibc extension.
+#if defined(__GLIBC__) && !defined(__UCLIBC__)
+  return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
+#else
+  return 0;
+#endif
+}
+
+
 // Constants used for mmap.
 static const int kMmapFd = -1;
 static const int kMmapFdOffset = 0;
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index a58bc1a..67cc96f 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -220,6 +220,14 @@
 }
 
 
+int OS::StackWalk(Vector<StackFrame> frames) {
+  // If the weak link to execinfo has failed (i.e. we are on 10.4), bail out.
+  if (backtrace == NULL) return 0;
+
+  return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
+}
+
+
 VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
 
 
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index 4f5420e..30a484f 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -231,6 +231,34 @@
 }
 
 
+int OS::StackWalk(Vector<OS::StackFrame> frames) {
+  // Collect the stack frames using backtrace()/backtrace_symbols().
+  int frames_size = frames.length();
+  ScopedVector<void*> addresses(frames_size);
+
+  int frames_count = backtrace(addresses.start(), frames_size);
+
+  char** symbols = backtrace_symbols(addresses.start(), frames_count);
+  if (symbols == NULL) {
+    return kStackWalkError;
+  }
+
+  for (int i = 0; i < frames_count; i++) {
+    frames[i].address = addresses[i];
+    // Format a text representation of the frame based on the information
+    // available.
+    SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
+             "%s",
+             symbols[i]);
+    // Make sure line termination is in place.
+    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
+  }
+
+  free(symbols);
+
+  return frames_count;
+}
+
 
 // Constants used for mmap.
 static const int kMmapFd = -1;
diff --git a/src/platform-posix.h b/src/platform-posix.h
index e0fbc0c..6b73387 100644
--- a/src/platform-posix.h
+++ b/src/platform-posix.h
@@ -39,6 +39,7 @@
 namespace internal {
 
 // Used by platform implementation files during OS::DumpBacktrace()
+// and OS::StackWalk().
 template<int (*backtrace)(void**, int),
          char** (*backtrace_symbols)(void* const*, int)>
 struct POSIXBacktraceHelper {
@@ -72,6 +73,32 @@
     fflush(stderr);
     free(symbols);
   }
+
+  static int StackWalk(Vector<OS::StackFrame> frames) {
+    int frames_size = frames.length();
+    ScopedVector<void*> addresses(frames_size);
+
+    int frames_count = backtrace(addresses.start(), frames_size);
+
+    char** symbols = backtrace_symbols(addresses.start(), frames_count);
+    if (symbols == NULL) {
+      return OS::kStackWalkError;
+    }
+
+    for (int i = 0; i < frames_count; i++) {
+      frames[i].address = addresses[i];
+      // Format a text representation of the frame based on the information
+      // available.
+      OS::SNPrintF(MutableCStrVector(frames[i].text, OS::kStackWalkMaxTextLen),
+                   "%s", symbols[i]);
+      // Make sure line termination is in place.
+      frames[i].text[OS::kStackWalkMaxTextLen - 1] = '\0';
+    }
+
+    free(symbols);
+
+    return frames_count;
+  }
 };
 
 } }  // namespace v8::internal
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index df81c3a..f082af1 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -211,6 +211,20 @@
 }
 
 
+int OS::StackWalk(Vector<OS::StackFrame> frames) {
+  ucontext_t ctx;
+  struct StackWalker walker = { frames, 0 };
+
+  if (getcontext(&ctx) < 0) return kStackWalkError;
+
+  if (!walkcontext(&ctx, StackWalkCallback, &walker)) {
+    return kStackWalkError;
+  }
+
+  return walker.index;
+}
+
+
 // Constants used for mmap.
 static const int kMmapFd = -1;
 static const int kMmapFdOffset = 0;
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 073b21a..ea4f7ea 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -1208,9 +1208,133 @@
 }
 
 
+// Walk the stack using the facilities in dbghelp.dll and tlhelp32.dll
+
+// Switch off warning 4748 (/GS can not protect parameters and local variables
+// from local buffer overrun because optimizations are disabled in function) as
+// it is triggered by the use of inline assembler.
+#pragma warning(push)
+#pragma warning(disable : 4748)
+int OS::StackWalk(Vector<OS::StackFrame> frames) {
+  BOOL ok;
+
+  // Load the required functions from DLL's.
+  if (!LoadDbgHelpAndTlHelp32()) return kStackWalkError;
+
+  // Get the process and thread handles.
+  HANDLE process_handle = GetCurrentProcess();
+  HANDLE thread_handle = GetCurrentThread();
+
+  // Read the symbols.
+  if (!LoadSymbols(Isolate::Current(), process_handle)) return kStackWalkError;
+
+  // Capture current context.
+  CONTEXT context;
+  RtlCaptureContext(&context);
+
+  // Initialize the stack walking
+  STACKFRAME64 stack_frame;
+  memset(&stack_frame, 0, sizeof(stack_frame));
+#ifdef  _WIN64
+  stack_frame.AddrPC.Offset = context.Rip;
+  stack_frame.AddrFrame.Offset = context.Rbp;
+  stack_frame.AddrStack.Offset = context.Rsp;
+#else
+  stack_frame.AddrPC.Offset = context.Eip;
+  stack_frame.AddrFrame.Offset = context.Ebp;
+  stack_frame.AddrStack.Offset = context.Esp;
+#endif
+  stack_frame.AddrPC.Mode = AddrModeFlat;
+  stack_frame.AddrFrame.Mode = AddrModeFlat;
+  stack_frame.AddrStack.Mode = AddrModeFlat;
+  int frames_count = 0;
+
+  // Collect stack frames.
+  int frames_size = frames.length();
+  while (frames_count < frames_size) {
+    ok = _StackWalk64(
+        IMAGE_FILE_MACHINE_I386,    // MachineType
+        process_handle,             // hProcess
+        thread_handle,              // hThread
+        &stack_frame,               // StackFrame
+        &context,                   // ContextRecord
+        NULL,                       // ReadMemoryRoutine
+        _SymFunctionTableAccess64,  // FunctionTableAccessRoutine
+        _SymGetModuleBase64,        // GetModuleBaseRoutine
+        NULL);                      // TranslateAddress
+    if (!ok) break;
+
+    // Store the address.
+    ASSERT((stack_frame.AddrPC.Offset >> 32) == 0);  // 32-bit address.
+    frames[frames_count].address =
+        reinterpret_cast<void*>(stack_frame.AddrPC.Offset);
+
+    // Try to locate a symbol for this frame.
+    DWORD64 symbol_displacement;
+    SmartArrayPointer<IMAGEHLP_SYMBOL64> symbol(
+        NewArray<IMAGEHLP_SYMBOL64>(kStackWalkMaxNameLen));
+    if (symbol.is_empty()) return kStackWalkError;  // Out of memory.
+    memset(*symbol, 0, sizeof(IMAGEHLP_SYMBOL64) + kStackWalkMaxNameLen);
+    (*symbol)->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL64);
+    (*symbol)->MaxNameLength = kStackWalkMaxNameLen;
+    ok = _SymGetSymFromAddr64(process_handle,             // hProcess
+                              stack_frame.AddrPC.Offset,  // Address
+                              &symbol_displacement,       // Displacement
+                              *symbol);                   // Symbol
+    if (ok) {
+      // Try to locate more source information for the symbol.
+      IMAGEHLP_LINE64 Line;
+      memset(&Line, 0, sizeof(Line));
+      Line.SizeOfStruct = sizeof(Line);
+      DWORD line_displacement;
+      ok = _SymGetLineFromAddr64(
+          process_handle,             // hProcess
+          stack_frame.AddrPC.Offset,  // dwAddr
+          &line_displacement,         // pdwDisplacement
+          &Line);                     // Line
+      // Format a text representation of the frame based on the information
+      // available.
+      if (ok) {
+        SNPrintF(MutableCStrVector(frames[frames_count].text,
+                                   kStackWalkMaxTextLen),
+                 "%s %s:%d:%d",
+                 (*symbol)->Name, Line.FileName, Line.LineNumber,
+                 line_displacement);
+      } else {
+        SNPrintF(MutableCStrVector(frames[frames_count].text,
+                                   kStackWalkMaxTextLen),
+                 "%s",
+                 (*symbol)->Name);
+      }
+      // Make sure line termination is in place.
+      frames[frames_count].text[kStackWalkMaxTextLen - 1] = '\0';
+    } else {
+      // No text representation of this frame
+      frames[frames_count].text[0] = '\0';
+
+      // Continue if we are just missing a module (for non C/C++ frames a
+      // module will never be found).
+      int err = GetLastError();
+      if (err != ERROR_MOD_NOT_FOUND) {
+        break;
+      }
+    }
+
+    frames_count++;
+  }
+
+  // Return the number of frames filled in.
+  return frames_count;
+}
+
+
+// Restore warnings to previous settings.
+#pragma warning(pop)
+
 #else  // __MINGW32__
 void OS::LogSharedLibraryAddresses(Isolate* isolate) { }
 void OS::SignalCodeMovingGC() { }
+int OS::StackWalk(Vector<OS::StackFrame> frames) { return 0; }
 #endif  // __MINGW32__
 
 
diff --git a/src/platform.h b/src/platform.h
index aa50cb4..ee8fb92 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -264,6 +264,8 @@
     char text[kStackWalkMaxTextLen];
   };
 
+  static int StackWalk(Vector<StackFrame> frames);
+
   class MemoryMappedFile {
    public:
     static MemoryMappedFile* open(const char* name);
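
The StackWalk() entry point declared here fills the StackFrame structs declared just above it; the platform files earlier in this patch supply the per-OS implementations. A minimal caller sketch (illustrative only and not part of this patch; the function name and the 32-frame cap are made up):

static void PrintNativeStack() {
  static const int kMaxFrames = 32;
  v8::internal::OS::StackFrame frames[kMaxFrames];
  int count = v8::internal::OS::StackWalk(
      v8::internal::Vector<v8::internal::OS::StackFrame>(frames, kMaxFrames));
  if (count == v8::internal::OS::kStackWalkError) return;  // walk failed
  for (int i = 0; i < count; i++) {
    // text was formatted and NUL-terminated by the platform implementation.
    v8::internal::OS::Print("#%d %p %s\n",
                            i, frames[i].address, frames[i].text);
  }
}
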
diff --git a/src/runtime.cc b/src/runtime.cc
index a698445..c09fb1d 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -499,10 +499,7 @@
     // Update the functions literal and return the boilerplate.
     literals->set(literals_index, *boilerplate);
   }
-
-  Handle<Object> copy = JSObject::DeepCopy(Handle<JSObject>::cast(boilerplate));
-  RETURN_IF_EMPTY_HANDLE(isolate, copy);
-  return *copy;
+  return JSObject::cast(*boilerplate)->DeepCopy(isolate);
 }
 
 
@@ -567,10 +564,8 @@
       literals_index, elements);
   RETURN_IF_EMPTY_HANDLE(isolate, site);
 
-  Handle<JSObject> boilerplate(JSObject::cast(site->transition_info()));
-  Handle<JSObject> copy = JSObject::DeepCopy(boilerplate);
-  RETURN_IF_EMPTY_HANDLE(isolate, copy);
-  return *copy;
+  JSObject* boilerplate = JSObject::cast(site->transition_info());
+  return boilerplate->DeepCopy(isolate);
 }
 
 
@@ -14791,7 +14786,8 @@
 }
 
 
-void Runtime::PerformGC(Object* result, Isolate* isolate) {
+void Runtime::PerformGC(Object* result) {
+  Isolate* isolate = Isolate::Current();
   Failure* failure = Failure::cast(result);
   if (failure->IsRetryAfterGC()) {
     if (isolate->heap()->new_space()->AddFreshPage()) {
diff --git a/src/runtime.h b/src/runtime.h
index 959d13f..60c6677 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -838,7 +838,7 @@
       JSArrayBuffer* phantom_array_buffer);
 
   // Helper functions used by stubs.
-  static void PerformGC(Object* result, Isolate* isolate);
+  static void PerformGC(Object* result);
 
   // Used in runtime.cc and hydrogen's VisitArrayLiteral.
   static Handle<Object> CreateArrayLiteralBoilerplate(
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 16028d8..63cb42b 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -572,7 +572,8 @@
                                        Register receiver,
                                        Register scratch1,
                                        Register scratch2,
-                                       Label* miss_label);
+                                       Label* miss_label,
+                                       bool support_wrappers);
 
   static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
                                             Register receiver,
diff --git a/src/unique.h b/src/unique.h
index 38cc336..7ae704a 100644
--- a/src/unique.h
+++ b/src/unique.h
@@ -64,10 +64,6 @@
     handle_ = handle;
   }
 
-  // TODO(titzer): this is a hack to migrate to Unique<T> incrementally.
-  Unique(Address raw_address, Handle<T> handle)
-    : raw_address_(raw_address), handle_(handle) { }
-
   // Constructor for handling automatic up casting.
   // Ex. Unique<JSFunction> can be passed when Unique<Object> is expected.
   template <class S> Unique(Unique<S> uniq) {
@@ -142,7 +138,7 @@
   }
 
   // Compare this set against another set. O(|this|).
-  bool Equals(UniqueSet<T>* that) const {
+  bool Equals(UniqueSet<T>* that) {
     if (that->size_ != this->size_) return false;
     for (int i = 0; i < this->size_; i++) {
       if (this->array_[i] != that->array_[i]) return false;
@@ -150,17 +146,8 @@
     return true;
   }
 
-  template <typename U>
-  bool Contains(Unique<U> elem) const {
-    // TODO(titzer): use binary search for larger sets.
-    for (int i = 0; i < size_; i++) {
-      if (this->array_[i] == elem) return true;
-    }
-    return false;
-  }
-
   // Check if this set is a subset of the given set. O(|this| + |that|).
-  bool IsSubset(UniqueSet<T>* that) const {
+  bool IsSubset(UniqueSet<T>* that) {
     if (that->size_ < this->size_) return false;
     int j = 0;
     for (int i = 0; i < this->size_; i++) {
@@ -176,7 +163,7 @@
 
   // Returns a new set representing the intersection of this set and the other.
   // O(|this| + |that|).
-  UniqueSet<T>* Intersect(UniqueSet<T>* that, Zone* zone) const {
+  UniqueSet<T>* Intersect(UniqueSet<T>* that, Zone* zone) {
     if (that->size_ == 0 || this->size_ == 0) return new(zone) UniqueSet<T>();
 
     UniqueSet<T>* out = new(zone) UniqueSet<T>();
@@ -203,7 +190,7 @@
 
   // Returns a new set representing the union of this set and the other.
   // O(|this| + |that|).
-  UniqueSet<T>* Union(UniqueSet<T>* that, Zone* zone) const {
+  UniqueSet<T>* Union(UniqueSet<T>* that, Zone* zone) {
     if (that->size_ == 0) return this->Copy(zone);
     if (this->size_ == 0) return that->Copy(zone);
 
@@ -235,7 +222,7 @@
   }
 
   // Makes an exact copy of this set. O(|this| + |that|).
-  UniqueSet<T>* Copy(Zone* zone) const {
+  UniqueSet<T>* Copy(Zone* zone) {
     UniqueSet<T>* copy = new(zone) UniqueSet<T>();
     copy->size_ = this->size_;
     copy->capacity_ = this->size_;
@@ -244,15 +231,10 @@
     return copy;
   }
 
-  inline int size() const {
+  inline int size() {
     return size_;
   }
 
-  inline Unique<T> at(int index) const {
-    ASSERT(index >= 0 && index < size_);
-    return array_[index];
-  }
-
  private:
   // These sets should be small, since operations are implemented with simple
   // linear algorithms. Enforce a maximum size.
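
As the comment above notes, UniqueSet<T> targets small sets and implements its operations as linear scans. A rough usage sketch under that assumption, drawing only on the signatures kept in this header (the function and variable names are illustrative, not from the patch):

static bool UniqueSetSketch(Isolate* isolate) {
  Factory* factory = isolate->factory();
  Zone zone(isolate);
  Unique<String> name_a(factory->InternalizeUtf8String("A"));
  Unique<String> name_b(factory->InternalizeUtf8String("B"));
  UniqueSet<String>* a = new(&zone) UniqueSet<String>();
  UniqueSet<String>* b = new(&zone) UniqueSet<String>();
  a->Add(name_a, &zone);                         // linear-time insert
  b->Add(name_b, &zone);
  UniqueSet<String>* both = a->Union(b, &zone);  // O(|a| + |b|)
  return both->IsSubset(a) && a->Equals(b);      // both linear as well
}
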
diff --git a/src/version.cc b/src/version.cc
index 5d04816..35f7889 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -35,7 +35,7 @@
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     21
 #define BUILD_NUMBER      16
-#define PATCH_LEVEL       0
+#define PATCH_LEVEL       1
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 20895f9..81721c2 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -600,8 +600,6 @@
   // the stub returns.
   __ subq(Operand(rsp, 0), Immediate(5));
   __ Pushad();
-  __ movq(arg_reg_2,
-          ExternalReference::isolate_address(masm->isolate()));
   __ movq(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
   {  // NOLINT
     FrameScope scope(masm, StackFrame::MANUAL);
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index d1130ad..51e1a53 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -1009,7 +1009,7 @@
   __ movsd(xmm0, FieldOperand(input, HeapNumber::kValueOffset));
   // Convert, convert back, and compare the two doubles' bits.
   __ cvttsd2siq(scratch2, xmm0);
-  __ Cvtlsi2sd(xmm1, scratch2);
+  __ cvtlsi2sd(xmm1, scratch2);
   __ movq(scratch1, xmm0);
   __ movq(scratch2, xmm1);
   __ cmpq(scratch1, scratch2);
@@ -1145,7 +1145,7 @@
     // Then load the bits of the double into rbx.
     __ SmiToInteger32(rax, rax);
     __ subq(rsp, Immediate(kDoubleSize));
-    __ Cvtlsi2sd(xmm1, rax);
+    __ cvtlsi2sd(xmm1, rax);
     __ movsd(Operand(rsp, 0), xmm1);
     __ movq(rbx, xmm1);
     __ movq(rdx, xmm1);
@@ -1477,9 +1477,9 @@
 
 void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
   __ SmiToInteger32(kScratchRegister, rdx);
-  __ Cvtlsi2sd(xmm0, kScratchRegister);
+  __ cvtlsi2sd(xmm0, kScratchRegister);
   __ SmiToInteger32(kScratchRegister, rax);
-  __ Cvtlsi2sd(xmm1, kScratchRegister);
+  __ cvtlsi2sd(xmm1, kScratchRegister);
 }
 
 
@@ -1503,12 +1503,12 @@
 
   __ bind(&load_smi_rdx);
   __ SmiToInteger32(kScratchRegister, rdx);
-  __ Cvtlsi2sd(xmm0, kScratchRegister);
+  __ cvtlsi2sd(xmm0, kScratchRegister);
   __ JumpIfNotSmi(rax, &load_nonsmi_rax);
 
   __ bind(&load_smi_rax);
   __ SmiToInteger32(kScratchRegister, rax);
-  __ Cvtlsi2sd(xmm1, kScratchRegister);
+  __ cvtlsi2sd(xmm1, kScratchRegister);
   __ bind(&done);
 }
 
@@ -1541,7 +1541,7 @@
   __ cvttsd2siq(smi_result, xmm0);
   // Check if conversion was successful by converting back and
   // comparing to the original double's bits.
-  __ Cvtlsi2sd(xmm1, smi_result);
+  __ cvtlsi2sd(xmm1, smi_result);
   __ movq(kScratchRegister, xmm1);
   __ cmpq(scratch2, kScratchRegister);
   __ j(not_equal, on_not_smis);
@@ -1560,7 +1560,7 @@
   __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
   __ movq(scratch2, xmm0);
   __ cvttsd2siq(smi_result, xmm0);
-  __ Cvtlsi2sd(xmm1, smi_result);
+  __ cvtlsi2sd(xmm1, smi_result);
   __ movq(kScratchRegister, xmm1);
   __ cmpq(scratch2, kScratchRegister);
   __ j(not_equal, on_not_smis);
@@ -1603,7 +1603,7 @@
 
   // Save 1 in double_result - we need this several times later on.
   __ movq(scratch, Immediate(1));
-  __ Cvtlsi2sd(double_result, scratch);
+  __ cvtlsi2sd(double_result, scratch);
 
   if (exponent_type_ == ON_STACK) {
     Label base_is_smi, unpack_exponent;
@@ -1623,7 +1623,7 @@
 
     __ bind(&base_is_smi);
     __ SmiToInteger32(base, base);
-    __ Cvtlsi2sd(double_base, base);
+    __ cvtlsi2sd(double_base, base);
     __ bind(&unpack_exponent);
 
     __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
@@ -1812,7 +1812,7 @@
   // and may not have contained the exponent value in the first place when the
   // input was a smi.  We reset it with exponent value before bailing out.
   __ j(not_equal, &done);
-  __ Cvtlsi2sd(double_exponent, exponent);
+  __ cvtlsi2sd(double_exponent, exponent);
 
   // Returning or bailing out.
   Counters* counters = masm->isolate()->counters();
@@ -1902,7 +1902,8 @@
     receiver = rax;
   }
 
-  StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss);
+  StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss,
+                                         support_wrapper_);
   __ bind(&miss);
   StubCompiler::TailCallBuiltin(
       masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
@@ -3618,7 +3619,6 @@
     // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
     // stack is known to be aligned. This function takes one argument which is
     // passed in register.
-    __ movq(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
     __ movq(arg_reg_1, rax);
     __ movq(kScratchRegister,
             ExternalReference::perform_gc_function(masm->isolate()));
@@ -5376,7 +5376,7 @@
   __ jmp(&left, Label::kNear);
   __ bind(&right_smi);
   __ SmiToInteger32(rcx, rax);  // Can't clobber rax yet.
-  __ Cvtlsi2sd(xmm1, rcx);
+  __ cvtlsi2sd(xmm1, rcx);
 
   __ bind(&left);
   __ JumpIfSmi(rdx, &left_smi, Label::kNear);
@@ -5386,7 +5386,7 @@
   __ jmp(&done);
   __ bind(&left_smi);
   __ SmiToInteger32(rcx, rdx);  // Can't clobber rdx yet.
-  __ Cvtlsi2sd(xmm0, rcx);
+  __ cvtlsi2sd(xmm0, rcx);
 
   __ bind(&done);
   // Compare operands
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index b3f4eaf..24773c2 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -386,7 +386,7 @@
   // rbx: current element (smi-tagged)
   __ JumpIfNotSmi(rbx, &convert_hole);
   __ SmiToInteger32(rbx, rbx);
-  __ Cvtlsi2sd(xmm0, rbx);
+  __ cvtlsi2sd(xmm0, rbx);
   __ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
            xmm0);
   __ jmp(&entry);
@@ -723,8 +723,7 @@
 }
 
 
-void Code::PatchPlatformCodeAge(Isolate* isolate,
-                                byte* sequence,
+void Code::PatchPlatformCodeAge(byte* sequence,
                                 Code::Age age,
                                 MarkingParity parity) {
   uint32_t young_length;
@@ -733,7 +732,7 @@
     CopyBytes(sequence, young_sequence, young_length);
     CPU::FlushICache(sequence, young_length);
   } else {
-    Code* stub = GetCodeAgeStub(isolate, age, parity);
+    Code* stub = GetCodeAgeStub(age, parity);
     CodePatcher patcher(sequence, young_length);
     patcher.masm()->call(stub->instruction_start());
     for (int i = 0;
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 6f4d3e3..9dca6b3 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -3520,7 +3520,7 @@
     __ bind(&negative_sign);
     // Truncate, then compare and compensate.
     __ cvttsd2si(output_reg, input_reg);
-    __ Cvtlsi2sd(xmm_scratch, output_reg);
+    __ cvtlsi2sd(xmm_scratch, output_reg);
     __ ucomisd(input_reg, xmm_scratch);
     __ j(equal, &done, Label::kNear);
     __ subl(output_reg, Immediate(1));
@@ -3569,7 +3569,7 @@
   __ RecordComment("D2I conversion overflow");
   DeoptimizeIf(equal, instr->environment());
 
-  __ Cvtlsi2sd(xmm_scratch, output_reg);
+  __ cvtlsi2sd(xmm_scratch, output_reg);
   __ ucomisd(input_reg, xmm_scratch);
   __ j(equal, &restore, Label::kNear);
   __ subl(output_reg, Immediate(1));
@@ -4449,9 +4449,9 @@
   LOperand* output = instr->result();
   ASSERT(output->IsDoubleRegister());
   if (input->IsRegister()) {
-    __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
+    __ cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
   } else {
-    __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
+    __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
   }
 }
 
@@ -4623,7 +4623,7 @@
                                 bool deoptimize_on_minus_zero,
                                 LEnvironment* env,
                                 NumberUntagDMode mode) {
-  Label convert, load_smi, done;
+  Label load_smi, done;
 
   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
     // Smi check.
@@ -4632,17 +4632,25 @@
     // Heap number map check.
     __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                    Heap::kHeapNumberMapRootIndex);
-
-    // On x64 it is safe to load at heap number offset before evaluating the map
-    // check, since all heap objects are at least two words long.
-    __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
-
-    if (can_convert_undefined_to_nan) {
-      __ j(not_equal, &convert);
-    } else {
+    if (!can_convert_undefined_to_nan) {
       DeoptimizeIf(not_equal, env);
-    }
+    } else {
+      Label heap_number, convert;
+      __ j(equal, &heap_number, Label::kNear);
 
+      // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
+      __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
+      DeoptimizeIf(not_equal, env);
+
+      __ bind(&convert);
+      __ xorps(result_reg, result_reg);
+      __ divsd(result_reg, result_reg);
+      __ jmp(&done, Label::kNear);
+
+      __ bind(&heap_number);
+    }
+    // Heap number to XMM conversion.
+    __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
     if (deoptimize_on_minus_zero) {
       XMMRegister xmm_scratch = xmm0;
       __ xorps(xmm_scratch, xmm_scratch);
@@ -4653,18 +4661,6 @@
       DeoptimizeIf(not_zero, env);
     }
     __ jmp(&done, Label::kNear);
-
-    if (can_convert_undefined_to_nan) {
-      __ bind(&convert);
-
-      // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
-      __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
-      DeoptimizeIf(not_equal, env);
-
-      __ xorps(result_reg, result_reg);
-      __ divsd(result_reg, result_reg);
-      __ jmp(&done, Label::kNear);
-    }
   } else {
     ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
   }
@@ -4672,7 +4668,7 @@
   // Smi to XMM conversion
   __ bind(&load_smi);
   __ SmiToInteger32(kScratchRegister, input_reg);
-  __ Cvtlsi2sd(result_reg, kScratchRegister);
+  __ cvtlsi2sd(result_reg, kScratchRegister);
   __ bind(&done);
 }
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index fa8cf18..69abc54 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -936,18 +936,6 @@
 }
 
 
-void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
-  xorps(dst, dst);
-  cvtlsi2sd(dst, src);
-}
-
-
-void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
-  xorps(dst, dst);
-  cvtlsi2sd(dst, src);
-}
-
-
 void MacroAssembler::Set(Register dst, int64_t x) {
   if (x == 0) {
     xorl(dst, dst);
@@ -2929,7 +2917,7 @@
   // Value is a smi. convert to a double and store.
   // Preserve original value.
   SmiToInteger32(kScratchRegister, maybe_number);
-  Cvtlsi2sd(xmm_scratch, kScratchRegister);
+  cvtlsi2sd(xmm_scratch, kScratchRegister);
   movsd(FieldOperand(elements, index, times_8,
                      FixedDoubleArray::kHeaderSize - elements_offset),
         xmm_scratch);
@@ -3062,7 +3050,7 @@
                                Label* conversion_failed,
                                Label::Distance dst) {
   cvttsd2si(result_reg, input_reg);
-  Cvtlsi2sd(xmm0, result_reg);
+  cvtlsi2sd(xmm0, result_reg);
   ucomisd(xmm0, input_reg);
   j(not_equal, conversion_failed, dst);
   j(parity_even, conversion_failed, dst);  // NaN.
@@ -3099,7 +3087,7 @@
 
   movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
   cvttsd2si(result_reg, xmm0);
-  Cvtlsi2sd(temp, result_reg);
+  cvtlsi2sd(temp, result_reg);
   ucomisd(xmm0, temp);
   RecordComment("Deferred TaggedToI: lost precision");
   j(not_equal, lost_precision, dst);
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index a1b04d5..09c8a80 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -784,12 +784,6 @@
   void Set(Register dst, int64_t x);
   void Set(const Operand& dst, int64_t x);
 
-  // cvtsi2sd instruction only writes to the low 64-bit of dst register, which
-  // hinders register renaming and makes dependence chains longer. So we use
-  // xorps to clear the dst register before cvtsi2sd to solve this issue.
-  void Cvtlsi2sd(XMMRegister dst, Register src);
-  void Cvtlsi2sd(XMMRegister dst, const Operand& src);
-
   // Move if the registers are not identical.
   void Move(Register target, Register source);
 
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 6cd2487..95276d5 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -304,28 +304,32 @@
                                             Register receiver,
                                             Register scratch1,
                                             Register scratch2,
-                                            Label* miss) {
+                                            Label* miss,
+                                            bool support_wrappers) {
   Label check_wrapper;
 
   // Check if the object is a string leaving the instance type in the
   // scratch register.
-  GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
+  GenerateStringCheck(masm, receiver, scratch1, miss,
+                      support_wrappers ? &check_wrapper : miss);
 
   // Load length directly from the string.
   __ movq(rax, FieldOperand(receiver, String::kLengthOffset));
   __ ret(0);
 
-  // Check if the object is a JSValue wrapper.
-  __ bind(&check_wrapper);
-  __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
-  __ j(not_equal, miss);
+  if (support_wrappers) {
+    // Check if the object is a JSValue wrapper.
+    __ bind(&check_wrapper);
+    __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
+    __ j(not_equal, miss);
 
-  // Check if the wrapped value is a string and load the length
-  // directly if it is.
-  __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
-  GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
-  __ movq(rax, FieldOperand(scratch2, String::kLengthOffset));
-  __ ret(0);
+    // Check if the wrapped value is a string and load the length
+    // directly if it is.
+    __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
+    GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
+    __ movq(rax, FieldOperand(scratch2, String::kLengthOffset));
+    __ ret(0);
+  }
 }
 
 
@@ -838,7 +842,7 @@
 
     __ JumpIfNotSmi(value_reg, &heap_number);
     __ SmiToInteger32(scratch1, value_reg);
-    __ Cvtlsi2sd(xmm0, scratch1);
+    __ cvtlsi2sd(xmm0, scratch1);
     __ jmp(&do_store);
 
     __ bind(&heap_number);
@@ -992,7 +996,7 @@
     Label do_store, heap_number;
     __ JumpIfNotSmi(value_reg, &heap_number);
     __ SmiToInteger32(scratch2, value_reg);
-    __ Cvtlsi2sd(xmm0, scratch2);
+    __ cvtlsi2sd(xmm0, scratch2);
     __ jmp(&do_store);
 
     __ bind(&heap_number);
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index 9e4e907..9d74011 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -814,7 +814,7 @@
   obj->SetElement(1, *second, NONE, kNonStrictMode)->ToObjectChecked();
 
   // Make the clone.
-  Handle<JSObject> clone = JSObject::Copy(obj);
+  Handle<JSObject> clone = Copy(obj);
   CHECK(!clone.is_identical_to(obj));
 
   CHECK_EQ(obj->GetElement(isolate, 0), clone->GetElement(isolate, 0));
diff --git a/test/cctest/test-unique.cc b/test/cctest/test-unique.cc
index d482a33..1d26858 100644
--- a/test/cctest/test-unique.cc
+++ b/test/cctest/test-unique.cc
@@ -146,74 +146,6 @@
 }
 
 
-TEST(UniqueSet_Contains) {
-  CcTest::InitializeVM();
-  Isolate* isolate = Isolate::Current();
-  Factory* factory = isolate->factory();
-  HandleScope sc(isolate);
-
-  Unique<String> A(factory->InternalizeUtf8String("A"));
-  Unique<String> B(factory->InternalizeUtf8String("B"));
-  Unique<String> C(factory->InternalizeUtf8String("C"));
-
-  Zone zone(isolate);
-
-  UniqueSet<String>* set = new(&zone) UniqueSet<String>();
-
-  CHECK_EQ(0, set->size());
-  set->Add(A, &zone);
-  CHECK(set->Contains(A));
-  CHECK(!set->Contains(B));
-  CHECK(!set->Contains(C));
-
-  set->Add(A, &zone);
-  CHECK(set->Contains(A));
-  CHECK(!set->Contains(B));
-  CHECK(!set->Contains(C));
-
-  set->Add(B, &zone);
-  CHECK(set->Contains(A));
-  CHECK(set->Contains(B));
-
-  set->Add(C, &zone);
-  CHECK(set->Contains(A));
-  CHECK(set->Contains(B));
-  CHECK(set->Contains(C));
-}
-
-
-TEST(UniqueSet_At) {
-  CcTest::InitializeVM();
-  Isolate* isolate = Isolate::Current();
-  Factory* factory = isolate->factory();
-  HandleScope sc(isolate);
-
-  Unique<String> A(factory->InternalizeUtf8String("A"));
-  Unique<String> B(factory->InternalizeUtf8String("B"));
-  Unique<String> C(factory->InternalizeUtf8String("C"));
-
-  Zone zone(isolate);
-
-  UniqueSet<String>* set = new(&zone) UniqueSet<String>();
-
-  CHECK_EQ(0, set->size());
-  set->Add(A, &zone);
-  CHECK(A == set->at(0));
-
-  set->Add(A, &zone);
-  CHECK(A == set->at(0));
-
-  set->Add(B, &zone);
-  CHECK(A == set->at(0) || B == set->at(0));
-  CHECK(A == set->at(1) || B == set->at(1));
-
-  set->Add(C, &zone);
-  CHECK(A == set->at(0) || B == set->at(0) || C == set->at(0));
-  CHECK(A == set->at(1) || B == set->at(1) || C == set->at(1));
-  CHECK(A == set->at(2) || B == set->at(2) || C == set->at(2));
-}
-
-
 template <class T>
 static void CHECK_SETS(
     UniqueSet<T>* set1, UniqueSet<T>* set2, bool expected) {
diff --git a/test/mjsunit/harmony/object-observe.js b/test/mjsunit/harmony/object-observe.js
index f982a66..75f0ff8 100644
--- a/test/mjsunit/harmony/object-observe.js
+++ b/test/mjsunit/harmony/object-observe.js
@@ -110,16 +110,14 @@
 
 
 // Object.observe
-assertThrows(function() { Object.observe("non-object", observer.callback); },
-             TypeError);
+assertThrows(function() { Object.observe("non-object", observer.callback); }, TypeError);
 assertThrows(function() { Object.observe(obj, nonFunction); }, TypeError);
 assertThrows(function() { Object.observe(obj, frozenFunction); }, TypeError);
-assertEquals(obj, Object.observe(obj, observer.callback, [1]));
-assertEquals(obj, Object.observe(obj, observer.callback, [true]));
-assertEquals(obj, Object.observe(obj, observer.callback, ['foo', null]));
-assertEquals(obj, Object.observe(obj, observer.callback, [undefined]));
-assertEquals(obj, Object.observe(obj, observer.callback,
-             ['foo', 'bar', 'baz']));
+assertThrows(function() { Object.observe(obj, function() {}, 1); }, TypeError);
+assertThrows(function() { Object.observe(obj, function() {}, [undefined]); }, TypeError);
+assertThrows(function() { Object.observe(obj, function() {}, [1]); }, TypeError);
+assertThrows(function() { Object.observe(obj, function() {}, ['foo', null]); }, TypeError);
+assertEquals(obj, Object.observe(obj, observer.callback, ['foo', 'bar', 'baz']));
 assertEquals(obj, Object.observe(obj, observer.callback, []));
 assertEquals(obj, Object.observe(obj, observer.callback, undefined));
 assertEquals(obj, Object.observe(obj, observer.callback));
@@ -204,25 +202,6 @@
   { object: obj, name: 'bar', type: 'deleted', expando2: 'str' }
 ]);
 
-// Non-string accept values are coerced to strings
-reset();
-Object.observe(obj, observer.callback, [true, 1, null, undefined]);
-notifier = Object.getNotifier(obj);
-notifier.notify({ type: 'true' });
-notifier.notify({ type: 'false' });
-notifier.notify({ type: '1' });
-notifier.notify({ type: '-1' });
-notifier.notify({ type: 'null' });
-notifier.notify({ type: 'nill' });
-notifier.notify({ type: 'undefined' });
-notifier.notify({ type: 'defined' });
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
-  { object: obj, type: 'true' },
-  { object: obj, type: '1' },
-  { object: obj, type: 'null' },
-  { object: obj, type: 'undefined' }
-]);
 
 // No delivery takes place if no records are pending
 reset();
@@ -328,7 +307,7 @@
 
 // Accept
 reset();
-Object.observe(obj, observer.callback, ['somethingElse']);
+Object.observe(obj, observer.callback, []);
 Object.getNotifier(obj).notify({
   type: 'new'
 });
@@ -1254,75 +1233,6 @@
   { object: array, name: '0', type: 'updated', oldValue: 2 },
 ]);
 
-// Splice emitted after Array mutation methods
-function MockArray(initial, observer) {
-  for (var i = 0; i < initial.length; i++)
-    this[i] = initial[i];
-
-  this.length_ = initial.length;
-  this.observer = observer;
-}
-MockArray.prototype = {
-  set length(length) {
-    Object.getNotifier(this).notify({ type: 'lengthChange' });
-    this.length_ = length;
-    Object.observe(this, this.observer.callback, ['splice']);
-  },
-  get length() {
-    return this.length_;
-  }
-}
-
-reset();
-var array = new MockArray([], observer);
-Object.observe(array, observer.callback, ['lengthChange']);
-Array.prototype.push.call(array, 1);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
-  { object: array, type: 'lengthChange' },
-  { object: array, type: 'splice', index: 0, removed: [], addedCount: 1 },
-]);
-
-reset();
-var array = new MockArray([1], observer);
-Object.observe(array, observer.callback, ['lengthChange']);
-Array.prototype.pop.call(array);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
-  { object: array, type: 'lengthChange' },
-  { object: array, type: 'splice', index: 0, removed: [1], addedCount: 0 },
-]);
-
-reset();
-var array = new MockArray([1], observer);
-Object.observe(array, observer.callback, ['lengthChange']);
-Array.prototype.shift.call(array);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
-  { object: array, type: 'lengthChange' },
-  { object: array, type: 'splice', index: 0, removed: [1], addedCount: 0 },
-]);
-
-reset();
-var array = new MockArray([], observer);
-Object.observe(array, observer.callback, ['lengthChange']);
-Array.prototype.unshift.call(array, 1);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
-  { object: array, type: 'lengthChange' },
-  { object: array, type: 'splice', index: 0, removed: [], addedCount: 1 },
-]);
-
-reset();
-var array = new MockArray([0, 1, 2], observer);
-Object.observe(array, observer.callback, ['lengthChange']);
-Array.prototype.splice.call(array, 1, 1);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
-  { object: array, type: 'lengthChange' },
-  { object: array, type: 'splice', index: 1, removed: [1], addedCount: 0 },
-]);
-
 //
 // === PLAIN OBJECTS ===
 //
diff --git a/test/mjsunit/lithium/SeqStringSetChar.js b/test/mjsunit/lithium/SeqStringSetChar.js
deleted file mode 100644
index 3c890a8..0000000
--- a/test/mjsunit/lithium/SeqStringSetChar.js
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax
-
-function MyStringFromCharCode(code, i) {
-  var one_byte = %NewString(3, true);
-  %_OneByteSeqStringSetChar(one_byte, 0, code);
-  %_OneByteSeqStringSetChar(one_byte, 1, code);
-  %_OneByteSeqStringSetChar(one_byte, i, code);
-  var two_byte = %NewString(3, false);
-  %_TwoByteSeqStringSetChar(two_byte, 0, code);
-  %_TwoByteSeqStringSetChar(two_byte, 1, code);
-  %_TwoByteSeqStringSetChar(two_byte, i, code);
-  return one_byte + two_byte;
-}
-
-MyStringFromCharCode(65, 2);
-var r1 = MyStringFromCharCode(65, 2);
-%OptimizeFunctionOnNextCall(MyStringFromCharCode);
-var r2 = MyStringFromCharCode(65, 2);
-assertEquals(r1, r2);