Version 3.22.16

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@17277 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 29dcd40..f1b1062 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,8 @@
+2013-10-21: Version 3.22.16
+
+        Performance and stability improvements on all platforms.
+
+
 2013-10-18: Version 3.22.15
 
         Enabled calling the SetReference* & SetObjectGroupId functions with a
diff --git a/src/api.cc b/src/api.cc
index e498b43..f3bc294 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -3526,9 +3526,8 @@
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ON_BAILOUT(isolate, "v8::Object::HasRealNamedProperty()",
              return false);
-  return Utils::OpenHandle(this)->HasRealNamedProperty(
-      isolate,
-      *Utils::OpenHandle(*key));
+  return i::JSObject::HasRealNamedProperty(Utils::OpenHandle(this),
+                                           Utils::OpenHandle(*key));
 }
 
 
@@ -3536,7 +3535,7 @@
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ON_BAILOUT(isolate, "v8::Object::HasRealIndexedProperty()",
              return false);
-  return Utils::OpenHandle(this)->HasRealElementProperty(isolate, index);
+  return i::JSObject::HasRealElementProperty(Utils::OpenHandle(this), index);
 }
 
 
@@ -3546,9 +3545,8 @@
              "v8::Object::HasRealNamedCallbackProperty()",
              return false);
   ENTER_V8(isolate);
-  return Utils::OpenHandle(this)->HasRealNamedCallbackProperty(
-      isolate,
-      *Utils::OpenHandle(*key));
+  return i::JSObject::HasRealNamedCallbackProperty(Utils::OpenHandle(this),
+                                                   Utils::OpenHandle(*key));
 }
 
 
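
These v8::Object predicates now call static, handle-taking helpers on i::JSObject instead of instance methods on the raw receiver. A minimal sketch of that calling pattern, using hypothetical simplified Handle and JSObject types (the real declarations live in src/handles.h and src/objects.h):

  #include <cassert>
  #include <cstring>

  // Hypothetical, simplified stand-ins for illustration only.
  template <class T>
  struct Handle {
    explicit Handle(T* raw) : location_(raw) {}
    T* operator->() const { return location_; }
    T* location_;
  };

  struct Name { const char* value; };

  struct JSObject {
    const char* own_property;
    // Static, handle-taking helper mirroring the shape of
    // i::JSObject::HasRealNamedProperty(Handle<JSObject>, Handle<Name>).
    static bool HasRealNamedProperty(Handle<JSObject> object, Handle<Name> key) {
      return object->own_property != NULL &&
             std::strcmp(object->own_property, key->value) == 0;
    }
  };

  int main() {
    JSObject obj = { "x" };
    Name key = { "x" };
    assert(JSObject::HasRealNamedProperty(Handle<JSObject>(&obj),
                                          Handle<Name>(&key)));
    return 0;
  }

Passing handles rather than raw pointers keeps the receiver and key valid across any allocation the helper might perform, which is presumably the point of the handlified form.
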
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 5c5231b..bdcdbf8 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -192,7 +192,7 @@
   descriptor->register_param_count_ = 2;
   if (constant_stack_parameter_count != 0) {
     // stack param count needs (constructor pointer, and single argument)
-    descriptor->stack_parameter_count_ = &r0;
+    descriptor->stack_parameter_count_ = r0;
   }
   descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
   descriptor->register_params_ = registers;
@@ -214,7 +214,7 @@
 
   if (constant_stack_parameter_count != 0) {
     // stack param count needs (constructor pointer, and single argument)
-    descriptor->stack_parameter_count_ = &r0;
+    descriptor->stack_parameter_count_ = r0;
   }
   descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
   descriptor->register_params_ = registers;
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 0ed84ff..9339c5f 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -107,10 +107,7 @@
   ApiFunction function(descriptor->deoptimization_handler_);
   ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
   intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
-  int params = descriptor->register_param_count_;
-  if (descriptor->stack_parameter_count_ != NULL) {
-    params++;
-  }
+  int params = descriptor->environment_length();
   output_frame->SetRegister(r0.code(), params);
   output_frame->SetRegister(r1.code(), handler);
 }
diff --git a/src/builtins.cc b/src/builtins.cc
index 03fac2d..b614904 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -273,14 +273,20 @@
     MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
   }
 
+  FixedArrayBase* new_elms = FixedArrayBase::cast(HeapObject::FromAddress(
+      elms->address() + size_delta));
   HeapProfiler* profiler = heap->isolate()->heap_profiler();
   if (profiler->is_profiling()) {
     profiler->ObjectMoveEvent(elms->address(),
-                              elms->address() + size_delta,
-                              elms->Size());
+                              new_elms->address(),
+                              new_elms->Size());
+    if (profiler->is_tracking_allocations()) {
+      // Report filler object as a new allocation.
+      // Otherwise it will become an untracked object.
+      profiler->NewObjectEvent(elms->address(), elms->Size());
+    }
   }
-  return FixedArrayBase::cast(HeapObject::FromAddress(
-      elms->address() + to_trim * entry_size));
+  return new_elms;
 }
 
 
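
The left-trimming helper now computes the trimmed array's new start (old address plus size_delta) once, reports the move to the heap profiler, and, when allocation tracking is on, also reports the filler left behind at the old address so it does not later show up as untracked. A small standalone sketch of the address arithmetic involved (illustrative only; the real code goes through HeapObject::FromAddress):

  #include <cassert>
  #include <cstdint>

  // Illustrative only: left-trimming an array by size_delta bytes moves the
  // surviving object forward by size_delta, and a filler covers the gap.
  struct TrimResult {
    std::uintptr_t filler_start;  // old start, now occupied by a filler
    std::uintptr_t filler_size;   // == size_delta
    std::uintptr_t new_start;     // where the trimmed array now begins
  };

  TrimResult LeftTrim(std::uintptr_t old_start, std::uintptr_t size_delta) {
    TrimResult r = { old_start, size_delta, old_start + size_delta };
    return r;
  }

  int main() {
    TrimResult r = LeftTrim(0x1000, 4);  // e.g. trimming one 4-byte entry
    assert(r.new_start == 0x1004);
    assert(r.filler_start == 0x1000 && r.filler_size == 4);
    return 0;
  }

Reporting the filler as a new object keeps the profiler's address map consistent with what HeapObjectsMap::FindUntrackedObjects() later walks.
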
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index f8fde15..a695161 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -162,7 +162,7 @@
   }
 
   HInstruction* stack_parameter_count;
-  if (descriptor_->stack_parameter_count_ != NULL) {
+  if (descriptor_->stack_parameter_count_.is_valid()) {
     ASSERT(descriptor_->environment_length() == (param_count + 1));
     stack_parameter_count = New<HParameter>(param_count,
                                             HParameter::REGISTER_PARAMETER,
@@ -298,7 +298,7 @@
   // the runtime that is significantly faster than using the standard
   // stub-failure deopt mechanism.
   if (stub->IsUninitialized() && descriptor->has_miss_handler()) {
-    ASSERT(descriptor->stack_parameter_count_ == NULL);
+    ASSERT(!descriptor->stack_parameter_count_.is_valid());
     return stub->GenerateLightweightMissCode(isolate);
   }
   ElapsedTimer timer;
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 0681137..0c5f389 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -41,7 +41,7 @@
 
 CodeStubInterfaceDescriptor::CodeStubInterfaceDescriptor()
     : register_param_count_(-1),
-      stack_parameter_count_(NULL),
+      stack_parameter_count_(no_reg),
       hint_stack_parameter_count_(-1),
       function_mode_(NOT_JS_FUNCTION_STUB_MODE),
       register_params_(NULL),
diff --git a/src/code-stubs.h b/src/code-stubs.h
index d2101ae..b5f74b2 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -30,8 +30,9 @@
 
 #include "allocation.h"
 #include "assembler.h"
-#include "globals.h"
 #include "codegen.h"
+#include "globals.h"
+#include "macro-assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -280,7 +281,7 @@
 struct CodeStubInterfaceDescriptor {
   CodeStubInterfaceDescriptor();
   int register_param_count_;
-  const Register* stack_parameter_count_;
+  Register stack_parameter_count_;
   // if hint_stack_parameter_count_ > 0, the code stub can optimize the
   // return sequence. Default value is -1, which means it is ignored.
   int hint_stack_parameter_count_;
@@ -289,7 +290,7 @@
   Address deoptimization_handler_;
 
   int environment_length() const {
-    if (stack_parameter_count_ != NULL) {
+    if (stack_parameter_count_.is_valid()) {
       return register_param_count_ + 1;
     }
     return register_param_count_;
@@ -320,7 +321,7 @@
 // defined outside of the platform directories
 #define DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index) \
   ((index) == (descriptor)->register_param_count_)           \
-      ? *((descriptor)->stack_parameter_count_)              \
+      ? (descriptor)->stack_parameter_count_                 \
       : (descriptor)->register_params_[(index)]
 
 
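
With stack_parameter_count_ held by value, "no count register" is now expressed as no_reg and tested with is_valid() instead of comparing a pointer against NULL, and the platform deoptimizers below can simply ask for environment_length(). A minimal standalone sketch of that accessor, using simplified stand-in types rather than the real v8::internal ones:

  #include <cassert>

  // Hypothetical, simplified stand-ins for illustration only.
  struct Register {
    int code;  // -1 plays the role of no_reg
    bool is_valid() const { return code >= 0; }
  };

  struct CodeStubInterfaceDescriptor {
    int register_param_count_;
    Register stack_parameter_count_;

    // Mirrors the updated logic in src/code-stubs.h: one extra environment
    // slot when a dynamic stack parameter count is passed in a register.
    int environment_length() const {
      return register_param_count_ + (stack_parameter_count_.is_valid() ? 1 : 0);
    }
  };

  int main() {
    CodeStubInterfaceDescriptor d;
    d.register_param_count_ = 2;
    d.stack_parameter_count_.code = -1;  // no_reg: count is statically known
    assert(d.environment_length() == 2);
    d.stack_parameter_count_.code = 0;   // e.g. the count lives in r0/eax
    assert(d.environment_length() == 3);
    return 0;
  }
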
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index 819b5e9..2fc04fa 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -1494,7 +1494,7 @@
   }
 
   intptr_t caller_arg_count = 0;
-  bool arg_count_known = descriptor->stack_parameter_count_ == NULL;
+  bool arg_count_known = !descriptor->stack_parameter_count_.is_valid();
 
   // Build the Arguments object for the caller's parameters and a pointer to it.
   output_frame_offset -= kPointerSize;
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 379ae69..e7180a0 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -541,6 +541,12 @@
 DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
 #endif
 
+
+// heap-snapshot-generator.cc
+DEFINE_bool(heap_profiler_trace_objects, false,
+            "Dump heap object allocations/movements/size_updates")
+
+
 // v8.cc
 DEFINE_bool(use_idle_notification, true,
             "Use idle notification to reduce memory footprint.")
diff --git a/src/heap-snapshot-generator.cc b/src/heap-snapshot-generator.cc
index 444bebf..36b99a0 100644
--- a/src/heap-snapshot-generator.cc
+++ b/src/heap-snapshot-generator.cc
@@ -431,6 +431,13 @@
     // Size of an object can change during its life, so to keep information
     // about the object in entries_ consistent, we have to adjust size when the
     // object is migrated.
+    if (FLAG_heap_profiler_trace_objects) {
+      PrintF("Move object from %p to %p old size %6d new size %6d\n",
+             from,
+             to,
+             entries_.at(from_entry_info_index).size,
+             object_size);
+    }
     entries_.at(from_entry_info_index).size = object_size;
     to_entry->value = from_value;
   }
@@ -438,6 +445,12 @@
 
 
 void HeapObjectsMap::NewObject(Address addr, int size) {
+  if (FLAG_heap_profiler_trace_objects) {
+    PrintF("New object         : %p %6d. Next address is %p\n",
+           addr,
+           size,
+           addr + size);
+  }
   ASSERT(addr != NULL);
   FindOrAddEntry(addr, size, false);
 }
@@ -470,6 +483,12 @@
         static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
     EntryInfo& entry_info = entries_.at(entry_index);
     entry_info.accessed = accessed;
+    if (FLAG_heap_profiler_trace_objects) {
+      PrintF("Update object size : %p with old size %d and new size %d\n",
+             addr,
+             entry_info.size,
+             size);
+    }
     entry_info.size = size;
     return entry_info.id;
   }
@@ -488,6 +507,10 @@
 
 
 void HeapObjectsMap::UpdateHeapObjectsMap() {
+  if (FLAG_heap_profiler_trace_objects) {
+    PrintF("Begin HeapObjectsMap::UpdateHeapObjectsMap. map has %d entries.\n",
+           entries_map_.occupancy());
+  }
   heap_->CollectAllGarbage(Heap::kMakeHeapIterableMask,
                           "HeapSnapshotsCollection::UpdateHeapObjectsMap");
   HeapIterator iterator(heap_);
@@ -495,12 +518,70 @@
        obj != NULL;
        obj = iterator.next()) {
     FindOrAddEntry(obj->address(), obj->Size());
+    if (FLAG_heap_profiler_trace_objects) {
+      PrintF("Update object      : %p %6d. Next address is %p\n",
+             obj->address(),
+             obj->Size(),
+             obj->address() + obj->Size());
+    }
   }
   RemoveDeadEntries();
+  if (FLAG_heap_profiler_trace_objects) {
+    PrintF("End HeapObjectsMap::UpdateHeapObjectsMap. map has %d entries.\n",
+           entries_map_.occupancy());
+  }
 }
 
 
+namespace {
+
+
+struct HeapObjectInfo {
+  HeapObjectInfo(HeapObject* obj, int expected_size)
+    : obj(obj),
+      expected_size(expected_size) {
+  }
+
+  HeapObject* obj;
+  int expected_size;
+
+  bool IsValid() const { return expected_size == obj->Size(); }
+
+  void Print() const {
+    if (expected_size == 0) {
+      PrintF("Untracked object   : %p %6d. Next address is %p\n",
+             obj->address(),
+             obj->Size(),
+             obj->address() + obj->Size());
+    } else if (obj->Size() != expected_size) {
+      PrintF("Wrong size %6d: %p %6d. Next address is %p\n",
+             expected_size,
+             obj->address(),
+             obj->Size(),
+             obj->address() + obj->Size());
+    } else {
+      PrintF("Good object      : %p %6d. Next address is %p\n",
+             obj->address(),
+             expected_size,
+             obj->address() + obj->Size());
+    }
+  }
+};
+
+
+static int comparator(const HeapObjectInfo* a, const HeapObjectInfo* b) {
+  if (a->obj < b->obj) return -1;
+  if (a->obj > b->obj) return 1;
+  return 0;
+}
+
+
+}  // namespace
+
+
 int HeapObjectsMap::FindUntrackedObjects() {
+  List<HeapObjectInfo> heap_objects(1000);
+
   HeapIterator iterator(heap_);
   int untracked = 0;
   for (HeapObject* obj = iterator.next();
@@ -509,14 +590,56 @@
     HashMap::Entry* entry = entries_map_.Lookup(
       obj->address(), ComputePointerHash(obj->address()), false);
     if (entry == NULL) {
-      untracked++;
+      ++untracked;
+      if (FLAG_heap_profiler_trace_objects) {
+        heap_objects.Add(HeapObjectInfo(obj, 0));
+      }
     } else {
       int entry_index = static_cast<int>(
           reinterpret_cast<intptr_t>(entry->value));
       EntryInfo& entry_info = entries_.at(entry_index);
-      CHECK_EQ(obj->Size(), static_cast<int>(entry_info.size));
+      if (FLAG_heap_profiler_trace_objects) {
+        heap_objects.Add(HeapObjectInfo(obj,
+                         static_cast<int>(entry_info.size)));
+        if (obj->Size() != static_cast<int>(entry_info.size))
+          ++untracked;
+      } else {
+        CHECK_EQ(obj->Size(), static_cast<int>(entry_info.size));
+      }
     }
   }
+  if (FLAG_heap_profiler_trace_objects) {
+    PrintF("\nBegin HeapObjectsMap::FindUntrackedObjects. %d entries in map.\n",
+           entries_map_.occupancy());
+    heap_objects.Sort(comparator);
+    int last_printed_object = -1;
+    bool print_next_object = false;
+    for (int i = 0; i < heap_objects.length(); ++i) {
+      const HeapObjectInfo& object_info = heap_objects[i];
+      if (!object_info.IsValid()) {
+        ++untracked;
+        if (last_printed_object != i - 1) {
+          if (i > 0) {
+            PrintF("%d objects were skipped\n", i - 1 - last_printed_object);
+            heap_objects[i - 1].Print();
+          }
+        }
+        object_info.Print();
+        last_printed_object = i;
+        print_next_object = true;
+      } else if (print_next_object) {
+        object_info.Print();
+        print_next_object = false;
+        last_printed_object = i;
+      }
+    }
+    if (last_printed_object < heap_objects.length() - 1) {
+      PrintF("Last %d objects were skipped\n",
+             heap_objects.length() - 1 - last_printed_object);
+    }
+    PrintF("End HeapObjectsMap::FindUntrackedObjects. %d entries in map.\n\n",
+           entries_map_.occupancy());
+  }
   return untracked;
 }
 
diff --git a/src/hydrogen-load-elimination.cc b/src/hydrogen-load-elimination.cc
index 6d01ae5..f33ebe2 100644
--- a/src/hydrogen-load-elimination.cc
+++ b/src/hydrogen-load-elimination.cc
@@ -28,10 +28,14 @@
 #include "hydrogen-alias-analysis.h"
 #include "hydrogen-load-elimination.h"
 #include "hydrogen-instructions.h"
+#include "hydrogen-flow-engine.h"
 
 namespace v8 {
 namespace internal {
 
+#define GLOBAL true
+#define TRACE(x) if (FLAG_trace_load_elimination) PrintF x
+
 static const int kMaxTrackedFields = 16;
 static const int kMaxTrackedObjects = 5;
 
@@ -42,17 +46,145 @@
   HLoadNamedField* last_load_;
   HValue* last_value_;
   HFieldApproximation* next_;
+
+  // Recursively copy the entire linked list of field approximations.
+  HFieldApproximation* Copy(Zone* zone) {
+    if (this == NULL) return NULL;
+    HFieldApproximation* copy = new(zone) HFieldApproximation();
+    copy->object_ = this->object_;
+    copy->last_load_ = this->last_load_;
+    copy->last_value_ = this->last_value_;
+    copy->next_ = this->next_->Copy(zone);
+    return copy;
+  }
 };
 
 
 // The main datastructure used during load/store elimination. Each in-object
 // field is tracked separately. For each field, store a list of known field
 // values for known objects.
-class HLoadEliminationTable BASE_EMBEDDED {
+class HLoadEliminationTable : public ZoneObject {
  public:
   HLoadEliminationTable(Zone* zone, HAliasAnalyzer* aliasing)
     : zone_(zone), fields_(kMaxTrackedFields, zone), aliasing_(aliasing) { }
 
+  // The main processing of instructions.
+  HLoadEliminationTable* Process(HInstruction* instr, Zone* zone) {
+    switch (instr->opcode()) {
+      case HValue::kLoadNamedField: {
+        HLoadNamedField* l = HLoadNamedField::cast(instr);
+        TRACE((" process L%d field %d (o%d)\n",
+               instr->id(),
+               FieldOf(l->access()),
+               l->object()->ActualValue()->id()));
+        HValue* result = load(l);
+        if (result != instr) {
+          // The load can be replaced with a previous load or a value.
+          TRACE(("  replace L%d -> v%d\n", instr->id(), result->id()));
+          instr->DeleteAndReplaceWith(result);
+        }
+        break;
+      }
+      case HValue::kStoreNamedField: {
+        HStoreNamedField* s = HStoreNamedField::cast(instr);
+        TRACE((" process S%d field %d (o%d) = v%d\n",
+               instr->id(),
+               FieldOf(s->access()),
+               s->object()->ActualValue()->id(),
+               s->value()->id()));
+        HValue* result = store(s);
+        if (result == NULL) {
+          // The store is redundant. Remove it.
+          TRACE(("  remove S%d\n", instr->id()));
+          instr->DeleteAndReplaceWith(NULL);
+        }
+        break;
+      }
+      default: {
+        if (instr->CheckGVNFlag(kChangesInobjectFields)) {
+          TRACE((" kill-all i%d\n", instr->id()));
+          Kill();
+          break;
+        }
+        if (instr->CheckGVNFlag(kChangesMaps)) {
+          TRACE((" kill-maps i%d\n", instr->id()));
+          KillOffset(JSObject::kMapOffset);
+        }
+        if (instr->CheckGVNFlag(kChangesElementsKind)) {
+          TRACE((" kill-elements-kind i%d\n", instr->id()));
+          KillOffset(JSObject::kMapOffset);
+          KillOffset(JSObject::kElementsOffset);
+        }
+        if (instr->CheckGVNFlag(kChangesElementsPointer)) {
+          TRACE((" kill-elements i%d\n", instr->id()));
+          KillOffset(JSObject::kElementsOffset);
+        }
+        if (instr->CheckGVNFlag(kChangesOsrEntries)) {
+          TRACE((" kill-osr i%d\n", instr->id()));
+          Kill();
+        }
+      }
+      // Improvements possible:
+      // - learn from HCheckMaps for field 0
+      // - remove unobservable stores (write-after-write)
+      // - track cells
+      // - track globals
+      // - track roots
+    }
+    return this;
+  }
+
+  // Support for global analysis with HFlowEngine: Copy state to successor block.
+  HLoadEliminationTable* Copy(HBasicBlock* succ, Zone* zone) {
+    HLoadEliminationTable* copy =
+        new(zone) HLoadEliminationTable(zone, aliasing_);
+    copy->EnsureFields(fields_.length());
+    for (int i = 0; i < fields_.length(); i++) {
+      copy->fields_[i] = fields_[i]->Copy(zone);
+    }
+    if (FLAG_trace_load_elimination) {
+      TRACE((" copy-to B%d\n", succ->block_id()));
+      copy->Print();
+    }
+    return copy;
+  }
+
+  // Support for global analysis with HFlowEngine: Merge this state with
+  // the other incoming state.
+  HLoadEliminationTable* Merge(HBasicBlock* succ,
+      HLoadEliminationTable* that, Zone* zone) {
+    if (that->fields_.length() < fields_.length()) {
+      // Drop fields not in the other table.
+      fields_.Rewind(that->fields_.length());
+    }
+    for (int i = 0; i < fields_.length(); i++) {
+      // Merge the field approximations for like fields.
+      HFieldApproximation* approx = fields_[i];
+      HFieldApproximation* prev = NULL;
+      while (approx != NULL) {
+        // TODO(titzer): Merging is O(N * M); sort?
+        HFieldApproximation* other = that->Find(approx->object_, i);
+        if (other == NULL || !Equal(approx->last_value_, other->last_value_)) {
+          // Kill an entry that doesn't agree with the other value.
+          if (prev != NULL) {
+            prev->next_ = approx->next_;
+          } else {
+            fields_[i] = approx->next_;
+          }
+          approx = approx->next_;
+          continue;
+        }
+        prev = approx;
+        approx = approx->next_;
+      }
+    }
+    return this;
+  }
+
+  friend class HLoadEliminationEffects;  // Calls Kill() and others.
+  friend class HLoadEliminationPhase;
+
+ private:
   // Process a load instruction, updating internal table state. If a previous
   // load or store for this object and field exists, return the new value with
   // which the load should be replaced. Otherwise, return {instr}.
@@ -118,28 +250,17 @@
     }
   }
 
-  // Compute the field index for the given object access; -1 if not tracked.
-  int FieldOf(HObjectAccess access) {
-    // Only track kMaxTrackedFields in-object fields.
-    if (!access.IsInobject()) return -1;
-    return FieldOf(access.offset());
-  }
-
-  // Print this table to stdout.
-  void Print() {
-    for (int i = 0; i < fields_.length(); i++) {
-      PrintF("  field %d: ", i);
-      for (HFieldApproximation* a = fields_[i]; a != NULL; a = a->next_) {
-        PrintF("[o%d =", a->object_->id());
-        if (a->last_load_ != NULL) PrintF(" L%d", a->last_load_->id());
-        if (a->last_value_ != NULL) PrintF(" v%d", a->last_value_->id());
-        PrintF("] ");
-      }
-      PrintF("\n");
+  // Find an entry for the given object and field pair.
+  HFieldApproximation* Find(HValue* object, int field) {
+    // Search for a field approximation for this object.
+    HFieldApproximation* approx = fields_[field];
+    while (approx != NULL) {
+      if (aliasing_->MustAlias(object, approx->object_)) return approx;
+      approx = approx->next_;
     }
+    return NULL;
   }
 
- private:
   // Find or create an entry for the given object and field pair.
   HFieldApproximation* FindOrCreate(HValue* object, int field) {
     EnsureFields(field + 1);
@@ -218,110 +339,143 @@
     return approx;
   }
 
-  // Ensure internal storage for the given number of fields.
-  void EnsureFields(int num_fields) {
-    while (fields_.length() < num_fields) fields_.Add(NULL, zone_);
+  // Compute the field index for the given object access; -1 if not tracked.
+  int FieldOf(HObjectAccess access) {
+    return access.IsInobject() ? FieldOf(access.offset()) : -1;
   }
 
-  // Compute the field index for the given in-object offset.
+  // Compute the field index for the given in-object offset; -1 if not tracked.
   int FieldOf(int offset) {
     if (offset >= kMaxTrackedFields * kPointerSize) return -1;
     ASSERT((offset % kPointerSize) == 0);  // Assume aligned accesses.
     return offset / kPointerSize;
   }
 
+  // Ensure internal storage for the given number of fields.
+  void EnsureFields(int num_fields) {
+    if (fields_.length() < num_fields) {
+      fields_.AddBlock(NULL, num_fields - fields_.length(), zone_);
+    }
+  }
+
+  // Print this table to stdout.
+  void Print() {
+    for (int i = 0; i < fields_.length(); i++) {
+      PrintF("  field %d: ", i);
+      for (HFieldApproximation* a = fields_[i]; a != NULL; a = a->next_) {
+        PrintF("[o%d =", a->object_->id());
+        if (a->last_load_ != NULL) PrintF(" L%d", a->last_load_->id());
+        if (a->last_value_ != NULL) PrintF(" v%d", a->last_value_->id());
+        PrintF("] ");
+      }
+      PrintF("\n");
+    }
+  }
+
   Zone* zone_;
   ZoneList<HFieldApproximation*> fields_;
   HAliasAnalyzer* aliasing_;
 };
 
 
-void HLoadEliminationPhase::Run() {
-  for (int i = 0; i < graph()->blocks()->length(); i++) {
-    HBasicBlock* block = graph()->blocks()->at(i);
-    EliminateLoads(block);
+// Support for HFlowEngine: collect store effects within loops.
+class HLoadEliminationEffects : public ZoneObject {
+ public:
+  explicit HLoadEliminationEffects(Zone* zone)
+    : zone_(zone),
+      maps_stored_(false),
+      fields_stored_(false),
+      elements_stored_(false),
+      stores_(5, zone) { }
+
+  inline bool Disabled() {
+    return false;  // Effects are _not_ disabled.
   }
-}
 
-
-// For code de-uglification.
-#define TRACE(x) if (FLAG_trace_load_elimination) PrintF x
-
-
-// Eliminate loads and stores local to a block.
-void HLoadEliminationPhase::EliminateLoads(HBasicBlock* block) {
-  HAliasAnalyzer aliasing;
-  HLoadEliminationTable table(zone(), &aliasing);
-
-  TRACE(("-- load-elim B%d -------------------------------------------------\n",
-         block->block_id()));
-
-  for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
-    bool changed = false;
-    HInstruction* instr = it.Current();
-
+  // Process a possibly side-effecting instruction.
+  void Process(HInstruction* instr, Zone* zone) {
     switch (instr->opcode()) {
-      case HValue::kLoadNamedField: {
-        HLoadNamedField* load = HLoadNamedField::cast(instr);
-        TRACE((" process L%d field %d (o%d)\n",
-               instr->id(),
-               table.FieldOf(load->access()),
-               load->object()->ActualValue()->id()));
-        HValue* result = table.load(load);
-        if (result != instr) {
-          // The load can be replaced with a previous load or a value.
-          TRACE(("  replace L%d -> v%d\n", instr->id(), result->id()));
-          instr->DeleteAndReplaceWith(result);
-        }
-        changed = true;
+      case HValue::kStoreNamedField: {
+        stores_.Add(HStoreNamedField::cast(instr), zone_);
         break;
       }
-      case HValue::kStoreNamedField: {
-        HStoreNamedField* store = HStoreNamedField::cast(instr);
-        TRACE((" process S%d field %d (o%d) = v%d\n",
-               instr->id(),
-               table.FieldOf(store->access()),
-               store->object()->ActualValue()->id(),
-               store->value()->id()));
-        HValue* result = table.store(store);
-        if (result == NULL) {
-          // The store is redundant. Remove it.
-          TRACE(("  remove S%d\n", instr->id()));
-          instr->DeleteAndReplaceWith(NULL);
-        }
-        changed = true;
-        break;
+      case HValue::kOsrEntry: {
+        // Kill everything. Loads must not be hoisted past the OSR entry.
+        maps_stored_ = true;
+        fields_stored_ = true;
+        elements_stored_ = true;
       }
       default: {
-        if (instr->CheckGVNFlag(kChangesInobjectFields)) {
-          TRACE((" kill-all i%d\n", instr->id()));
-          table.Kill();
-          continue;
-        }
-        if (instr->CheckGVNFlag(kChangesMaps)) {
-          TRACE((" kill-maps i%d\n", instr->id()));
-          table.KillOffset(JSObject::kMapOffset);
-        }
-        if (instr->CheckGVNFlag(kChangesElementsKind)) {
-          TRACE((" kill-elements-kind i%d\n", instr->id()));
-          table.KillOffset(JSObject::kMapOffset);
-          table.KillOffset(JSObject::kElementsOffset);
-        }
-        if (instr->CheckGVNFlag(kChangesElementsPointer)) {
-          TRACE((" kill-elements i%d\n", instr->id()));
-          table.KillOffset(JSObject::kElementsOffset);
-        }
+        fields_stored_ |= instr->CheckGVNFlag(kChangesInobjectFields);
+        maps_stored_ |= instr->CheckGVNFlag(kChangesMaps);
+        maps_stored_ |= instr->CheckGVNFlag(kChangesElementsKind);
+        elements_stored_ |= instr->CheckGVNFlag(kChangesElementsKind);
+        elements_stored_ |= instr->CheckGVNFlag(kChangesElementsPointer);
       }
-    // Improvements possible:
-    // - learn from HCheckMaps for field 0
-    // - remove unobservable stores (write-after-write)
+    }
+  }
+
+  // Apply these effects to the given load elimination table.
+  void Apply(HLoadEliminationTable* table) {
+    if (fields_stored_) {
+      table->Kill();
+      return;
+    }
+    if (maps_stored_) {
+      table->KillOffset(JSObject::kMapOffset);
+    }
+    if (elements_stored_) {
+      table->KillOffset(JSObject::kElementsOffset);
     }
 
-    if (changed && FLAG_trace_load_elimination) {
-      table.Print();
+    // Kill non-agreeing fields for each store contained in these effects.
+    for (int i = 0; i < stores_.length(); i++) {
+      HStoreNamedField* s = stores_[i];
+      int field = table->FieldOf(s->access());
+      if (field >= 0) {
+        table->KillFieldInternal(s->object()->ActualValue(), field, s->value());
+      }
+    }
+  }
+
+  // Union these effects with the other effects.
+  void Union(HLoadEliminationEffects* that, Zone* zone) {
+    maps_stored_ |= that->maps_stored_;
+    fields_stored_ |= that->fields_stored_;
+    elements_stored_ |= that->elements_stored_;
+    for (int i = 0; i < that->stores_.length(); i++) {
+      stores_.Add(that->stores_[i], zone);
+    }
+  }
+
+ private:
+  Zone* zone_;
+  bool maps_stored_ : 1;
+  bool fields_stored_ : 1;
+  bool elements_stored_ : 1;
+  ZoneList<HStoreNamedField*> stores_;
+};
+
+
+// The main routine of the analysis phase. Use the HFlowEngine for either a
+// local or a global analysis.
+void HLoadEliminationPhase::Run() {
+  HFlowEngine<HLoadEliminationTable, HLoadEliminationEffects>
+    engine(graph(), zone());
+  HAliasAnalyzer aliasing;
+  HLoadEliminationTable* table =
+      new(zone()) HLoadEliminationTable(zone(), &aliasing);
+
+  if (GLOBAL) {
+    // Perform a global analysis.
+    engine.AnalyzeDominatedBlocks(graph()->blocks()->at(0), table);
+  } else {
+    // Perform only local analysis.
+    for (int i = 0; i < graph()->blocks()->length(); i++) {
+      table->Kill();
+      engine.AnalyzeOneBlock(graph()->blocks()->at(i), table);
     }
   }
 }
 
-
 } }  // namespace v8::internal
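
The block-local EliminateLoads() walk is replaced by an HFlowEngine-driven analysis: HLoadEliminationTable is the per-block state (Process/Copy/Merge above) and HLoadEliminationEffects summarizes the stores seen inside loops (Process/Apply/Union), so with GLOBAL set the pass analyzes dominated blocks instead of one block at a time. As a rough, self-contained illustration of the core idea (remember the last value per object/field pair, replace loads, drop redundant stores, kill on instructions that change in-object fields), here is a toy table; it is not the real HLoadEliminationTable:

  #include <cstdio>
  #include <map>
  #include <utility>

  // Toy model of the per-(object, field) value table; illustration only.
  struct ToyLoadEliminationTable {
    std::map<std::pair<int, int>, int> known_;

    // A store records the value; returns false when the same value is already
    // known, i.e. the store would be redundant and could be removed.
    bool Store(int object, int field, int value) {
      std::pair<int, int> key(object, field);
      std::map<std::pair<int, int>, int>::iterator it = known_.find(key);
      if (it != known_.end() && it->second == value) return false;
      known_[key] = value;
      return true;
    }

    // A load returns the known value if present, otherwise -1 (keep the load).
    int Load(int object, int field) const {
      std::map<std::pair<int, int>, int>::const_iterator it =
          known_.find(std::make_pair(object, field));
      return it == known_.end() ? -1 : it->second;
    }

    // Instructions flagged kChangesInobjectFields invalidate everything,
    // mirroring the Kill() call in Process().
    void Kill() { known_.clear(); }
  };

  int main() {
    ToyLoadEliminationTable table;
    table.Store(1, 0, 7);                                  // S: o1.f0 = v7
    std::printf("load o1.f0 -> v%d\n", table.Load(1, 0));  // replace load with v7
    std::printf("second identical store redundant: %s\n",
                table.Store(1, 0, 7) ? "no" : "yes");
    table.Kill();                             // e.g. kChangesInobjectFields seen
    std::printf("after kill -> %d\n", table.Load(1, 0));   // -1: keep the load
    return 0;
  }

In the real pass, Merge() additionally drops any field approximation the two incoming states disagree on, as the per-field walk over the HFieldApproximation lists above shows.
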
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 37860a3..8cf73c3 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -2067,6 +2067,7 @@
 
 
 void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
+  ASSERT(IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   EMIT(0xF2);
   EMIT(0x0F);
@@ -2076,6 +2077,7 @@
 
 
 void Assembler::andpd(XMMRegister dst, XMMRegister src) {
+  ASSERT(IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   EMIT(0x66);
   EMIT(0x0F);
@@ -2085,6 +2087,7 @@
 
 
 void Assembler::orpd(XMMRegister dst, XMMRegister src) {
+  ASSERT(IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   EMIT(0x66);
   EMIT(0x0F);
@@ -2247,18 +2250,6 @@
 }
 
 
-void Assembler::movdbl(XMMRegister dst, const Operand& src) {
-  EnsureSpace ensure_space(this);
-  movsd(dst, src);
-}
-
-
-void Assembler::movdbl(const Operand& dst, XMMRegister src) {
-  EnsureSpace ensure_space(this);
-  movsd(dst, src);
-}
-
-
 void Assembler::movsd(const Operand& dst, XMMRegister src ) {
   ASSERT(IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 2b3ed4a..5bb878c 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -1072,15 +1072,14 @@
     }
   }
 
-  // Use either movsd or movlpd.
-  void movdbl(XMMRegister dst, const Operand& src);
-  void movdbl(const Operand& dst, XMMRegister src);
-
   void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); }
   void movd(XMMRegister dst, const Operand& src);
   void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
   void movd(const Operand& dst, XMMRegister src);
   void movsd(XMMRegister dst, XMMRegister src);
+  void movsd(XMMRegister dst, const Operand& src);
+  void movsd(const Operand& dst, XMMRegister src);
+
 
   void movss(XMMRegister dst, const Operand& src);
   void movss(const Operand& dst, XMMRegister src);
@@ -1162,9 +1161,6 @@
   void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
 
  protected:
-  void movsd(XMMRegister dst, const Operand& src);
-  void movsd(const Operand& dst, XMMRegister src);
-
   void emit_sse_operand(XMMRegister reg, const Operand& adr);
   void emit_sse_operand(XMMRegister dst, XMMRegister src);
   void emit_sse_operand(Register dst, XMMRegister src);
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index beaf320..71f7989 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -172,7 +172,7 @@
 
   if (constant_stack_parameter_count != 0) {
     // stack param count needs (constructor pointer, and single argument)
-    descriptor->stack_parameter_count_ = &eax;
+    descriptor->stack_parameter_count_ = eax;
   }
   descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
   descriptor->register_params_ = registers;
@@ -194,7 +194,7 @@
 
   if (constant_stack_parameter_count != 0) {
     // stack param count needs (constructor pointer, and single argument)
-    descriptor->stack_parameter_count_ = &eax;
+    descriptor->stack_parameter_count_ = eax;
   }
   descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
   descriptor->register_params_ = registers;
@@ -454,7 +454,7 @@
     __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
     for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
       XMMRegister reg = XMMRegister::from_code(i);
-      __ movdbl(Operand(esp, i * kDoubleSize), reg);
+      __ movsd(Operand(esp, i * kDoubleSize), reg);
     }
   }
   const int argument_count = 1;
@@ -470,7 +470,7 @@
     CpuFeatureScope scope(masm, SSE2);
     for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
       XMMRegister reg = XMMRegister::from_code(i);
-      __ movdbl(reg, Operand(esp, i * kDoubleSize));
+      __ movsd(reg, Operand(esp, i * kDoubleSize));
     }
     __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
   }
@@ -770,7 +770,7 @@
     __ ret(kPointerSize);
   } else {  // UNTAGGED.
     CpuFeatureScope scope(masm, SSE2);
-    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+    __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
     __ Ret();
   }
 
@@ -785,7 +785,7 @@
     CpuFeatureScope scope(masm, SSE2);
     __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
     __ sub(esp, Immediate(kDoubleSize));
-    __ movdbl(Operand(esp, 0), xmm1);
+    __ movsd(Operand(esp, 0), xmm1);
     __ fld_d(Operand(esp, 0));
     __ add(esp, Immediate(kDoubleSize));
   }
@@ -798,17 +798,17 @@
     __ ret(kPointerSize);
   } else {  // UNTAGGED.
     CpuFeatureScope scope(masm, SSE2);
-    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+    __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
     __ Ret();
 
     // Skip cache and return answer directly, only in untagged case.
     __ bind(&skip_cache);
     __ sub(esp, Immediate(kDoubleSize));
-    __ movdbl(Operand(esp, 0), xmm1);
+    __ movsd(Operand(esp, 0), xmm1);
     __ fld_d(Operand(esp, 0));
     GenerateOperation(masm, type_);
     __ fstp_d(Operand(esp, 0));
-    __ movdbl(xmm1, Operand(esp, 0));
+    __ movsd(xmm1, Operand(esp, 0));
     __ add(esp, Immediate(kDoubleSize));
     // We return the value in xmm1 without adding it to the cache, but
     // we cause a scavenging GC so that future allocations will succeed.
@@ -834,13 +834,13 @@
     __ bind(&runtime_call_clear_stack);
     __ bind(&runtime_call);
     __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
-    __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
+    __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
     {
       FrameScope scope(masm, StackFrame::INTERNAL);
       __ push(eax);
       __ CallRuntime(RuntimeFunction(), 1);
     }
-    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+    __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
     __ Ret();
   }
 }
@@ -983,7 +983,7 @@
   Factory* factory = masm->isolate()->factory();
   __ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map());
   __ j(not_equal, not_numbers);  // Argument in edx is not a number.
-  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+  __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
   __ bind(&load_eax);
   // Load operand in eax into xmm1, or branch to not_numbers.
   __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
@@ -1001,7 +1001,7 @@
   __ SmiTag(eax);  // Retag smi for heap number overwriting test.
   __ jmp(&done, Label::kNear);
   __ bind(&load_float_eax);
-  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+  __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
   __ bind(&done);
 }
 
@@ -1059,7 +1059,7 @@
            factory->heap_number_map());
     __ j(not_equal, &call_runtime);
 
-    __ movdbl(double_base, FieldOperand(base, HeapNumber::kValueOffset));
+    __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
     __ jmp(&unpack_exponent, Label::kNear);
 
     __ bind(&base_is_smi);
@@ -1075,7 +1075,7 @@
     __ cmp(FieldOperand(exponent, HeapObject::kMapOffset),
            factory->heap_number_map());
     __ j(not_equal, &call_runtime);
-    __ movdbl(double_exponent,
+    __ movsd(double_exponent,
               FieldOperand(exponent, HeapNumber::kValueOffset));
   } else if (exponent_type_ == TAGGED) {
     __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
@@ -1083,7 +1083,7 @@
     __ jmp(&int_exponent);
 
     __ bind(&exponent_not_smi);
-    __ movdbl(double_exponent,
+    __ movsd(double_exponent,
               FieldOperand(exponent, HeapNumber::kValueOffset));
   }
 
@@ -1178,9 +1178,9 @@
     __ fnclex();  // Clear flags to catch exceptions later.
     // Transfer (B)ase and (E)xponent onto the FPU register stack.
     __ sub(esp, Immediate(kDoubleSize));
-    __ movdbl(Operand(esp, 0), double_exponent);
+    __ movsd(Operand(esp, 0), double_exponent);
     __ fld_d(Operand(esp, 0));  // E
-    __ movdbl(Operand(esp, 0), double_base);
+    __ movsd(Operand(esp, 0), double_base);
     __ fld_d(Operand(esp, 0));  // B, E
 
     // Exponent is in st(1) and base is in st(0)
@@ -1203,7 +1203,7 @@
     __ test_b(eax, 0x5F);  // We check for all but precision exception.
     __ j(not_zero, &fast_power_failed, Label::kNear);
     __ fstp_d(Operand(esp, 0));
-    __ movdbl(double_result, Operand(esp, 0));
+    __ movsd(double_result, Operand(esp, 0));
     __ add(esp, Immediate(kDoubleSize));
     __ jmp(&done);
 
@@ -1270,7 +1270,7 @@
     // as heap number in exponent.
     __ bind(&done);
     __ AllocateHeapNumber(eax, scratch, base, &call_runtime);
-    __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
+    __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
     __ IncrementCounter(counters->math_pow(), 1);
     __ ret(2 * kPointerSize);
   } else {
@@ -1278,8 +1278,8 @@
     {
       AllowExternalCallThatCantCauseGC scope(masm);
       __ PrepareCallCFunction(4, scratch);
-      __ movdbl(Operand(esp, 0 * kDoubleSize), double_base);
-      __ movdbl(Operand(esp, 1 * kDoubleSize), double_exponent);
+      __ movsd(Operand(esp, 0 * kDoubleSize), double_base);
+      __ movsd(Operand(esp, 1 * kDoubleSize), double_exponent);
       __ CallCFunction(
           ExternalReference::power_double_double_function(masm->isolate()), 4);
     }
@@ -1287,7 +1287,7 @@
     // Store it into the (fixed) result register.
     __ sub(esp, Immediate(kDoubleSize));
     __ fstp_d(Operand(esp, 0));
-    __ movdbl(double_result, Operand(esp, 0));
+    __ movsd(double_result, Operand(esp, 0));
     __ add(esp, Immediate(kDoubleSize));
 
     __ bind(&done);
@@ -4730,7 +4730,7 @@
     __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
            masm->isolate()->factory()->heap_number_map());
     __ j(not_equal, &maybe_undefined1, Label::kNear);
-    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+    __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
     __ jmp(&left, Label::kNear);
     __ bind(&right_smi);
     __ mov(ecx, eax);  // Can't clobber eax because we can still jump away.
@@ -4742,7 +4742,7 @@
     __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
            masm->isolate()->factory()->heap_number_map());
     __ j(not_equal, &maybe_undefined2, Label::kNear);
-    __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+    __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
     __ jmp(&done);
     __ bind(&left_smi);
     __ mov(ecx, edx);  // Can't clobber edx because we can still jump away.
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index dc536e7..006651c 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -444,7 +444,7 @@
         // Save all XMM registers except XMM0.
         for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
           XMMRegister reg = XMMRegister::from_code(i);
-          masm->movdbl(Operand(esp, (i - 1) * kDoubleSize), reg);
+          masm->movsd(Operand(esp, (i - 1) * kDoubleSize), reg);
         }
       }
     }
@@ -456,7 +456,7 @@
         // Restore all XMM registers except XMM0.
         for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
           XMMRegister reg = XMMRegister::from_code(i);
-          masm->movdbl(reg, Operand(esp, (i - 1) * kDoubleSize));
+          masm->movsd(reg, Operand(esp, (i - 1) * kDoubleSize));
         }
         masm->add(esp,
                   Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index b28161d..da88146 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -117,7 +117,7 @@
     CpuFeatureScope use_sse2(&masm, SSE2);
     XMMRegister input = xmm1;
     XMMRegister result = xmm2;
-    __ movdbl(input, Operand(esp, 1 * kPointerSize));
+    __ movsd(input, Operand(esp, 1 * kPointerSize));
     __ push(eax);
     __ push(ebx);
 
@@ -125,7 +125,7 @@
 
     __ pop(ebx);
     __ pop(eax);
-    __ movdbl(Operand(esp, 1 * kPointerSize), result);
+    __ movsd(Operand(esp, 1 * kPointerSize), result);
     __ fld_d(Operand(esp, 1 * kPointerSize));
     __ Ret();
   }
@@ -155,9 +155,9 @@
   // Move double input into registers.
   {
     CpuFeatureScope use_sse2(&masm, SSE2);
-    __ movdbl(xmm0, Operand(esp, 1 * kPointerSize));
+    __ movsd(xmm0, Operand(esp, 1 * kPointerSize));
     __ sqrtsd(xmm0, xmm0);
-    __ movdbl(Operand(esp, 1 * kPointerSize), xmm0);
+    __ movsd(Operand(esp, 1 * kPointerSize), xmm0);
     // Load result into floating point register as return value.
     __ fld_d(Operand(esp, 1 * kPointerSize));
     __ Ret();
@@ -462,10 +462,10 @@
       Label medium_handlers, f9_16, f17_32, f33_48, f49_63;
 
       __ bind(&f9_16);
-      __ movdbl(xmm0, Operand(src, 0));
-      __ movdbl(xmm1, Operand(src, count, times_1, -8));
-      __ movdbl(Operand(dst, 0), xmm0);
-      __ movdbl(Operand(dst, count, times_1, -8), xmm1);
+      __ movsd(xmm0, Operand(src, 0));
+      __ movsd(xmm1, Operand(src, count, times_1, -8));
+      __ movsd(Operand(dst, 0), xmm0);
+      __ movsd(Operand(dst, count, times_1, -8), xmm1);
       MemMoveEmitPopAndReturn(&masm);
 
       __ bind(&f17_32);
@@ -741,7 +741,7 @@
   XMMRegister the_hole_nan = xmm1;
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatureScope use_sse2(masm, SSE2);
-    __ movdbl(the_hole_nan,
+    __ movsd(the_hole_nan,
               Operand::StaticVariable(canonical_the_hole_nan_reference));
   }
   __ jmp(&entry);
@@ -767,7 +767,7 @@
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatureScope fscope(masm, SSE2);
     __ Cvtsi2sd(xmm0, ebx);
-    __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
+    __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
               xmm0);
   } else {
     __ push(ebx);
@@ -787,7 +787,7 @@
 
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatureScope use_sse2(masm, SSE2);
-    __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
+    __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
               the_hole_nan);
   } else {
     __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
@@ -896,9 +896,9 @@
   // edx: new heap number
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatureScope fscope(masm, SSE2);
-    __ movdbl(xmm0,
+    __ movsd(xmm0,
               FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
-    __ movdbl(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
+    __ movsd(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
   } else {
     __ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
     __ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
@@ -1078,20 +1078,20 @@
 
   Label done;
 
-  __ movdbl(double_scratch, ExpConstant(0));
+  __ movsd(double_scratch, ExpConstant(0));
   __ xorpd(result, result);
   __ ucomisd(double_scratch, input);
   __ j(above_equal, &done);
   __ ucomisd(input, ExpConstant(1));
-  __ movdbl(result, ExpConstant(2));
+  __ movsd(result, ExpConstant(2));
   __ j(above_equal, &done);
-  __ movdbl(double_scratch, ExpConstant(3));
-  __ movdbl(result, ExpConstant(4));
+  __ movsd(double_scratch, ExpConstant(3));
+  __ movsd(result, ExpConstant(4));
   __ mulsd(double_scratch, input);
   __ addsd(double_scratch, result);
   __ movd(temp2, double_scratch);
   __ subsd(double_scratch, result);
-  __ movdbl(result, ExpConstant(6));
+  __ movsd(result, ExpConstant(6));
   __ mulsd(double_scratch, ExpConstant(5));
   __ subsd(double_scratch, input);
   __ subsd(result, double_scratch);
@@ -1108,7 +1108,7 @@
   __ shl(temp1, 20);
   __ movd(input, temp1);
   __ pshufd(input, input, static_cast<uint8_t>(0xe1));  // Order: 11 10 00 01
-  __ movdbl(double_scratch, Operand::StaticArray(
+  __ movsd(double_scratch, Operand::StaticArray(
       temp2, times_8, ExternalReference::math_exp_log_table()));
   __ por(input, double_scratch);
   __ mulsd(result, input);
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index bb32bc5..e339b3a 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -202,10 +202,7 @@
     FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
   intptr_t handler =
       reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
-  int params = descriptor->register_param_count_;
-  if (descriptor->stack_parameter_count_ != NULL) {
-    params++;
-  }
+  int params = descriptor->environment_length();
   output_frame->SetRegister(eax.code(), params);
   output_frame->SetRegister(ebx.code(), handler);
 }
@@ -250,7 +247,7 @@
     for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
       XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
       int offset = i * kDoubleSize;
-      __ movdbl(Operand(esp, offset), xmm_reg);
+      __ movsd(Operand(esp, offset), xmm_reg);
     }
   }
 
@@ -302,8 +299,8 @@
     for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
       int dst_offset = i * kDoubleSize + double_regs_offset;
       int src_offset = i * kDoubleSize;
-      __ movdbl(xmm0, Operand(esp, src_offset));
-      __ movdbl(Operand(ebx, dst_offset), xmm0);
+      __ movsd(xmm0, Operand(esp, src_offset));
+      __ movsd(Operand(ebx, dst_offset), xmm0);
     }
   }
 
@@ -388,7 +385,7 @@
     for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
       XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
       int src_offset = i * kDoubleSize + double_regs_offset;
-      __ movdbl(xmm_reg, Operand(ebx, src_offset));
+      __ movsd(xmm_reg, Operand(ebx, src_offset));
     }
   }
 
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index d284f53..725a6f8 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -3310,7 +3310,7 @@
     __ cvtss2sd(xmm1, xmm1);
     __ xorps(xmm0, xmm1);
     __ subsd(xmm0, xmm1);
-    __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
+    __ movsd(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
   } else {
     // 0x4130000000000000 is 1.0 x 2^20 as a double.
     __ mov(FieldOperand(edi, HeapNumber::kExponentOffset),
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 8adc25c..4b0a7b2 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -257,7 +257,7 @@
       BitVector* doubles = chunk()->allocated_double_registers();
       BitVector::Iterator save_iterator(doubles);
       while (!save_iterator.Done()) {
-        __ movdbl(MemOperand(esp, count * kDoubleSize),
+        __ movsd(MemOperand(esp, count * kDoubleSize),
                   XMMRegister::FromAllocationIndex(save_iterator.Current()));
         save_iterator.Advance();
         count++;
@@ -2233,8 +2233,8 @@
       case Token::MOD: {
         // Pass two doubles as arguments on the stack.
         __ PrepareCallCFunction(4, eax);
-        __ movdbl(Operand(esp, 0 * kDoubleSize), left);
-        __ movdbl(Operand(esp, 1 * kDoubleSize), right);
+        __ movsd(Operand(esp, 0 * kDoubleSize), left);
+        __ movsd(Operand(esp, 1 * kDoubleSize), right);
         __ CallCFunction(
             ExternalReference::double_fp_operation(Token::MOD, isolate()),
             4);
@@ -2243,7 +2243,7 @@
         // Store it into the result register.
         __ sub(Operand(esp), Immediate(kDoubleSize));
         __ fstp_d(Operand(esp, 0));
-        __ movdbl(result, Operand(esp, 0));
+        __ movsd(result, Operand(esp, 0));
         __ add(Operand(esp), Immediate(kDoubleSize));
         break;
       }
@@ -2617,7 +2617,7 @@
   if (use_sse2) {
     CpuFeatureScope scope(masm(), SSE2);
     XMMRegister input_reg = ToDoubleRegister(instr->object());
-    __ movdbl(MemOperand(esp, 0), input_reg);
+    __ movsd(MemOperand(esp, 0), input_reg);
   } else {
     __ fstp_d(MemOperand(esp, 0));
   }
@@ -3079,7 +3079,7 @@
     BitVector::Iterator save_iterator(doubles);
     int count = 0;
     while (!save_iterator.Done()) {
-      __ movdbl(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+      __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
                 MemOperand(esp, count * kDoubleSize));
       save_iterator.Advance();
       count++;
@@ -3247,7 +3247,7 @@
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope scope(masm(), SSE2);
       XMMRegister result = ToDoubleRegister(instr->result());
-      __ movdbl(result, FieldOperand(object, offset));
+      __ movsd(result, FieldOperand(object, offset));
     } else {
       X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
     }
@@ -3401,7 +3401,7 @@
   } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope scope(masm(), SSE2);
-      __ movdbl(ToDoubleRegister(instr->result()), operand);
+      __ movsd(ToDoubleRegister(instr->result()), operand);
     } else {
       X87Mov(ToX87Register(instr->result()), operand);
     }
@@ -3472,7 +3472,7 @@
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatureScope scope(masm(), SSE2);
     XMMRegister result = ToDoubleRegister(instr->result());
-    __ movdbl(result, double_load_operand);
+    __ movsd(result, double_load_operand);
   } else {
     X87Mov(ToX87Register(instr->result()), double_load_operand);
   }
@@ -3995,7 +3995,7 @@
       ExternalReference::address_of_minus_one_half();
 
   Label done, round_to_zero, below_one_half, do_not_compensate;
-  __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
+  __ movsd(xmm_scratch, Operand::StaticVariable(one_half));
   __ ucomisd(xmm_scratch, input_reg);
   __ j(above, &below_one_half);
 
@@ -4009,7 +4009,7 @@
   __ jmp(&done);
 
   __ bind(&below_one_half);
-  __ movdbl(xmm_scratch, Operand::StaticVariable(minus_one_half));
+  __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half));
   __ ucomisd(xmm_scratch, input_reg);
   __ j(below_equal, &round_to_zero);
 
@@ -4196,22 +4196,21 @@
   __ j(equal, &zero, Label::kNear);
   ExternalReference nan =
       ExternalReference::address_of_canonical_non_hole_nan();
-  __ movdbl(input_reg, Operand::StaticVariable(nan));
+  __ movsd(input_reg, Operand::StaticVariable(nan));
   __ jmp(&done, Label::kNear);
   __ bind(&zero);
-  __ push(Immediate(0xFFF00000));
-  __ push(Immediate(0));
-  __ movdbl(input_reg, Operand(esp, 0));
-  __ add(Operand(esp), Immediate(kDoubleSize));
+  ExternalReference ninf =
+      ExternalReference::address_of_negative_infinity();
+  __ movsd(input_reg, Operand::StaticVariable(ninf));
   __ jmp(&done, Label::kNear);
   __ bind(&positive);
   __ fldln2();
   __ sub(Operand(esp), Immediate(kDoubleSize));
-  __ movdbl(Operand(esp, 0), input_reg);
+  __ movsd(Operand(esp, 0), input_reg);
   __ fld_d(Operand(esp, 0));
   __ fyl2x();
   __ fstp_d(Operand(esp, 0));
-  __ movdbl(input_reg, Operand(esp, 0));
+  __ movsd(input_reg, Operand(esp, 0));
   __ add(Operand(esp), Immediate(kDoubleSize));
   __ bind(&done);
 }
@@ -4482,7 +4481,7 @@
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope scope(masm(), SSE2);
       XMMRegister value = ToDoubleRegister(instr->value());
-      __ movdbl(FieldOperand(object, offset), value);
+      __ movsd(FieldOperand(object, offset), value);
     } else {
       X87Register value = ToX87Register(instr->value());
       X87Mov(FieldOperand(object, offset), value);
@@ -4632,7 +4631,7 @@
   } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
     if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
       CpuFeatureScope scope(masm(), SSE2);
-      __ movdbl(operand, ToDoubleRegister(instr->value()));
+      __ movsd(operand, ToDoubleRegister(instr->value()));
     } else {
       X87Mov(operand, ToX87Register(instr->value()));
     }
@@ -4690,11 +4689,11 @@
       __ ucomisd(value, value);
       __ j(parity_odd, &have_value);  // NaN.
 
-      __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
+      __ movsd(value, Operand::StaticVariable(canonical_nan_reference));
       __ bind(&have_value);
     }
 
-    __ movdbl(double_store_operand, value);
+    __ movsd(double_store_operand, value);
   } else {
     // Can't use SSE2 in the serializer
     if (instr->hydrogen()->IsConstantHoleStore()) {
@@ -5160,7 +5159,7 @@
   __ bind(&done);
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatureScope feature_scope(masm(), SSE2);
-    __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
+    __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
   } else {
     __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
   }
@@ -5204,7 +5203,7 @@
   if (use_sse2) {
     CpuFeatureScope scope(masm(), SSE2);
     XMMRegister input_reg = ToDoubleRegister(instr->value());
-    __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+    __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
   } else {
     __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
   }
@@ -5347,7 +5346,7 @@
     }
 
     // Heap number to XMM conversion.
-    __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+    __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
 
     if (deoptimize_on_minus_zero) {
       XMMRegister xmm_scratch = double_scratch0();
@@ -5369,7 +5368,7 @@
 
       ExternalReference nan =
           ExternalReference::address_of_canonical_non_hole_nan();
-      __ movdbl(result_reg, Operand::StaticVariable(nan));
+      __ movsd(result_reg, Operand::StaticVariable(nan));
       __ jmp(&done, Label::kNear);
     }
   } else {
@@ -5758,7 +5757,7 @@
 
   // Heap number
   __ bind(&heap_number);
-  __ movdbl(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
+  __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
   __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
   __ jmp(&done, Label::kNear);
 
diff --git a/src/ia32/lithium-gap-resolver-ia32.cc b/src/ia32/lithium-gap-resolver-ia32.cc
index b5bc18b..2b2126a 100644
--- a/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/src/ia32/lithium-gap-resolver-ia32.cc
@@ -326,7 +326,7 @@
         } else {
           __ push(Immediate(upper));
           __ push(Immediate(lower));
-          __ movdbl(dst, Operand(esp, 0));
+          __ movsd(dst, Operand(esp, 0));
           __ add(esp, Immediate(kDoubleSize));
         }
       } else {
@@ -360,7 +360,7 @@
       } else {
         ASSERT(destination->IsDoubleStackSlot());
         Operand dst = cgen_->ToOperand(destination);
-        __ movdbl(dst, src);
+        __ movsd(dst, src);
       }
     } else {
       // load from the register onto the stack, store in destination, which must
@@ -378,12 +378,12 @@
       Operand src = cgen_->ToOperand(source);
       if (destination->IsDoubleRegister()) {
         XMMRegister dst = cgen_->ToDoubleRegister(destination);
-        __ movdbl(dst, src);
+        __ movsd(dst, src);
       } else {
         // We rely on having xmm0 available as a fixed scratch register.
         Operand dst = cgen_->ToOperand(destination);
-        __ movdbl(xmm0, src);
-        __ movdbl(dst, xmm0);
+        __ movsd(xmm0, src);
+        __ movsd(dst, xmm0);
       }
     } else {
       // load from the stack slot on top of the floating point stack, and then
@@ -486,9 +486,9 @@
                                               : destination);
     Operand other =
         cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
-    __ movdbl(xmm0, other);
-    __ movdbl(other, reg);
-    __ movdbl(reg, Operand(xmm0));
+    __ movsd(xmm0, other);
+    __ movsd(other, reg);
+    __ movsd(reg, Operand(xmm0));
   } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
     CpuFeatureScope scope(cgen_->masm(), SSE2);
     // Double-width memory-to-memory.  Spill on demand to use a general
@@ -499,12 +499,12 @@
     Operand src1 = cgen_->HighOperand(source);
     Operand dst0 = cgen_->ToOperand(destination);
     Operand dst1 = cgen_->HighOperand(destination);
-    __ movdbl(xmm0, dst0);  // Save destination in xmm0.
+    __ movsd(xmm0, dst0);  // Save destination in xmm0.
     __ mov(tmp, src0);  // Then use tmp to copy source to destination.
     __ mov(dst0, tmp);
     __ mov(tmp, src1);
     __ mov(dst1, tmp);
-    __ movdbl(src0, xmm0);
+    __ movsd(src0, xmm0);
 
   } else {
     // No other combinations are possible.
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 9dfc0e2..805861e 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -232,7 +232,7 @@
   j(not_equal, &done, Label::kNear);
 
   sub(esp, Immediate(kDoubleSize));
-  movdbl(MemOperand(esp, 0), input_reg);
+  movsd(MemOperand(esp, 0), input_reg);
   SlowTruncateToI(result_reg, esp, 0);
   add(esp, Immediate(kDoubleSize));
   bind(&done);
@@ -344,7 +344,7 @@
     }
   } else if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatureScope scope(this, SSE2);
-    movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+    movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
     cvttsd2si(result_reg, Operand(xmm0));
     cmp(result_reg, 0x80000000u);
     j(not_equal, &done, Label::kNear);
@@ -361,7 +361,7 @@
     if (input_reg.is(result_reg)) {
       // Input is clobbered. Restore number from double scratch.
       sub(esp, Immediate(kDoubleSize));
-      movdbl(MemOperand(esp, 0), xmm0);
+      movsd(MemOperand(esp, 0), xmm0);
       SlowTruncateToI(result_reg, esp, 0);
       add(esp, Immediate(kDoubleSize));
     } else {
@@ -390,7 +390,7 @@
     ASSERT(!temp.is(no_xmm_reg));
     CpuFeatureScope scope(this, SSE2);
 
-    movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+    movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
     cvttsd2si(result_reg, Operand(xmm0));
     Cvtsi2sd(temp, Operand(result_reg));
     ucomisd(xmm0, temp);
@@ -452,7 +452,7 @@
   cmp(src, Immediate(0));
   ExternalReference uint32_bias =
         ExternalReference::address_of_uint32_bias();
-  movdbl(scratch, Operand::StaticVariable(uint32_bias));
+  movsd(scratch, Operand::StaticVariable(uint32_bias));
   Cvtsi2sd(dst, src);
   j(not_sign, &done, Label::kNear);
   addsd(dst, scratch);
@@ -816,9 +816,9 @@
       ExternalReference::address_of_canonical_non_hole_nan();
   if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
     CpuFeatureScope use_sse2(this, SSE2);
-    movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
+    movsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
     bind(&have_double_value);
-    movdbl(FieldOperand(elements, key, times_4,
+    movsd(FieldOperand(elements, key, times_4,
                         FixedDoubleArray::kHeaderSize - elements_offset),
            scratch2);
   } else {
@@ -838,7 +838,7 @@
   bind(&is_nan);
   if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
     CpuFeatureScope use_sse2(this, SSE2);
-    movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference));
+    movsd(scratch2, Operand::StaticVariable(canonical_nan_reference));
   } else {
     fld_d(Operand::StaticVariable(canonical_nan_reference));
   }
@@ -852,7 +852,7 @@
   if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
     CpuFeatureScope fscope(this, SSE2);
     Cvtsi2sd(scratch2, scratch1);
-    movdbl(FieldOperand(elements, key, times_4,
+    movsd(FieldOperand(elements, key, times_4,
                         FixedDoubleArray::kHeaderSize - elements_offset),
            scratch2);
   } else {
@@ -1068,7 +1068,7 @@
     const int offset = -2 * kPointerSize;
     for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
       XMMRegister reg = XMMRegister::from_code(i);
-      movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
+      movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
     }
   } else {
     sub(esp, Immediate(argc * kPointerSize));
@@ -1112,7 +1112,7 @@
     const int offset = -2 * kPointerSize;
     for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
       XMMRegister reg = XMMRegister::from_code(i);
-      movdbl(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
+      movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
     }
   }
 
@@ -3070,7 +3070,7 @@
   JumpIfSmi(probe, not_found);
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatureScope fscope(this, SSE2);
-    movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+    movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
     ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
   } else {
     fld_d(FieldOperand(object, HeapNumber::kValueOffset));
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 80fbc78..9786cff 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -879,7 +879,7 @@
                 miss_label, DONT_DO_SMI_CHECK);
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope use_sse2(masm, SSE2);
-      __ movdbl(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+      __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
     } else {
       __ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
     }
@@ -887,7 +887,7 @@
     __ bind(&do_store);
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope use_sse2(masm, SSE2);
-      __ movdbl(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
+      __ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
     } else {
       __ fstp_d(FieldOperand(storage_reg, HeapNumber::kValueOffset));
     }
@@ -1057,14 +1057,14 @@
                 miss_label, DONT_DO_SMI_CHECK);
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope use_sse2(masm, SSE2);
-      __ movdbl(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+      __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
     } else {
       __ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
     }
     __ bind(&do_store);
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope use_sse2(masm, SSE2);
-      __ movdbl(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0);
+      __ movsd(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0);
     } else {
       __ fstp_d(FieldOperand(scratch1, HeapNumber::kValueOffset));
     }
@@ -2397,7 +2397,7 @@
   // Check if the argument is a heap number and load its value into xmm0.
   Label slow;
   __ CheckMap(eax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
-  __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
+  __ movsd(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
 
   // Check if the argument is strictly positive. Note this also
   // discards NaN.
@@ -2447,7 +2447,7 @@
 
   // Return a new heap number.
   __ AllocateHeapNumber(eax, ebx, edx, &slow);
-  __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+  __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
   __ ret(2 * kPointerSize);
 
   // Return the argument (when it's an already round heap number).
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 3f59932..af5f87d 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -181,7 +181,7 @@
   descriptor->register_param_count_ = 2;
   if (constant_stack_parameter_count != 0) {
     // stack param count needs (constructor pointer, and single argument)
-    descriptor->stack_parameter_count_ = &a0;
+    descriptor->stack_parameter_count_ = a0;
   }
   descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
   descriptor->register_params_ = registers;
@@ -203,7 +203,7 @@
 
   if (constant_stack_parameter_count != 0) {
     // Stack param count needs (constructor pointer, and single argument).
-    descriptor->stack_parameter_count_ = &a0;
+    descriptor->stack_parameter_count_ = a0;
   }
   descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
   descriptor->register_params_ = registers;
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index 4426d90..d31990b 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -104,10 +104,7 @@
   ApiFunction function(descriptor->deoptimization_handler_);
   ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
   intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
-  int params = descriptor->register_param_count_;
-  if (descriptor->stack_parameter_count_ != NULL) {
-    params++;
-  }
+  int params = descriptor->environment_length();
   output_frame->SetRegister(s0.code(), params);
   output_frame->SetRegister(s1.code(), (params - 1) * kPointerSize);
   output_frame->SetRegister(s2.code(), handler);
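Note on this deoptimizer hunk and the matching x64 one below: the removed lines counted the register parameters and added one when a stack-parameter-count register was set; the patch folds that into descriptor->environment_length(). The accessor's definition is not part of this diff, so the sketch below only illustrates the presumed equivalence using hypothetical stand-in fields, not V8's real CodeStubInterfaceDescriptor:

  #include <cassert>

  // Hypothetical stand-in, for illustration only: it mirrors the count the
  // removed lines computed by hand.
  struct DescriptorSketch {
    int register_param_count_;
    bool has_stack_parameter_count_;
    int environment_length() const {
      // Presumed equivalence: register params, plus one for the stack parameter count.
      return register_param_count_ + (has_stack_parameter_count_ ? 1 : 0);
    }
  };

  int main() {
    DescriptorSketch with_count = { 2, true };
    DescriptorSketch without_count = { 2, false };
    assert(with_count.environment_length() == 3);
    assert(without_count.environment_length() == 2);
    return 0;
  }
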
diff --git a/src/objects.cc b/src/objects.cc
index 2f84975..743009f 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -6557,7 +6557,7 @@
   uint32_t index = 0;
   if (name->AsArrayIndex(&index)) {
     for (Handle<Object> obj = object;
-         *obj != isolate->heap()->null_value();
+         !obj->IsNull();
          obj = handle(JSReceiver::cast(*obj)->GetPrototype(), isolate)) {
       if (obj->IsJSObject() && JSObject::cast(*obj)->HasDictionaryElements()) {
         JSObject* js_object = JSObject::cast(*obj);
@@ -6575,7 +6575,7 @@
     }
   } else {
     for (Handle<Object> obj = object;
-         *obj != isolate->heap()->null_value();
+         !obj->IsNull();
          obj = handle(JSReceiver::cast(*obj)->GetPrototype(), isolate)) {
       LookupResult result(isolate);
       JSReceiver::cast(*obj)->LocalLookup(*name, &result);
@@ -13177,52 +13177,62 @@
 }
 
 
-bool JSObject::HasRealNamedProperty(Isolate* isolate, Name* key) {
+bool JSObject::HasRealNamedProperty(Handle<JSObject> object,
+                                    Handle<Name> key) {
+  Isolate* isolate = object->GetIsolate();
+  SealHandleScope shs(isolate);
   // Check access rights if needed.
-  if (IsAccessCheckNeeded()) {
-    if (!isolate->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
-      isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+  if (object->IsAccessCheckNeeded()) {
+    if (!isolate->MayNamedAccess(*object, *key, v8::ACCESS_HAS)) {
+      isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS);
       return false;
     }
   }
 
   LookupResult result(isolate);
-  LocalLookupRealNamedProperty(key, &result);
+  object->LocalLookupRealNamedProperty(*key, &result);
   return result.IsFound() && !result.IsInterceptor();
 }
 
 
-bool JSObject::HasRealElementProperty(Isolate* isolate, uint32_t index) {
+bool JSObject::HasRealElementProperty(Handle<JSObject> object, uint32_t index) {
+  Isolate* isolate = object->GetIsolate();
+  SealHandleScope shs(isolate);
   // Check access rights if needed.
-  if (IsAccessCheckNeeded()) {
-    if (!isolate->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
-      isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+  if (object->IsAccessCheckNeeded()) {
+    if (!isolate->MayIndexedAccess(*object, index, v8::ACCESS_HAS)) {
+      isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS);
       return false;
     }
   }
 
-  if (IsJSGlobalProxy()) {
-    Object* proto = GetPrototype();
+  if (object->IsJSGlobalProxy()) {
+    HandleScope scope(isolate);
+    Handle<Object> proto(object->GetPrototype(), isolate);
     if (proto->IsNull()) return false;
     ASSERT(proto->IsJSGlobalObject());
-    return JSObject::cast(proto)->HasRealElementProperty(isolate, index);
+    return HasRealElementProperty(Handle<JSObject>::cast(proto), index);
   }
 
-  return GetElementAttributeWithoutInterceptor(this, index, false) != ABSENT;
+  return object->GetElementAttributeWithoutInterceptor(
+             *object, index, false) != ABSENT;
 }
 
 
-bool JSObject::HasRealNamedCallbackProperty(Isolate* isolate, Name* key) {
+bool JSObject::HasRealNamedCallbackProperty(Handle<JSObject> object,
+                                            Handle<Name> key) {
+  Isolate* isolate = object->GetIsolate();
+  SealHandleScope shs(isolate);
   // Check access rights if needed.
-  if (IsAccessCheckNeeded()) {
-    if (!isolate->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
-      isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+  if (object->IsAccessCheckNeeded()) {
+    if (!isolate->MayNamedAccess(*object, *key, v8::ACCESS_HAS)) {
+      isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS);
       return false;
     }
   }
 
   LookupResult result(isolate);
-  LocalLookupRealNamedProperty(key, &result);
+  object->LocalLookupRealNamedProperty(*key, &result);
   return result.IsPropertyCallbacks();
 }
 
diff --git a/src/objects.h b/src/objects.h
index 30c3f63..8db2cee 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -2414,9 +2414,11 @@
   inline bool HasIndexedInterceptor();
 
   // Support functions for v8 api (needed for correct interceptor behavior).
-  bool HasRealNamedProperty(Isolate* isolate, Name* key);
-  bool HasRealElementProperty(Isolate* isolate, uint32_t index);
-  bool HasRealNamedCallbackProperty(Isolate* isolate, Name* key);
+  static bool HasRealNamedProperty(Handle<JSObject> object,
+                                   Handle<Name> key);
+  static bool HasRealElementProperty(Handle<JSObject> object, uint32_t index);
+  static bool HasRealNamedCallbackProperty(Handle<JSObject> object,
+                                           Handle<Name> key);
 
   // Get the header size for a JSObject.  Used to compute the index of
   // internal fields as well as the number of internal fields.
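The three predicates declared above are now static and take handles instead of raw pointers plus an explicit Isolate*, matching the handlified call sites in objects.cc and runtime.cc elsewhere in this patch. A small hypothetical helper (not part of the patch) showing the resulting call shape; it assumes it would live inside the V8 tree with the usual internal headers:

  #include "objects.h"
  #include "handles.h"

  namespace v8 {
  namespace internal {

  // Hypothetical helper, for illustration only: true if the object itself has
  // the key as a real data property, a callback property, or an element.
  static bool HasOwnRealProperty(Handle<JSObject> object, Handle<Name> key) {
    uint32_t index;
    if (key->AsArrayIndex(&index)) {
      return JSObject::HasRealElementProperty(object, index);
    }
    return JSObject::HasRealNamedProperty(object, key) ||
           JSObject::HasRealNamedCallbackProperty(object, key);
  }

  } }  // namespace v8::internal
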
diff --git a/src/runtime.cc b/src/runtime.cc
index 0b39a43..b25547b 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -5582,40 +5582,39 @@
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
-  SealHandleScope shs(isolate);
+  HandleScope scope(isolate);
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(Name, key, 1);
+  CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
+  Handle<Object> object = args.at<Object>(0);
 
   uint32_t index;
   const bool key_is_array_index = key->AsArrayIndex(&index);
 
-  Object* obj = args[0];
   // Only JS objects can have properties.
-  if (obj->IsJSObject()) {
-    JSObject* object = JSObject::cast(obj);
+  if (object->IsJSObject()) {
+    Handle<JSObject> js_obj = Handle<JSObject>::cast(object);
     // Fast case: either the key is a real named property or it is not
     // an array index and there are no interceptors or hidden
     // prototypes.
-    if (object->HasRealNamedProperty(isolate, key)) {
+    if (JSObject::HasRealNamedProperty(js_obj, key)) {
       ASSERT(!isolate->has_scheduled_exception());
       return isolate->heap()->true_value();
     } else {
       RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     }
-    Map* map = object->map();
+    Map* map = js_obj->map();
     if (!key_is_array_index &&
         !map->has_named_interceptor() &&
         !HeapObject::cast(map->prototype())->map()->is_hidden_prototype()) {
       return isolate->heap()->false_value();
     }
     // Slow case.
-    HandleScope scope(isolate);
     return HasLocalPropertyImplementation(isolate,
-                                          Handle<JSObject>(object),
+                                          Handle<JSObject>(js_obj),
                                           Handle<Name>(key));
-  } else if (obj->IsString() && key_is_array_index) {
+  } else if (object->IsString() && key_is_array_index) {
     // Well, there is one exception:  Handle [] on strings.
-    String* string = String::cast(obj);
+    Handle<String> string = Handle<String>::cast(object);
     if (index < static_cast<uint32_t>(string->length())) {
       return isolate->heap()->true_value();
     }
diff --git a/src/version.cc b/src/version.cc
index 42efb3b..9085561 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     22
-#define BUILD_NUMBER      15
+#define BUILD_NUMBER      16
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 7e6b63c..a81c780 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -179,7 +179,7 @@
   descriptor->register_param_count_ = 2;
   if (constant_stack_parameter_count != 0) {
     // stack param count needs (constructor pointer, and single argument)
-    descriptor->stack_parameter_count_ = &rax;
+    descriptor->stack_parameter_count_ = rax;
   }
   descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
   descriptor->register_params_ = registers;
@@ -201,7 +201,7 @@
 
   if (constant_stack_parameter_count != 0) {
     // stack param count needs (constructor pointer, and single argument)
-    descriptor->stack_parameter_count_ = &rax;
+    descriptor->stack_parameter_count_ = rax;
   }
   descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
   descriptor->register_params_ = registers;
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index a5e4583..bf11e08 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -106,10 +106,7 @@
     FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
   intptr_t handler =
       reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
-  int params = descriptor->register_param_count_;
-  if (descriptor->stack_parameter_count_ != NULL) {
-    params++;
-  }
+  int params = descriptor->environment_length();
   output_frame->SetRegister(rax.code(), params);
   output_frame->SetRegister(rbx.code(), handler);
 }
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 6fe64c5..908d0ac 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -3673,10 +3673,35 @@
 
 
 void LCodeGen::DoMathLog(LMathLog* instr) {
-  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
-  TranscendentalCacheStub stub(TranscendentalCache::LOG,
-                               TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+  ASSERT(instr->value()->Equals(instr->result()));
+  XMMRegister input_reg = ToDoubleRegister(instr->value());
+  XMMRegister xmm_scratch = double_scratch0();
+  Label positive, done, zero;
+  __ xorps(xmm_scratch, xmm_scratch);
+  __ ucomisd(input_reg, xmm_scratch);
+  __ j(above, &positive, Label::kNear);
+  __ j(equal, &zero, Label::kNear);
+  ExternalReference nan =
+      ExternalReference::address_of_canonical_non_hole_nan();
+  Operand nan_operand = masm()->ExternalOperand(nan);
+  __ movsd(input_reg, nan_operand);
+  __ jmp(&done, Label::kNear);
+  __ bind(&zero);
+  ExternalReference ninf =
+      ExternalReference::address_of_negative_infinity();
+  Operand ninf_operand = masm()->ExternalOperand(ninf);
+  __ movsd(input_reg, ninf_operand);
+  __ jmp(&done, Label::kNear);
+  __ bind(&positive);
+  __ fldln2();
+  __ subq(rsp, Immediate(kDoubleSize));
+  __ movsd(Operand(rsp, 0), input_reg);
+  __ fld_d(Operand(rsp, 0));
+  __ fyl2x();
+  __ fstp_d(Operand(rsp, 0));
+  __ movsd(input_reg, Operand(rsp, 0));
+  __ addq(rsp, Immediate(kDoubleSize));
+  __ bind(&done);
 }
 
 
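For orientation on the new DoMathLog sequence above (a side note, not part of the patch): the two special-case branches load the canonical NaN for negative inputs and -Infinity for zero, while the positive path uses the x87 pair fldln2 / fyl2x, where fldln2 pushes ln 2 and fyl2x computes st(1) * log2(st(0)), so the result is ln 2 * log2(x) = ln x. A minimal standalone check of that identity, assuming only C++11 and <cmath>:

  #include <cassert>
  #include <cmath>

  int main() {
    // ln(x) == ln(2) * log2(x): the identity behind the fldln2 / fyl2x pair.
    const double ln2 = std::log(2.0);
    const double inputs[] = { 0.5, 1.0, 3.0, 1e10, 1e300 };
    for (double x : inputs) {
      double via_fyl2x = ln2 * std::log2(x);
      assert(std::fabs(via_fyl2x - std::log(x)) <=
             1e-12 * std::fabs(std::log(x)) + 1e-15);
    }
    return 0;
  }
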
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 9fb8307..6e35237 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -1216,9 +1216,11 @@
 
 
 LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
-  LOperand* input = UseFixedDouble(instr->value(), xmm1);
+  ASSERT(instr->representation().IsDouble());
+  ASSERT(instr->value()->representation().IsDouble());
+  LOperand* input = UseRegisterAtStart(instr->value());
   LMathLog* result = new(zone()) LMathLog(input);
-  return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+  return DefineSameAsFirst(result);
 }
 
 
diff --git a/test/cctest/test-assembler-ia32.cc b/test/cctest/test-assembler-ia32.cc
index fe6e0e9..d401568 100644
--- a/test/cctest/test-assembler-ia32.cc
+++ b/test/cctest/test-assembler-ia32.cc
@@ -264,15 +264,15 @@
   Assembler assm(isolate, buffer, sizeof buffer);
 
   CpuFeatureScope fscope(&assm, SSE2);
-  __ movdbl(xmm0, Operand(esp, 1 * kPointerSize));
-  __ movdbl(xmm1, Operand(esp, 3 * kPointerSize));
+  __ movsd(xmm0, Operand(esp, 1 * kPointerSize));
+  __ movsd(xmm1, Operand(esp, 3 * kPointerSize));
   __ addsd(xmm0, xmm1);
   __ mulsd(xmm0, xmm1);
   __ subsd(xmm0, xmm1);
   __ divsd(xmm0, xmm1);
   // Copy xmm0 to st(0) using eight bytes of stack.
   __ sub(esp, Immediate(8));
-  __ movdbl(Operand(esp, 0), xmm0);
+  __ movsd(Operand(esp, 0), xmm0);
   __ fld_d(Operand(esp, 0));
   __ add(esp, Immediate(8));
   __ ret(0);
@@ -313,7 +313,7 @@
   __ cvtsi2sd(xmm0, eax);
   // Copy xmm0 to st(0) using eight bytes of stack.
   __ sub(esp, Immediate(8));
-  __ movdbl(Operand(esp, 0), xmm0);
+  __ movsd(Operand(esp, 0), xmm0);
   __ fld_d(Operand(esp, 0));
   __ add(esp, Immediate(8));
   __ ret(0);
@@ -575,7 +575,7 @@
   MacroAssembler assm(isolate, buffer, sizeof buffer);
   { CpuFeatureScope fscope2(&assm, SSE2);
     CpuFeatureScope fscope41(&assm, SSE4_1);
-    __ movdbl(xmm1, Operand(esp, 4));
+    __ movsd(xmm1, Operand(esp, 4));
     __ extractps(eax, xmm1, 0x1);
     __ ret(0);
   }
diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc
index 0b27a87..8b9da49 100644
--- a/test/cctest/test-disasm-ia32.cc
+++ b/test/cctest/test-disasm-ia32.cc
@@ -358,8 +358,8 @@
       __ mulsd(xmm1, xmm0);
       __ subsd(xmm1, xmm0);
       __ divsd(xmm1, xmm0);
-      __ movdbl(xmm1, Operand(ebx, ecx, times_4, 10000));
-      __ movdbl(Operand(ebx, ecx, times_4, 10000), xmm1);
+      __ movsd(xmm1, Operand(ebx, ecx, times_4, 10000));
+      __ movsd(Operand(ebx, ecx, times_4, 10000), xmm1);
       __ ucomisd(xmm0, xmm1);
 
       // 128 bit move instructions.
diff --git a/test/cctest/test-heap-profiler.cc b/test/cctest/test-heap-profiler.cc
index 6b45296..809372d 100644
--- a/test/cctest/test-heap-profiler.cc
+++ b/test/cctest/test-heap-profiler.cc
@@ -2007,17 +2007,68 @@
 }
 
 
+
+class HeapProfilerExtension : public v8::Extension {
+ public:
+  static const char* kName;
+  HeapProfilerExtension() : v8::Extension(kName, kSource) { }
+  virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+      v8::Handle<v8::String> name);
+  static void FindUntrackedObjects(
+      const v8::FunctionCallbackInfo<v8::Value>& args);
+ private:
+  static const char* kSource;
+};
+
+const char* HeapProfilerExtension::kName = "v8/heap-profiler";
+
+
+const char* HeapProfilerExtension::kSource =
+    "native function findUntrackedObjects();";
+
+
+v8::Handle<v8::FunctionTemplate> HeapProfilerExtension::GetNativeFunction(
+    v8::Handle<v8::String> name) {
+  if (name->Equals(v8::String::New("findUntrackedObjects"))) {
+    return v8::FunctionTemplate::New(
+        HeapProfilerExtension::FindUntrackedObjects);
+  } else {
+    CHECK(false);
+    return v8::Handle<v8::FunctionTemplate>();
+  }
+}
+
+
+void HeapProfilerExtension::FindUntrackedObjects(
+    const v8::FunctionCallbackInfo<v8::Value>& args) {
+  i::HeapProfiler* heap_profiler =
+      reinterpret_cast<i::HeapProfiler*>(args.GetIsolate()->GetHeapProfiler());
+  int untracked_objects = heap_profiler->FindUntrackedObjects();
+  args.GetReturnValue().Set(untracked_objects);
+  CHECK_EQ(0, untracked_objects);
+}
+
+
+static HeapProfilerExtension kHeapProfilerExtension;
+v8::DeclareExtension kHeapProfilerExtensionDeclaration(
+    &kHeapProfilerExtension);
+
+
 // This is an example of using checking of JS allocations tracking in a test.
 TEST(HeapObjectsTracker) {
-  LocalContext env;
+  const char* extensions[] = { HeapProfilerExtension::kName };
+  v8::ExtensionConfiguration config(1, extensions);
+  LocalContext env(&config);
   v8::HandleScope scope(env->GetIsolate());
   HeapObjectsTracker tracker;
   CompileRun("var a = 1.2");
   CompileRun("var a = 1.2; var b = 1.0; var c = 1.0;");
   CompileRun(
-    "var a = [];"
-    "for (var i = 0; i < 5; ++i)"
+    "var a = [];\n"
+    "for (var i = 0; i < 5; ++i)\n"
     "    a[i] = i;\n"
-    "for (var i = 0; i < 3; ++i)"
-    "    a.shift();\n");
+    "findUntrackedObjects();\n"
+    "for (var i = 0; i < 3; ++i)\n"
+    "    a.shift();\n"
+    "findUntrackedObjects();\n");
 }
diff --git a/test/mjsunit/compiler/load-elimination-global.js b/test/mjsunit/compiler/load-elimination-global.js
new file mode 100644
index 0000000..9caaa9f
--- /dev/null
+++ b/test/mjsunit/compiler/load-elimination-global.js
@@ -0,0 +1,196 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --load-elimination
+
+// Test global load elimination of redundant loads and stores.
+
+var X = true;  // For forcing branches.
+X = false;
+X = true;
+X = false;
+
+function B(x, y) {
+  this.x = x;
+  this.y = y;
+  return this;
+}
+
+function test_load() {
+  var a = new B(1, 2);
+  var f = a.x + a.x;
+  if (false) ;
+  return f + a.x + a.x;
+}
+
+function test_load2() {
+  var a = new B(1, 2);
+  var f = a.x + a.x;
+  if (true) ;
+  return f + a.x + a.x;
+}
+
+function test_store_load() {
+  var a = new B(1, 2);
+  a.x = 4;
+  var b = X ? a.x : a.x;
+  return b + a.x;
+}
+
+function test_store_load2() {
+  var a = new B(1, 2);
+  var c = 6;
+  if (X) a.x = c;
+  else a.x = c;
+  return a.x + a.x;
+}
+
+function test_nonaliasing_store1() {
+  var a = new B(2, 3), b = new B(3, 4);
+  if (X) ;
+  b.x = 4;
+  if (X) ;
+  var f = a.x;
+  if (X) ;
+  b.x = 5;
+  if (X) ;
+  var g = a.x;
+  if (X) ;
+  b.x = 6;
+  if (X) ;
+  var h = a.x;
+  if (X) ;
+  b.x = 7;
+  if (X) ;
+  return f + g + h + a.x;
+}
+
+function test_loop(x) {
+  var a = new B(2, 3);
+  var v = a.x;
+  var total = v;
+  var i = 0;
+  while (i++ < 10) {
+    total = a.x;
+    a.y = 4;
+  }
+  return total;
+}
+
+function test_loop2(x) {
+  var a = new B(2, 3);
+  var v = a.x;
+  var total = v;
+  var i = 0;
+  while (i++ < 10) {
+    total = a.x;  // a.x not affected by loop
+    a.y = 4;
+
+    var j = 0;
+    while (j++ < 10) {
+      total = a.x;  // a.x not affected by loop
+      a.y = 5;
+    }
+
+    total = a.x;
+    a.y = 6;
+
+    j = 0;
+    while (j++ < 10) {
+      total = a.x;  // a.x not affected by loop
+      a.y = 7;
+    }
+  }
+  return total;
+}
+
+function killall() {
+  try { } catch(e) { }
+}
+
+%NeverOptimizeFunction(killall);
+
+function test_store_load_kill() {
+  var a = new B(1, 2);
+  if (X) ;
+  a.x = 4;
+  if (X) ;
+  var f = a.x;
+  if (X) ;
+  a.x = 5;
+  if (X) ;
+  var g = a.x;
+  if (X) ;
+  killall();
+  if (X) ;
+  a.x = 6;
+  if (X) ;
+  var h = a.x;
+  if (X) ;
+  a.x = 7;
+  if (X) ;
+  return f + g + h + a.x;
+}
+
+function test_store_store() {
+  var a = new B(6, 7);
+  if (X) ;
+  a.x = 7;
+  if (X) ;
+  a.x = 7;
+  if (X) ;
+  a.x = 7;
+  if (X) ;
+  a.x = 7;
+  if (X) ;
+  return a.x;
+}
+
+function test(x, f) {
+  X = true;
+  assertEquals(x, f());
+  assertEquals(x, f());
+  X = false;
+  assertEquals(x, f());
+  assertEquals(x, f());
+  X = true;
+  %OptimizeFunctionOnNextCall(f);
+  assertEquals(x, f());
+  assertEquals(x, f());
+  X = false;
+  assertEquals(x, f());
+  assertEquals(x, f());
+}
+
+test(4, test_load);
+test(8, test_store_load);
+test(12, test_store_load2);
+test(8, test_nonaliasing_store1);
+test(22, test_store_load_kill);
+test(7, test_store_store);
+test(2, test_loop);
+test(2, test_loop2);
diff --git a/test/mjsunit/compiler/load-elimination-osr.js b/test/mjsunit/compiler/load-elimination-osr.js
new file mode 100644
index 0000000..a57fe17
--- /dev/null
+++ b/test/mjsunit/compiler/load-elimination-osr.js
@@ -0,0 +1,65 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --load-elimination
+
+// Test global load elimination in the presence of OSR compilation.
+
+function It(x, y) { }
+
+function foo_osr(x, limit) {
+  var o = new It();
+  o.x = x;
+  o.y = x;
+  for (var i = 0; i < limit; i++) {
+    o.y += o.x;  // Load of x cannot be hoisted due to OSR.
+  }
+
+  return o.y;
+}
+
+assertEquals(22, foo_osr(11, 1));
+assertEquals(24, foo_osr(12, 1));
+assertEquals(1300013, foo_osr(13, 100000));
+
+
+function foo_hot(x, limit) {
+  var o = new It();
+  o.x = x;
+  o.y = x;
+  for (var i = 0; i < limit; i++) {
+    o.y += o.x;  // Load of x can be hoisted without OSR.
+  }
+
+  return o.y;
+}
+
+assertEquals(22, foo_hot(11, 1));
+assertEquals(24, foo_hot(12, 1));
+%OptimizeFunctionOnNextCall(foo_hot);
+assertEquals(32, foo_hot(16, 1));
+assertEquals(1300013, foo_hot(13, 100000));