Version 3.22.23

Renamed deprecated __attribute__((no_address_safety_analysis)) to __attribute__((no_sanitize_address)) (Chromium issue 311283)

Defined DEBUG for v8_optimized_debug=2

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@17420 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 7076999..a900142 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,13 @@
+2013-10-28: Version 3.22.23
+
+        Renamed deprecated __attribute__((no_address_safety_analysis)) to
+        __attribute__((no_sanitize_address)) (Chromium issue 311283)
+
+        Defined DEBUG for v8_optimized_debug=2
+
+        Performance and stability improvements on all platforms.
+
+
 2013-10-25: Version 3.22.22
 
         Record allocation stack traces. (Chromium issue 277984, v8:2949)
diff --git a/benchmarks/deltablue.js b/benchmarks/deltablue.js
index 548fd96..dacee3f 100644
--- a/benchmarks/deltablue.js
+++ b/benchmarks/deltablue.js
@@ -121,23 +121,23 @@
 
 Strength.prototype.nextWeaker = function () {
   switch (this.strengthValue) {
-    case 0: return Strength.WEAKEST;
-    case 1: return Strength.WEAK_DEFAULT;
-    case 2: return Strength.NORMAL;
-    case 3: return Strength.STRONG_DEFAULT;
-    case 4: return Strength.PREFERRED;
-    case 5: return Strength.REQUIRED;
+    case 0: return Strength.STRONG_PREFERRED;
+    case 1: return Strength.PREFERRED;
+    case 2: return Strength.STRONG_DEFAULT;
+    case 3: return Strength.NORMAL;
+    case 4: return Strength.WEAK_DEFAULT;
+    case 5: return Strength.WEAKEST;
   }
 }
 
 // Strength constants.
-Strength.REQUIRED        = new Strength(0, "required");
-Strength.STONG_PREFERRED = new Strength(1, "strongPreferred");
-Strength.PREFERRED       = new Strength(2, "preferred");
-Strength.STRONG_DEFAULT  = new Strength(3, "strongDefault");
-Strength.NORMAL          = new Strength(4, "normal");
-Strength.WEAK_DEFAULT    = new Strength(5, "weakDefault");
-Strength.WEAKEST         = new Strength(6, "weakest");
+Strength.REQUIRED         = new Strength(0, "required");
+Strength.STRONG_PREFERRED = new Strength(1, "strongPreferred");
+Strength.PREFERRED        = new Strength(2, "preferred");
+Strength.STRONG_DEFAULT   = new Strength(3, "strongDefault");
+Strength.NORMAL           = new Strength(4, "normal");
+Strength.WEAK_DEFAULT     = new Strength(5, "weakDefault");
+Strength.WEAKEST          = new Strength(6, "weakest");
 
 /* --- *
  * C o n s t r a i n t
diff --git a/build/toolchain.gypi b/build/toolchain.gypi
index e1903f3..de41fe0 100644
--- a/build/toolchain.gypi
+++ b/build/toolchain.gypi
@@ -436,6 +436,7 @@
           'V8_ENABLE_CHECKS',
           'OBJECT_PRINT',
           'VERIFY_HEAP',
+          'DEBUG',
         ],
         'msvs_settings': {
           'VCCLCompilerTool': {
@@ -503,15 +504,6 @@
           },
         },
         'conditions': [
-          ['v8_optimized_debug==2', {
-            'defines': [
-              'NDEBUG',
-            ],
-          }, {
-            'defines': [
-              'DEBUG',
-            ],
-          }],
           ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
             'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
                         '-Wnon-virtual-dtor', '-Woverloaded-virtual',
@@ -553,6 +545,9 @@
                   '-fdata-sections',
                   '-ffunction-sections',
                 ],
+                'defines': [
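+                  # Lets src/checks.h disable slow asserts in optimized debug builds.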
+                  'OPTIMIZED_DEBUG',
+                ],
                 'conditions': [
                   # TODO(crbug.com/272548): Avoid -O3 in NaCl
                   ['nacl_target_arch=="none"', {
diff --git a/src/api.cc b/src/api.cc
index 0bb374f..8a73877 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -574,7 +574,8 @@
   int max_executable_size = constraints->max_executable_size();
   if (young_space_size != 0 || old_gen_size != 0 || max_executable_size != 0) {
     // After initialization it's too late to change Heap constraints.
-    ASSERT(!isolate->IsInitialized());
+    // TODO(rmcilroy): fix this assert.
+    // ASSERT(!isolate->IsInitialized());
     bool result = isolate->heap()->ConfigureHeap(young_space_size / 2,
                                                  old_gen_size,
                                                  max_executable_size);
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 079bccb..c57c785 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -148,13 +148,10 @@
   // receiver object). r5 is zero for method calls and non-zero for
   // function calls.
   if (!info->is_classic_mode() || info->is_native()) {
-    Label ok;
     __ cmp(r5, Operand::Zero());
-    __ b(eq, &ok);
     int receiver_offset = info->scope()->num_parameters() * kPointerSize;
     __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
-    __ str(r2, MemOperand(sp, receiver_offset));
-    __ bind(&ok);
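+    // Store conditionally on 'ne' (r5 != 0) instead of branching around it.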
+    __ str(r2, MemOperand(sp, receiver_offset), ne);
   }
 
   // Open a frame scope to indicate that there is a frame on the stack.  The
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index a73d6a2..e7ec59c 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -121,13 +121,10 @@
     // receiver object). r5 is zero for method calls and non-zero for
     // function calls.
     if (!info_->is_classic_mode() || info_->is_native()) {
-      Label ok;
       __ cmp(r5, Operand::Zero());
-      __ b(eq, &ok);
       int receiver_offset = scope()->num_parameters() * kPointerSize;
       __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
-      __ str(r2, MemOperand(sp, receiver_offset));
-      __ bind(&ok);
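+      // Same predication trick: the store executes only when r5 != 0.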
+      __ str(r2, MemOperand(sp, receiver_offset), ne);
     }
   }
 
diff --git a/src/array.js b/src/array.js
index 2649798..e98d7f5 100644
--- a/src/array.js
+++ b/src/array.js
@@ -430,7 +430,7 @@
 
   n--;
   var value = this[n];
-  delete this[n];
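+  // Delete(obj, name, true) throws if the deletion fails (strict semantics).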
+  Delete(this, ToName(n), true);
   this.length = n;
   return value;
 }
diff --git a/src/ast.cc b/src/ast.cc
index 481414e..843f8c8 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -627,7 +627,7 @@
       holder_ = GetPrototypeForPrimitiveCheck(check_type_, oracle->isolate());
       receiver_types_.Add(handle(holder_->map()), oracle->zone());
     }
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
     if (FLAG_enable_slow_asserts) {
       int length = receiver_types_.length();
       for (int i = 0; i < length; i++) {
diff --git a/src/checks.cc b/src/checks.cc
index d2a107c..e08cd7c 100644
--- a/src/checks.cc
+++ b/src/checks.cc
@@ -129,8 +129,6 @@
 
 namespace v8 { namespace internal {
 
-  bool EnableSlowAsserts() { return FLAG_enable_slow_asserts; }
-
   intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; }
 
 } }  // namespace v8::internal
diff --git a/src/checks.h b/src/checks.h
index f5c5f23..9d2db28 100644
--- a/src/checks.h
+++ b/src/checks.h
@@ -272,7 +272,24 @@
 #endif
 
 
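+// Slow asserts are available only in full debug builds, i.e. when DEBUG is
+// defined but OPTIMIZED_DEBUG (set for v8_optimized_debug=2 builds) is not.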
+#ifdef DEBUG
+#ifndef OPTIMIZED_DEBUG
+#define ENABLE_SLOW_ASSERTS    1
+#endif
+#endif
+
+namespace v8 {
+namespace internal {
+#ifdef ENABLE_SLOW_ASSERTS
+#define SLOW_ASSERT(condition) \
+  CHECK(!v8::internal::FLAG_enable_slow_asserts || (condition))
 extern bool FLAG_enable_slow_asserts;
+#else
+#define SLOW_ASSERT(condition) ((void) 0)
+const bool FLAG_enable_slow_asserts = false;
+#endif
+}  // namespace internal
+}  // namespace v8
 
 
 // The ASSERT macro is equivalent to CHECK except that it only
@@ -285,7 +302,6 @@
 #define ASSERT_GE(v1, v2)      CHECK_GE(v1, v2)
 #define ASSERT_LT(v1, v2)      CHECK_LT(v1, v2)
 #define ASSERT_LE(v1, v2)      CHECK_LE(v1, v2)
-#define SLOW_ASSERT(condition) CHECK(!FLAG_enable_slow_asserts || (condition))
 #else
 #define ASSERT_RESULT(expr)    (expr)
 #define ASSERT(condition)      ((void) 0)
@@ -294,7 +310,6 @@
 #define ASSERT_GE(v1, v2)      ((void) 0)
 #define ASSERT_LT(v1, v2)      ((void) 0)
 #define ASSERT_LE(v1, v2)      ((void) 0)
-#define SLOW_ASSERT(condition) ((void) 0)
 #endif
 // Static asserts have no impact on runtime performance, so they can be
 // safely enabled in release mode. Moreover, the ((void) 0) expression
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index 88af466..dfa5ecd 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -371,7 +371,8 @@
                                                     undefined);
   checker.Then();
 
-  HObjectAccess access = HObjectAccess::ForAllocationSiteTransitionInfo();
+  HObjectAccess access = HObjectAccess::ForAllocationSiteOffset(
+      AllocationSite::kTransitionInfoOffset);
   HInstruction* boilerplate = Add<HLoadNamedField>(allocation_site, access);
   HValue* push_value;
   if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS) {
@@ -440,7 +441,8 @@
                                                     undefined);
   checker.And();
 
-  HObjectAccess access = HObjectAccess::ForAllocationSiteTransitionInfo();
+  HObjectAccess access = HObjectAccess::ForAllocationSiteOffset(
+      AllocationSite::kTransitionInfoOffset);
   HInstruction* boilerplate = Add<HLoadNamedField>(allocation_site, access);
 
   int size = JSObject::kHeaderSize + casted_stub()->length() * kPointerSize;
@@ -500,12 +502,14 @@
   // Store the payload (smi elements kind)
   HValue* initial_elements_kind = Add<HConstant>(GetInitialFastElementsKind());
   Add<HStoreNamedField>(object,
-                        HObjectAccess::ForAllocationSiteTransitionInfo(),
+                        HObjectAccess::ForAllocationSiteOffset(
+                            AllocationSite::kTransitionInfoOffset),
                         initial_elements_kind);
 
   // Unlike literals, constructed arrays don't have nested sites
   Add<HStoreNamedField>(object,
-                        HObjectAccess::ForAllocationSiteNestedSite(),
+                        HObjectAccess::ForAllocationSiteOffset(
+                            AllocationSite::kNestedSiteOffset),
                         graph()->GetConstant0());
 
   // Store an empty fixed array for the code dependency.
@@ -513,7 +517,8 @@
     Add<HConstant>(isolate()->factory()->empty_fixed_array());
   HStoreNamedField* store = Add<HStoreNamedField>(
       object,
-      HObjectAccess::ForAllocationSiteDependentCode(),
+      HObjectAccess::ForAllocationSiteOffset(
+          AllocationSite::kDependentCodeOffset),
       empty_fixed_array);
 
   // Link the object to the allocation site list
@@ -522,8 +527,8 @@
   HValue* site = Add<HLoadNamedField>(site_list,
                                       HObjectAccess::ForAllocationSiteList());
   store = Add<HStoreNamedField>(object,
-                                HObjectAccess::ForAllocationSiteWeakNext(),
-                                site);
+      HObjectAccess::ForAllocationSiteOffset(AllocationSite::kWeakNextOffset),
+      site);
   store->SkipWriteBarrier();
   Add<HStoreNamedField>(site_list, HObjectAccess::ForAllocationSiteList(),
                         object);
diff --git a/src/compiler.cc b/src/compiler.cc
index 5e4d17b..ed0a0c8 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -665,67 +665,71 @@
     }
   }
 
-  // Measure how long it takes to do the compilation; only take the
-  // rest of the function into account to avoid overlap with the
-  // parsing statistics.
-  HistogramTimer* rate = info->is_eval()
-      ? info->isolate()->counters()->compile_eval()
-      : info->isolate()->counters()->compile();
-  HistogramTimerScope timer(rate);
-
-  // Compile the code.
   FunctionLiteral* lit = info->function();
   LiveEditFunctionTracker live_edit_tracker(isolate, lit);
-  if (!MakeCode(info)) {
-    if (!isolate->has_pending_exception()) isolate->StackOverflow();
-    return Handle<SharedFunctionInfo>::null();
+  Handle<SharedFunctionInfo> result;
+  {
+    // Measure how long it takes to do the compilation; only take the
+    // rest of the function into account to avoid overlap with the
+    // parsing statistics.
+    HistogramTimer* rate = info->is_eval()
+        ? info->isolate()->counters()->compile_eval()
+        : info->isolate()->counters()->compile();
+    HistogramTimerScope timer(rate);
+
+    // Compile the code.
+    if (!MakeCode(info)) {
+      if (!isolate->has_pending_exception()) isolate->StackOverflow();
+      return Handle<SharedFunctionInfo>::null();
+    }
+
+    // Allocate function.
+    ASSERT(!info->code().is_null());
+    result =
+        isolate->factory()->NewSharedFunctionInfo(
+            lit->name(),
+            lit->materialized_literal_count(),
+            lit->is_generator(),
+            info->code(),
+            ScopeInfo::Create(info->scope(), info->zone()));
+
+    ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
+    Compiler::SetFunctionInfo(result, lit, true, script);
+
+    if (script->name()->IsString()) {
+      PROFILE(isolate, CodeCreateEvent(
+          info->is_eval()
+              ? Logger::EVAL_TAG
+              : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
+          *info->code(),
+          *result,
+          info,
+          String::cast(script->name())));
+      GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
+                     script,
+                     info->code(),
+                     info));
+    } else {
+      PROFILE(isolate, CodeCreateEvent(
+          info->is_eval()
+              ? Logger::EVAL_TAG
+              : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
+          *info->code(),
+          *result,
+          info,
+          isolate->heap()->empty_string()));
+      GDBJIT(AddCode(Handle<String>(), script, info->code(), info));
+    }
+
+    // Hint to the runtime system used when allocating space for initial
+    // property space by setting the expected number of properties for
+    // the instances of the function.
+    SetExpectedNofPropertiesFromEstimate(result,
+                                         lit->expected_property_count());
+
+    script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
   }
 
-  // Allocate function.
-  ASSERT(!info->code().is_null());
-  Handle<SharedFunctionInfo> result =
-      isolate->factory()->NewSharedFunctionInfo(
-          lit->name(),
-          lit->materialized_literal_count(),
-          lit->is_generator(),
-          info->code(),
-          ScopeInfo::Create(info->scope(), info->zone()));
-
-  ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
-  Compiler::SetFunctionInfo(result, lit, true, script);
-
-  if (script->name()->IsString()) {
-    PROFILE(isolate, CodeCreateEvent(
-        info->is_eval()
-            ? Logger::EVAL_TAG
-            : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
-        *info->code(),
-        *result,
-        info,
-        String::cast(script->name())));
-    GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
-                   script,
-                   info->code(),
-                   info));
-  } else {
-    PROFILE(isolate, CodeCreateEvent(
-        info->is_eval()
-            ? Logger::EVAL_TAG
-            : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
-        *info->code(),
-        *result,
-        info,
-        isolate->heap()->empty_string()));
-    GDBJIT(AddCode(Handle<String>(), script, info->code(), info));
-  }
-
-  // Hint to the runtime system used when allocating space for initial
-  // property space by setting the expected number of properties for
-  // the instances of the function.
-  SetExpectedNofPropertiesFromEstimate(result, lit->expected_property_count());
-
-  script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
-
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Notify debugger
   isolate->debugger()->OnAfterCompile(
diff --git a/src/contexts.cc b/src/contexts.cc
index 441ef9d..710d30a 100644
--- a/src/contexts.cc
+++ b/src/contexts.cc
@@ -259,7 +259,7 @@
 
 void Context::AddOptimizedFunction(JSFunction* function) {
   ASSERT(IsNativeContext());
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
   if (FLAG_enable_slow_asserts) {
     Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
     while (!element->IsUndefined()) {
diff --git a/src/conversions-inl.h b/src/conversions-inl.h
index 2f0a399..7ba19ba 100644
--- a/src/conversions-inl.h
+++ b/src/conversions-inl.h
@@ -355,7 +355,7 @@
       return JunkStringValue();
     }
 
-    ASSERT(buffer_pos < kBufferSize);
+    SLOW_ASSERT(buffer_pos < kBufferSize);
     buffer[buffer_pos] = '\0';
     Vector<const char> buffer_vector(buffer, buffer_pos);
     return negative ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0);
@@ -692,7 +692,7 @@
     exponent--;
   }
 
-  ASSERT(buffer_pos < kBufferSize);
+  SLOW_ASSERT(buffer_pos < kBufferSize);
   buffer[buffer_pos] = '\0';
 
   double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);
diff --git a/src/conversions.cc b/src/conversions.cc
index fd0f9ee..5f1219e 100644
--- a/src/conversions.cc
+++ b/src/conversions.cc
@@ -46,8 +46,11 @@
 
 double StringToDouble(UnicodeCache* unicode_cache,
                       const char* str, int flags, double empty_string_val) {
-  const char* end = str + StrLength(str);
-  return InternalStringToDouble(unicode_cache, str, end, flags,
+  // We cast to const uint8_t* here to avoid instantiating the
+  // InternalStringToDouble() template for const char* as well.
+  const uint8_t* start = reinterpret_cast<const uint8_t*>(str);
+  const uint8_t* end = start + StrLength(str);
+  return InternalStringToDouble(unicode_cache, start, end, flags,
                                 empty_string_val);
 }
 
@@ -56,11 +59,15 @@
                       Vector<const char> str,
                       int flags,
                       double empty_string_val) {
-  const char* end = str.start() + str.length();
-  return InternalStringToDouble(unicode_cache, str.start(), end, flags,
+  // We cast to const uint8_t* here to avoid instantiating the
+  // InternalStringToDouble() template for const char* as well.
+  const uint8_t* start = reinterpret_cast<const uint8_t*>(str.start());
+  const uint8_t* end = start + str.length();
+  return InternalStringToDouble(unicode_cache, start, end, flags,
                                 empty_string_val);
 }
 
+
 double StringToDouble(UnicodeCache* unicode_cache,
                       Vector<const uc16> str,
                       int flags,
diff --git a/src/defaults.cc b/src/defaults.cc
index cbbe537..a03cf69 100644
--- a/src/defaults.cc
+++ b/src/defaults.cc
@@ -37,41 +37,25 @@
 namespace v8 {
 
 
-#if V8_OS_ANDROID
-const bool kOsHasSwap = false;
-#else
-const bool kOsHasSwap = true;
-#endif
-
-
 bool ConfigureResourceConstraintsForCurrentPlatform(
     ResourceConstraints* constraints) {
   if (constraints == NULL) {
     return false;
   }
 
-  uint64_t physical_memory = i::OS::TotalPhysicalMemory();
   int lump_of_memory = (i::kPointerSize / 4) * i::MB;
 
   // The young_space_size should be a power of 2 and old_generation_size should
   // be a multiple of Page::kPageSize.
-  if (physical_memory <= 512ul * i::MB) {
-    constraints->set_max_young_space_size(2 * lump_of_memory);
-    constraints->set_max_old_space_size(128 * lump_of_memory);
-    constraints->set_max_executable_size(96 * lump_of_memory);
-  } else if (physical_memory <= (kOsHasSwap ? 768ul * i::MB : 1ul * i::GB)) {
-    constraints->set_max_young_space_size(8 * lump_of_memory);
-    constraints->set_max_old_space_size(256 * lump_of_memory);
-    constraints->set_max_executable_size(192 * lump_of_memory);
-  } else if (physical_memory <= (kOsHasSwap ? 1ul * i::GB : 2ul * i::GB)) {
-    constraints->set_max_young_space_size(16 * lump_of_memory);
-    constraints->set_max_old_space_size(512 * lump_of_memory);
-    constraints->set_max_executable_size(256 * lump_of_memory);
-  } else {
-    constraints->set_max_young_space_size(16 * lump_of_memory);
-    constraints->set_max_old_space_size(700 * lump_of_memory);
-    constraints->set_max_executable_size(256 * lump_of_memory);
-  }
+#if V8_OS_ANDROID
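+  // Memory-based scaling is gone; Android simply gets smaller fixed limits.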
+  constraints->set_max_young_space_size(8 * lump_of_memory);
+  constraints->set_max_old_space_size(256 * lump_of_memory);
+  constraints->set_max_executable_size(192 * lump_of_memory);
+#else
+  constraints->set_max_young_space_size(16 * lump_of_memory);
+  constraints->set_max_old_space_size(700 * lump_of_memory);
+  constraints->set_max_executable_size(256 * lump_of_memory);
+#endif
   return true;
 }
 
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 706d1f0..4e9d281 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -506,7 +506,15 @@
   void SetCallerFp(unsigned offset, intptr_t value);
 
   intptr_t GetRegister(unsigned n) const {
-    ASSERT(n < ARRAY_SIZE(registers_));
+#if DEBUG
+    // This convoluted ASSERT is needed to work around a gcc problem that
+    // improperly detects an array bounds overflow in optimized debug builds
+    // when using a plain ASSERT.
+    if (n >= ARRAY_SIZE(registers_)) {
+      ASSERT(false);
+      return 0;
+    }
+#endif
     return registers_[n];
   }
 
diff --git a/src/elements.cc b/src/elements.cc
index 89621cb..0b745c4 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -792,7 +792,7 @@
       FixedArray* to,
       FixedArrayBase* from) {
     int len0 = to->length();
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
     if (FLAG_enable_slow_asserts) {
       for (int i = 0; i < len0; i++) {
         ASSERT(!to->get(i)->IsTheHole());
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index aa889f3..865413e 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -696,8 +696,10 @@
 #endif
 
 // checks.cc
+#ifdef ENABLE_SLOW_ASSERTS
 DEFINE_bool(enable_slow_asserts, false,
             "enable asserts that are slow to execute")
+#endif
 
 // codegen-ia32.cc / codegen-arm.cc / macro-assembler-*.cc
 DEFINE_bool(print_source, false, "pretty print source code")
diff --git a/src/globals.h b/src/globals.h
index 701fdee..3456030 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -358,7 +358,7 @@
 // Define DISABLE_ASAN macros.
 #if defined(__has_feature)
 #if __has_feature(address_sanitizer)
-#define DISABLE_ASAN __attribute__((no_address_safety_analysis))
+#define DISABLE_ASAN __attribute__((no_sanitize_address))
 #endif
 #endif
 
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 7df1aae..80773bf 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -5786,20 +5786,9 @@
                 ? Representation::Smi() : Representation::Tagged());
   }
 
-  static HObjectAccess ForAllocationSiteTransitionInfo() {
-    return HObjectAccess(kInobject, AllocationSite::kTransitionInfoOffset);
-  }
-
-  static HObjectAccess ForAllocationSiteNestedSite() {
-    return HObjectAccess(kInobject, AllocationSite::kNestedSiteOffset);
-  }
-
-  static HObjectAccess ForAllocationSiteDependentCode() {
-    return HObjectAccess(kInobject, AllocationSite::kDependentCodeOffset);
-  }
-
-  static HObjectAccess ForAllocationSiteWeakNext() {
-    return HObjectAccess(kInobject, AllocationSite::kWeakNextOffset);
+  static HObjectAccess ForAllocationSiteOffset(int offset) {
+    ASSERT(offset >= HeapObject::kHeaderSize && offset < AllocationSite::kSize);
+    return HObjectAccess(kInobject, offset);
   }
 
   static HObjectAccess ForAllocationSiteList() {
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index cfeaca6..0557ed8 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -2344,6 +2344,14 @@
 }
 
 
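+// andps (opcode 0F 54) is SSE1, so no SSE2 feature check is needed.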
+void Assembler::andps(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0x54);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::pand(XMMRegister dst, XMMRegister src) {
   ASSERT(IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 5bb878c..f46c647 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -1017,6 +1017,10 @@
 
   void cpuid();
 
+  // SSE instructions
+  void andps(XMMRegister dst, XMMRegister src);
+  void xorps(XMMRegister dst, XMMRegister src);
+
   // SSE2 instructions
   void cvttss2si(Register dst, const Operand& src);
   void cvttsd2si(Register dst, const Operand& src);
@@ -1034,7 +1038,6 @@
   void mulsd(XMMRegister dst, const Operand& src);
   void divsd(XMMRegister dst, XMMRegister src);
   void xorpd(XMMRegister dst, XMMRegister src);
-  void xorps(XMMRegister dst, XMMRegister src);
   void sqrtsd(XMMRegister dst, XMMRegister src);
 
   void andpd(XMMRegister dst, XMMRegister src);
@@ -1157,7 +1160,7 @@
   // Avoid overflows for displacements etc.
   static const int kMaximalBufferSize = 512*MB;
 
-  byte byte_at(int pos)  { return buffer_[pos]; }
+  byte byte_at(int pos) { return buffer_[pos]; }
   void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
 
  protected:
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index a7613c9..13cf6bc 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -1042,6 +1042,14 @@
                            NameOfXMMRegister(regop),
                            NameOfXMMRegister(rm));
             data++;
+          } else if (f0byte == 0x54) {
+            data += 2;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("andps %s,%s",
+                           NameOfXMMRegister(regop),
+                           NameOfXMMRegister(rm));
+            data++;
           } else if (f0byte == 0x57) {
             data += 2;
             int mod, regop, rm;
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index ebeaaa8..042a470 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -3883,7 +3883,7 @@
     XMMRegister input_reg = ToDoubleRegister(instr->value());
     __ xorps(scratch, scratch);
     __ subsd(scratch, input_reg);
-    __ pand(input_reg, scratch);
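+    // andps does the same bitwise AND as pand but encodes one byte shorter.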
+    __ andps(input_reg, scratch);
   } else if (r.IsSmiOrInteger32()) {
     EmitIntegerMathAbs(instr);
   } else {  // Tagged case.
diff --git a/src/ic.cc b/src/ic.cc
index 4bff543..55d7ba9 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -2306,6 +2306,7 @@
                          right_type->Maybe(Type::Smi());
 
   Maybe<Handle<Object> > result = stub.Result(left, right, isolate());
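+  // If the stub produced no value, an exception is pending; bail out early.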
+  if (!result.has_value) return Failure::Exception();
 
 #ifdef DEBUG
   if (FLAG_trace_ic) {
@@ -2346,9 +2347,8 @@
     PatchInlinedSmiCode(address(), DISABLE_INLINED_SMI_CHECK);
   }
 
-  return result.has_value
-      ? static_cast<MaybeObject*>(*result.value)
-      : Failure::Exception();
+  ASSERT(result.has_value);
+  return static_cast<MaybeObject*>(*result.value);
 }
 
 
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
index 49936d7..4223dde 100644
--- a/src/incremental-marking.cc
+++ b/src/incremental-marking.cc
@@ -728,7 +728,7 @@
   IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
 
   MarkBit mark_bit = Marking::MarkBitFrom(obj);
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
   MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
   SLOW_ASSERT(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
diff --git a/src/list.h b/src/list.h
index 0e4e35b..41666de 100644
--- a/src/list.h
+++ b/src/list.h
@@ -84,7 +84,7 @@
   // backing store (e.g. Add).
   inline T& operator[](int i) const {
     ASSERT(0 <= i);
-    ASSERT(i < length_);
+    SLOW_ASSERT(i < length_);
     return data_[i];
   }
   inline T& at(int i) const { return operator[](i); }
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 90b0193..fa3c375 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -335,9 +335,7 @@
 
   // If a GC was caused while constructing this object, the elements
   // pointer may point to a one pointer filler map.
-  if ((FLAG_use_gvn && FLAG_use_allocation_folding) ||
-      (reinterpret_cast<Map*>(elements()) !=
-      GetHeap()->one_pointer_filler_map())) {
+  if (ElementsAreSafeToExamine()) {
     CHECK_EQ((map()->has_fast_smi_or_object_elements() ||
               (elements() == GetHeap()->empty_fixed_array())),
              (elements()->map() == GetHeap()->fixed_array_map() ||
@@ -698,9 +696,7 @@
   CHECK(length()->IsNumber() || length()->IsUndefined());
   // If a GC was caused while constructing this array, the elements
   // pointer may point to a one pointer filler map.
-  if ((FLAG_use_gvn && FLAG_use_allocation_folding) ||
-      (reinterpret_cast<Map*>(elements()) !=
-      GetHeap()->one_pointer_filler_map())) {
+  if (ElementsAreSafeToExamine()) {
     CHECK(elements()->IsUndefined() ||
           elements()->IsFixedArray() ||
           elements()->IsFixedDoubleArray());
@@ -1143,6 +1139,13 @@
 }
 
 
+bool JSObject::ElementsAreSafeToExamine() {
+  return (FLAG_use_gvn && FLAG_use_allocation_folding) ||
+      reinterpret_cast<Map*>(elements()) !=
+      GetHeap()->one_pointer_filler_map();
+}
+
+
 bool DescriptorArray::IsSortedNoDuplicates(int valid_entries) {
   if (valid_entries == -1) valid_entries = number_of_descriptors();
   Name* current_key = NULL;
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 006aff3..deb3365 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -80,7 +80,7 @@
 
 #define CAST_ACCESSOR(type)                     \
   type* type::cast(Object* object) {            \
-    ASSERT(object->Is##type());                 \
+    SLOW_ASSERT(object->Is##type());            \
     return reinterpret_cast<type*>(object);     \
   }
 
@@ -1190,7 +1190,7 @@
 Heap* HeapObject::GetHeap() {
   Heap* heap =
       MemoryChunk::FromAddress(reinterpret_cast<Address>(this))->heap();
-  ASSERT(heap != NULL);
+  SLOW_ASSERT(heap != NULL);
   return heap;
 }
 
@@ -1307,7 +1307,7 @@
 
 
 void JSObject::ValidateElements() {
-#if DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
   if (FLAG_enable_slow_asserts) {
     ElementsAccessor* accessor = GetElementsAccessor();
     accessor->Validate(this);
@@ -1667,7 +1667,9 @@
     case JS_MESSAGE_OBJECT_TYPE:
       return JSMessageObject::kSize;
     default:
-      UNREACHABLE();
+      // TODO(jkummerow): Re-enable this. Blink currently hits this
+      // from its CustomElementConstructorBuilder.
+      // UNREACHABLE();
       return 0;
   }
 }
@@ -1901,7 +1903,7 @@
 
 
 Object* FixedArray::get(int index) {
-  ASSERT(index >= 0 && index < this->length());
+  SLOW_ASSERT(index >= 0 && index < this->length());
   return READ_FIELD(this, kHeaderSize + index * kPointerSize);
 }
 
@@ -5516,19 +5518,24 @@
 #if DEBUG
   FixedArrayBase* fixed_array =
       reinterpret_cast<FixedArrayBase*>(READ_FIELD(this, kElementsOffset));
-  Map* map = fixed_array->map();
-  ASSERT((IsFastSmiOrObjectElementsKind(kind) &&
-          (map == GetHeap()->fixed_array_map() ||
-           map == GetHeap()->fixed_cow_array_map())) ||
-         (IsFastDoubleElementsKind(kind) &&
-          (fixed_array->IsFixedDoubleArray() ||
-           fixed_array == GetHeap()->empty_fixed_array())) ||
-         (kind == DICTIONARY_ELEMENTS &&
+
+  // If a GC was caused while constructing this object, the elements
+  // pointer may point to a one pointer filler map.
+  if (ElementsAreSafeToExamine()) {
+    Map* map = fixed_array->map();
+    ASSERT((IsFastSmiOrObjectElementsKind(kind) &&
+            (map == GetHeap()->fixed_array_map() ||
+             map == GetHeap()->fixed_cow_array_map())) ||
+           (IsFastDoubleElementsKind(kind) &&
+            (fixed_array->IsFixedDoubleArray() ||
+             fixed_array == GetHeap()->empty_fixed_array())) ||
+           (kind == DICTIONARY_ELEMENTS &&
             fixed_array->IsFixedArray() &&
-          fixed_array->IsDictionary()) ||
-         (kind > DICTIONARY_ELEMENTS));
-  ASSERT((kind != NON_STRICT_ARGUMENTS_ELEMENTS) ||
-         (elements()->IsFixedArray() && elements()->length() >= 2));
+            fixed_array->IsDictionary()) ||
+           (kind > DICTIONARY_ELEMENTS));
+    ASSERT((kind != NON_STRICT_ARGUMENTS_ELEMENTS) ||
+           (elements()->IsFixedArray() && elements()->length() >= 2));
+  }
 #endif
   return kind;
 }
diff --git a/src/objects.cc b/src/objects.cc
index 944bfa6..f7c8917 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -343,6 +343,14 @@
 }
 
 
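+// Handlified wrapper; CALL_HEAP_FUNCTION retries the allocation after a GC.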
+Handle<FixedArray> JSObject::EnsureWritableFastElements(
+    Handle<JSObject> object) {
+  CALL_HEAP_FUNCTION(object->GetIsolate(),
+                     object->EnsureWritableFastElements(),
+                     FixedArray);
+}
+
+
 Handle<Object> JSObject::GetPropertyWithCallback(Handle<JSObject> object,
                                                  Handle<Object> receiver,
                                                  Handle<Object> structure,
@@ -1196,7 +1204,7 @@
   // Externalizing twice leaks the external resource, so it's
   // prohibited by the API.
   ASSERT(!this->IsExternalString());
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
   if (FLAG_enable_slow_asserts) {
     // Assert that the resource and the string are equivalent.
     ASSERT(static_cast<size_t>(this->length()) == resource->length());
@@ -1253,7 +1261,7 @@
 
 
 bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
   if (FLAG_enable_slow_asserts) {
     // Assert that the resource and the string are equivalent.
     ASSERT(static_cast<size_t>(this->length()) == resource->length());
@@ -4483,7 +4491,7 @@
       Handle<Map>::cast(result)->SharedMapVerify();
     }
 #endif
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
     if (FLAG_enable_slow_asserts) {
       // The cached map should match newly created normalized map bit-by-bit,
       // except for the code cache, which can contain some ics which can be
@@ -7828,7 +7836,7 @@
       accessor->AddElementsToFixedArray(array, array, this);
   FixedArray* result;
   if (!maybe_result->To<FixedArray>(&result)) return maybe_result;
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
   if (FLAG_enable_slow_asserts) {
     for (int i = 0; i < result->length(); i++) {
       Object* current = result->get(i);
@@ -7846,7 +7854,7 @@
       accessor->AddElementsToFixedArray(NULL, NULL, this, other);
   FixedArray* result;
   if (!maybe_result->To(&result)) return maybe_result;
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
   if (FLAG_enable_slow_asserts) {
     for (int i = 0; i < result->length(); i++) {
       Object* current = result->get(i);
@@ -8901,7 +8909,7 @@
   // Fast check: if hash code is computed for both strings
   // a fast negative check can be performed.
   if (HasHashCode() && other->HasHashCode()) {
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
     if (FLAG_enable_slow_asserts) {
       if (Hash() != other->Hash()) {
         bool found_difference = false;
@@ -9156,7 +9164,7 @@
   if (newspace->Contains(start_of_string) &&
       newspace->top() == start_of_string + old_size) {
     // Last allocated object in new space.  Simply lower allocation top.
-    *(newspace->allocation_top_address()) = start_of_string + new_size;
+    newspace->set_top(start_of_string + new_size);
   } else {
     // Sizes are pointer size aligned, so that we can use filler objects
     // that are a multiple of pointer size.
@@ -9904,9 +9912,13 @@
   String* name = shared()->DebugName();
   Vector<const char> filter = CStrVector(raw_filter);
   if (filter.length() == 0) return name->length() == 0;
-  if (filter[0] != '-' && name->IsUtf8EqualTo(filter)) return true;
-  if (filter[0] == '-' &&
-      !name->IsUtf8EqualTo(filter.SubVector(1, filter.length()))) {
+  if (filter[0] == '-') {
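+    // A plain "-" filter matches every function with a non-empty name.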
+    if (filter.length() == 1) {
+      return (name->length() != 0);
+    } else if (!name->IsUtf8EqualTo(filter.SubVector(1, filter.length()))) {
+      return true;
+    }
+  } else if (name->IsUtf8EqualTo(filter)) {
     return true;
   }
   if (filter[filter.length() - 1] == '*' &&
@@ -14287,6 +14299,14 @@
 int HashTable<SeededNumberDictionaryShape, uint32_t>::FindEntry(uint32_t);
 
 
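+// Handlified wrapper around the MaybeObject* version below.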
+Handle<Object> JSObject::PrepareSlowElementsForSort(
+    Handle<JSObject> object, uint32_t limit) {
+  CALL_HEAP_FUNCTION(object->GetIsolate(),
+                     object->PrepareSlowElementsForSort(limit),
+                     Object);
+}
+
+
 // Collates undefined and nonexistent elements below limit from position
 // zero of the elements. The object stays in Dictionary mode.
 MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
@@ -14389,74 +14409,57 @@
 // the start of the elements array.
 // If the object is in dictionary mode, it is converted to fast elements
 // mode.
-MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
-  Heap* heap = GetHeap();
+Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object,
+                                                uint32_t limit) {
+  Isolate* isolate = object->GetIsolate();
 
-  ASSERT(!map()->is_observed());
-  if (HasDictionaryElements()) {
+  ASSERT(!object->map()->is_observed());
+  if (object->HasDictionaryElements()) {
     // Convert to fast elements containing only the existing properties.
     // Ordering is irrelevant, since we are going to sort anyway.
-    SeededNumberDictionary* dict = element_dictionary();
-    if (IsJSArray() || dict->requires_slow_elements() ||
+    Handle<SeededNumberDictionary> dict(object->element_dictionary());
+    if (object->IsJSArray() || dict->requires_slow_elements() ||
         dict->max_number_key() >= limit) {
-      return PrepareSlowElementsForSort(limit);
+      return JSObject::PrepareSlowElementsForSort(object, limit);
     }
     // Convert to fast elements.
 
-    Object* obj;
-    MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(),
-                                                      FAST_HOLEY_ELEMENTS);
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-    Map* new_map = Map::cast(obj);
+    Handle<Map> new_map =
+        JSObject::GetElementsTransitionMap(object, FAST_HOLEY_ELEMENTS);
 
-    PretenureFlag tenure = heap->InNewSpace(this) ? NOT_TENURED: TENURED;
-    Object* new_array;
-    { MaybeObject* maybe_new_array =
-          heap->AllocateFixedArray(dict->NumberOfElements(), tenure);
-      if (!maybe_new_array->ToObject(&new_array)) return maybe_new_array;
-    }
-    FixedArray* fast_elements = FixedArray::cast(new_array);
-    dict->CopyValuesTo(fast_elements);
-    ValidateElements();
+    PretenureFlag tenure = isolate->heap()->InNewSpace(*object) ?
+        NOT_TENURED : TENURED;
+    Handle<FixedArray> fast_elements =
+        isolate->factory()->NewFixedArray(dict->NumberOfElements(), tenure);
+    dict->CopyValuesTo(*fast_elements);
+    object->ValidateElements();
 
-    set_map_and_elements(new_map, fast_elements);
-  } else if (HasExternalArrayElements()) {
+    object->set_map_and_elements(*new_map, *fast_elements);
+  } else if (object->HasExternalArrayElements()) {
     // External arrays cannot have holes or undefined elements.
-    return Smi::FromInt(ExternalArray::cast(elements())->length());
-  } else if (!HasFastDoubleElements()) {
-    Object* obj;
-    { MaybeObject* maybe_obj = EnsureWritableFastElements();
-      if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-    }
+    return handle(Smi::FromInt(
+        ExternalArray::cast(object->elements())->length()), isolate);
+  } else if (!object->HasFastDoubleElements()) {
+    JSObject::EnsureWritableFastElements(object);
   }
-  ASSERT(HasFastSmiOrObjectElements() || HasFastDoubleElements());
+  ASSERT(object->HasFastSmiOrObjectElements() ||
+         object->HasFastDoubleElements());
 
   // Collect holes at the end, undefined before that and the rest at the
   // start, and return the number of non-hole, non-undefined values.
 
-  FixedArrayBase* elements_base = FixedArrayBase::cast(this->elements());
+  Handle<FixedArrayBase> elements_base(object->elements());
   uint32_t elements_length = static_cast<uint32_t>(elements_base->length());
   if (limit > elements_length) {
     limit = elements_length;
   }
   if (limit == 0) {
-    return Smi::FromInt(0);
-  }
-
-  HeapNumber* result_double = NULL;
-  if (limit > static_cast<uint32_t>(Smi::kMaxValue)) {
-    // Pessimistically allocate space for return value before
-    // we start mutating the array.
-    Object* new_double;
-    { MaybeObject* maybe_new_double = heap->AllocateHeapNumber(0.0);
-      if (!maybe_new_double->ToObject(&new_double)) return maybe_new_double;
-    }
-    result_double = HeapNumber::cast(new_double);
+    return handle(Smi::FromInt(0), isolate);
   }
 
   uint32_t result = 0;
-  if (elements_base->map() == heap->fixed_double_array_map()) {
-    FixedDoubleArray* elements = FixedDoubleArray::cast(elements_base);
+  if (elements_base->map() == isolate->heap()->fixed_double_array_map()) {
+    FixedDoubleArray* elements = FixedDoubleArray::cast(*elements_base);
     // Split elements into defined and the_hole, in that order.
     unsigned int holes = limit;
     // Assume most arrays contain no holes and undefined values, so minimize the
@@ -14483,7 +14486,7 @@
       holes++;
     }
   } else {
-    FixedArray* elements = FixedArray::cast(elements_base);
+    FixedArray* elements = FixedArray::cast(*elements_base);
     DisallowHeapAllocation no_gc;
 
     // Split elements into defined, undefined and the_hole, in that order.  Only
@@ -14528,12 +14531,7 @@
     }
   }
 
-  if (result <= static_cast<uint32_t>(Smi::kMaxValue)) {
-    return Smi::FromInt(static_cast<int>(result));
-  }
-  ASSERT_NE(NULL, result_double);
-  result_double->set_value(static_cast<double>(result));
-  return result_double;
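+  // The factory returns a Smi when the value fits, otherwise a HeapNumber.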
+  return isolate->factory()->NewNumberFromUint(result);
 }
 
 
diff --git a/src/objects.h b/src/objects.h
index 267ef13..c0835e2 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -2112,14 +2112,19 @@
       WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
 
   // Requires: HasFastElements().
+  static Handle<FixedArray> EnsureWritableFastElements(
+      Handle<JSObject> object);
   MUST_USE_RESULT inline MaybeObject* EnsureWritableFastElements();
 
   // Collects elements starting at index 0.
   // Undefined values are placed after non-undefined values.
   // Returns the number of non-undefined values.
-  MUST_USE_RESULT MaybeObject* PrepareElementsForSort(uint32_t limit);
+  static Handle<Object> PrepareElementsForSort(Handle<JSObject> object,
+                                               uint32_t limit);
   // As PrepareElementsForSort, but only on objects where elements is
   // a dictionary, and it will stay a dictionary.
+  static Handle<Object> PrepareSlowElementsForSort(Handle<JSObject> object,
+                                                   uint32_t limit);
   MUST_USE_RESULT MaybeObject* PrepareSlowElementsForSort(uint32_t limit);
 
   static Handle<Object> GetPropertyWithCallback(Handle<JSObject> object,
@@ -2590,6 +2595,11 @@
   };
 
   void IncrementSpillStatistics(SpillInformation* info);
+
+  // If a GC was caused while constructing this object, the elements pointer
+  // may point to a one pointer filler map. The object won't be rooted, but
+  // our heap verification code could stumble across it.
+  bool ElementsAreSafeToExamine();
 #endif
   Object* SlowReverseLookup(Object* value);
 
diff --git a/src/runtime.cc b/src/runtime.cc
index feb78c0..15cfc85 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -10548,11 +10548,11 @@
 // property.
 // Returns the number of non-undefined elements collected.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_RemoveArrayHoles) {
-  SealHandleScope shs(isolate);
+  HandleScope scope(isolate);
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(JSObject, object, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
   CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
-  return object->PrepareElementsForSort(limit);
+  return *JSObject::PrepareElementsForSort(object, limit);
 }
 
 
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index 7178b57..d5c114c 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -264,11 +264,11 @@
 // allocation) so it can be used by all the allocation functions and for all
 // the paged spaces.
 HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
-  Address current_top = allocation_info_.top;
+  Address current_top = allocation_info_.top();
   Address new_top = current_top + size_in_bytes;
-  if (new_top > allocation_info_.limit) return NULL;
+  if (new_top > allocation_info_.limit()) return NULL;
 
-  allocation_info_.top = new_top;
+  allocation_info_.set_top(new_top);
   return HeapObject::FromAddress(current_top);
 }
 
@@ -324,29 +324,29 @@
 
 
 MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
-  Address old_top = allocation_info_.top;
+  Address old_top = allocation_info_.top();
 #ifdef DEBUG
   // If we are stressing compaction we waste some memory in new space
   // in order to get more frequent GCs.
   if (FLAG_stress_compaction && !heap()->linear_allocation()) {
-    if (allocation_info_.limit - old_top >= size_in_bytes * 4) {
+    if (allocation_info_.limit() - old_top >= size_in_bytes * 4) {
       int filler_size = size_in_bytes * 4;
       for (int i = 0; i < filler_size; i += kPointerSize) {
         *(reinterpret_cast<Object**>(old_top + i)) =
             heap()->one_pointer_filler_map();
       }
       old_top += filler_size;
-      allocation_info_.top += filler_size;
+      allocation_info_.set_top(allocation_info_.top() + filler_size);
     }
   }
 #endif
 
-  if (allocation_info_.limit - old_top < size_in_bytes) {
+  if (allocation_info_.limit() - old_top < size_in_bytes) {
     return SlowAllocateRaw(size_in_bytes);
   }
 
   HeapObject* obj = HeapObject::FromAddress(old_top);
-  allocation_info_.top += size_in_bytes;
+  allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 
   HeapProfiler* profiler = heap()->isolate()->heap_profiler();
diff --git a/src/spaces.cc b/src/spaces.cc
index ee6a890..fe5eeb5 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -960,8 +960,8 @@
       * AreaSize();
   accounting_stats_.Clear();
 
-  allocation_info_.top = NULL;
-  allocation_info_.limit = NULL;
+  allocation_info_.set_top(NULL);
+  allocation_info_.set_limit(NULL);
 
   anchor_.InitializeAsAnchor(this);
 }
@@ -990,7 +990,7 @@
 
 size_t PagedSpace::CommittedPhysicalMemory() {
   if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
-  MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
+  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
   size_t size = 0;
   PageIterator it(this);
   while (it.has_next()) {
@@ -1142,8 +1142,9 @@
     DecreaseUnsweptFreeBytes(page);
   }
 
-  if (Page::FromAllocationTop(allocation_info_.top) == page) {
-    allocation_info_.top = allocation_info_.limit = NULL;
+  if (Page::FromAllocationTop(allocation_info_.top()) == page) {
+    allocation_info_.set_top(NULL);
+    allocation_info_.set_limit(NULL);
   }
 
   if (unlink) {
@@ -1170,12 +1171,12 @@
   if (was_swept_conservatively_) return;
 
   bool allocation_pointer_found_in_space =
-      (allocation_info_.top == allocation_info_.limit);
+      (allocation_info_.top() == allocation_info_.limit());
   PageIterator page_iterator(this);
   while (page_iterator.has_next()) {
     Page* page = page_iterator.next();
     CHECK(page->owner() == this);
-    if (page == Page::FromAllocationTop(allocation_info_.top)) {
+    if (page == Page::FromAllocationTop(allocation_info_.top())) {
       allocation_pointer_found_in_space = true;
     }
     CHECK(page->WasSweptPrecisely());
@@ -1286,8 +1287,8 @@
   }
 
   start_ = NULL;
-  allocation_info_.top = NULL;
-  allocation_info_.limit = NULL;
+  allocation_info_.set_top(NULL);
+  allocation_info_.set_limit(NULL);
 
   to_space_.TearDown();
   from_space_.TearDown();
@@ -1344,22 +1345,22 @@
       }
     }
   }
-  allocation_info_.limit = to_space_.page_high();
+  allocation_info_.set_limit(to_space_.page_high());
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
 
 
 void NewSpace::UpdateAllocationInfo() {
-  MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
-  allocation_info_.top = to_space_.page_low();
-  allocation_info_.limit = to_space_.page_high();
+  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+  allocation_info_.set_top(to_space_.page_low());
+  allocation_info_.set_limit(to_space_.page_high());
 
   // Lower limit during incremental marking.
   if (heap()->incremental_marking()->IsMarking() &&
       inline_allocation_limit_step() != 0) {
     Address new_limit =
-        allocation_info_.top + inline_allocation_limit_step();
-    allocation_info_.limit = Min(new_limit, allocation_info_.limit);
+        allocation_info_.top() + inline_allocation_limit_step();
+    allocation_info_.set_limit(Min(new_limit, allocation_info_.limit()));
   }
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
@@ -1378,7 +1379,7 @@
 
 
 bool NewSpace::AddFreshPage() {
-  Address top = allocation_info_.top;
+  Address top = allocation_info_.top();
   if (NewSpacePage::IsAtStart(top)) {
     // The current page is already empty. Don't try to make another.
 
@@ -1410,15 +1411,16 @@
 
 
 MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
-  Address old_top = allocation_info_.top;
+  Address old_top = allocation_info_.top();
   Address new_top = old_top + size_in_bytes;
   Address high = to_space_.page_high();
-  if (allocation_info_.limit < high) {
+  if (allocation_info_.limit() < high) {
     // Incremental marking has lowered the limit to get a
     // chance to do a step.
-    allocation_info_.limit = Min(
-        allocation_info_.limit + inline_allocation_limit_step_,
+    Address new_limit = Min(
+        allocation_info_.limit() + inline_allocation_limit_step_,
         high);
+    allocation_info_.set_limit(new_limit);
     int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
     heap()->incremental_marking()->Step(
         bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
@@ -1973,7 +1975,7 @@
 
 size_t NewSpace::CommittedPhysicalMemory() {
   if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
-  MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
+  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
   size_t size = to_space_.CommittedPhysicalMemory();
   if (from_space_.is_committed()) {
     size += from_space_.CommittedPhysicalMemory();
@@ -2499,9 +2501,9 @@
   Object* object = NULL;
   if (!maybe->ToObject(&object)) return false;
   HeapObject* allocation = HeapObject::cast(object);
-  Address top = allocation_info_.top;
+  Address top = allocation_info_.top();
   if ((top - bytes) == allocation->address()) {
-    allocation_info_.top = allocation->address();
+    allocation_info_.set_top(allocation->address());
     return true;
   }
   // There may be a borderline case here where the allocation succeeded, but
@@ -2547,9 +2549,9 @@
 bool PagedSpace::ReserveSpace(int size_in_bytes) {
   ASSERT(size_in_bytes <= AreaSize());
   ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
-  Address current_top = allocation_info_.top;
+  Address current_top = allocation_info_.top();
   Address new_top = current_top + size_in_bytes;
-  if (new_top <= allocation_info_.limit) return true;
+  if (new_top <= allocation_info_.limit()) return true;
 
   HeapObject* new_area = free_list_.Allocate(size_in_bytes);
   if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
@@ -2624,16 +2626,17 @@
 
 
 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
-  if (allocation_info_.top >= allocation_info_.limit) return;
+  if (allocation_info_.top() >= allocation_info_.limit()) return;
 
-  if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) {
+  if (Page::FromAllocationTop(allocation_info_.top())->
+      IsEvacuationCandidate()) {
     // Create filler object to keep page iterable if it was iterable.
     int remaining =
-        static_cast<int>(allocation_info_.limit - allocation_info_.top);
-    heap()->CreateFillerObjectAt(allocation_info_.top, remaining);
+        static_cast<int>(allocation_info_.limit() - allocation_info_.top());
+    heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
 
-    allocation_info_.top = NULL;
-    allocation_info_.limit = NULL;
+    allocation_info_.set_top(NULL);
+    allocation_info_.set_limit(NULL);
   }
 }
 
diff --git a/src/spaces.h b/src/spaces.h
index 6144c95..2cd92c5 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -1317,18 +1317,53 @@
 // space.
 class AllocationInfo {
  public:
-  AllocationInfo() : top(NULL), limit(NULL) {
+  AllocationInfo() : top_(NULL), limit_(NULL) {
   }
 
-  Address top;  // Current allocation top.
-  Address limit;  // Current allocation limit.
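+  // The accessors assert that top/limit never carry heap-object tag bits.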
+  INLINE(void set_top(Address top)) {
+    SLOW_ASSERT(top == NULL ||
+        (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0);
+    top_ = top;
+  }
+
+  INLINE(Address top()) const {
+    SLOW_ASSERT(top_ == NULL ||
+        (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0);
+    return top_;
+  }
+
+  Address* top_address() {
+    return &top_;
+  }
+
+  INLINE(void set_limit(Address limit)) {
+    SLOW_ASSERT(limit == NULL ||
+        (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0);
+    limit_ = limit;
+  }
+
+  INLINE(Address limit()) const {
+    SLOW_ASSERT(limit_ == NULL ||
+        (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) == 0);
+    return limit_;
+  }
+
+  Address* limit_address() {
+    return &limit_;
+  }
 
 #ifdef DEBUG
   bool VerifyPagedAllocation() {
-    return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
-        && (top <= limit);
+    return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_))
+        && (top_ <= limit_);
   }
 #endif
+
+ private:
+  // Current allocation top.
+  Address top_;
+  // Current allocation limit.
+  Address limit_;
 };
 
 
@@ -1707,12 +1742,18 @@
   virtual intptr_t Waste() { return accounting_stats_.Waste(); }
 
   // Returns the allocation pointer in this space.
-  Address top() { return allocation_info_.top; }
-  Address limit() { return allocation_info_.limit; }
+  Address top() { return allocation_info_.top(); }
+  Address limit() { return allocation_info_.limit(); }
 
-  // The allocation top and limit addresses.
-  Address* allocation_top_address() { return &allocation_info_.top; }
-  Address* allocation_limit_address() { return &allocation_info_.limit; }
+  // The allocation top address.
+  Address* allocation_top_address() {
+    return allocation_info_.top_address();
+  }
+
+  // The allocation limit address.
+  Address* allocation_limit_address() {
+    return allocation_info_.limit_address();
+  }
 
   enum AllocationType {
     NEW_OBJECT,
@@ -1745,9 +1786,9 @@
   void SetTop(Address top, Address limit) {
     ASSERT(top == limit ||
            Page::FromAddress(top) == Page::FromAddress(limit - 1));
-    MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
-    allocation_info_.top = top;
-    allocation_info_.limit = limit;
+    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+    allocation_info_.set_top(top);
+    allocation_info_.set_limit(limit);
   }
 
   void Allocate(int bytes) {
@@ -2388,9 +2429,15 @@
 
   // Return the address of the allocation pointer in the active semispace.
   Address top() {
-    ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top));
-    return allocation_info_.top;
+    ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
+    return allocation_info_.top();
   }
+
+  void set_top(Address top) {
+    ASSERT(to_space_.current_page()->ContainsLimit(top));
+    allocation_info_.set_top(top);
+  }
+
   // Return the address of the first object in the active semispace.
   Address bottom() { return to_space_.space_start(); }
 
@@ -2415,9 +2462,15 @@
     return reinterpret_cast<Address>(index << kPointerSizeLog2);
   }
 
-  // The allocation top and limit addresses.
-  Address* allocation_top_address() { return &allocation_info_.top; }
-  Address* allocation_limit_address() { return &allocation_info_.limit; }
+  // The allocation top address.
+  Address* allocation_top_address() {
+    return allocation_info_.top_address();
+  }
+
+  // The allocation limit address.
+  Address* allocation_limit_address() {
+    return allocation_info_.limit_address();
+  }
 
   MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes));
 
@@ -2427,13 +2480,14 @@
   void LowerInlineAllocationLimit(intptr_t step) {
     inline_allocation_limit_step_ = step;
     if (step == 0) {
-      allocation_info_.limit = to_space_.page_high();
+      allocation_info_.set_limit(to_space_.page_high());
     } else {
-      allocation_info_.limit = Min(
-          allocation_info_.top + inline_allocation_limit_step_,
-          allocation_info_.limit);
+      Address new_limit = Min(
+          allocation_info_.top() + inline_allocation_limit_step_,
+          allocation_info_.limit());
+      allocation_info_.set_limit(new_limit);
     }
-    top_on_previous_step_ = allocation_info_.top;
+    top_on_previous_step_ = allocation_info_.top();
   }
 
   // Get the extent of the inactive semispace (for use as a marking stack,
@@ -2580,9 +2634,9 @@
 // For contiguous spaces, top should be in the space (or at the end) and limit
 // should be the end of the space.
 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
-  SLOW_ASSERT((space).page_low() <= (info).top             \
-              && (info).top <= (space).page_high()         \
-              && (info).limit <= (space).page_high())
+  SLOW_ASSERT((space).page_low() <= (info).top() \
+              && (info).top() <= (space).page_high() \
+              && (info).limit() <= (space).page_high())
 
 
 // -----------------------------------------------------------------------------
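
The AllocationInfo change above replaces two public Address fields with
assertion-checked accessors, so every read and write of the allocation
pointers verifies that the low tag bits are clear. A minimal sketch of that
pattern, assuming a simple tag mask in place of V8's HeapObjectTagMask() and
plain assert in place of SLOW_ASSERT:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    typedef uint8_t* Address;

    // Sketch only: tagged heap pointers have a low bit set, so a valid
    // allocation boundary must have it clear. kTagMask stands in for
    // V8's HeapObjectTagMask().
    const intptr_t kTagMask = 1;

    class AllocationInfo {
     public:
      AllocationInfo() : top_(NULL), limit_(NULL) {}

      void set_top(Address top) {
        assert(top == NULL ||
               (reinterpret_cast<intptr_t>(top) & kTagMask) == 0);
        top_ = top;
      }
      Address top() const { return top_; }
      // Generated code bumps the allocation pointer in place, hence the
      // raw-address escape hatch.
      Address* top_address() { return &top_; }

      void set_limit(Address limit) {
        assert(limit == NULL ||
               (reinterpret_cast<intptr_t>(limit) & kTagMask) == 0);
        limit_ = limit;
      }
      Address limit() const { return limit_; }
      Address* limit_address() { return &limit_; }

     private:
      Address top_;    // current allocation top
      Address limit_;  // current allocation limit
    };

The top_address()/limit_address() accessors exist because compiled code
patches the pointers directly; only C++-side updates go through the checks.
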
diff --git a/src/utils.h b/src/utils.h
index 4a08319..062019a 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -419,8 +419,8 @@
   // Returns a vector using the same backing storage as this one,
   // spanning from and including 'from', to but not including 'to'.
   Vector<T> SubVector(int from, int to) {
-    ASSERT(to <= length_);
-    ASSERT(from < to);
+    SLOW_ASSERT(to <= length_);
+    SLOW_ASSERT(from < to);
     ASSERT(0 <= from);
     return Vector<T>(start() + from, to - from);
   }
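
SubVector sits on hot paths, so its range checks are demoted from ASSERT to
SLOW_ASSERT, which only fires when expensive verification is explicitly
enabled. A rough sketch of the two-tier scheme (the macro names follow V8's,
but the wiring here is simplified and assumed):

    #include <cassert>

    // Assumption: slow checks are gated on a runtime flag, as V8 does.
    static bool enable_slow_asserts = false;

    // ASSERT is active in any debug build; SLOW_ASSERT additionally
    // requires opting in, so hot paths stay cheap in ordinary debug runs.
    #ifdef DEBUG
    #define ASSERT(cond) assert(cond)
    #define SLOW_ASSERT(cond) assert(!enable_slow_asserts || (cond))
    #else
    #define ASSERT(cond) ((void)0)
    #define SLOW_ASSERT(cond) ((void)0)
    #endif
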
diff --git a/src/v8utils.h b/src/v8utils.h
index fd3f4a5..02e57eb 100644
--- a/src/v8utils.h
+++ b/src/v8utils.h
@@ -194,61 +194,6 @@
 }
 
 
-// Copies data from |src| to |dst|. No restrictions.
-template <typename T>
-inline void MoveBytes(T* dst, const T* src, size_t num_bytes) {
-  STATIC_ASSERT(sizeof(T) == 1);
-  switch (num_bytes) {
-  case 0: return;
-  case 1:
-    *dst = *src;
-    return;
-#ifdef V8_HOST_CAN_READ_UNALIGNED
-  case 2:
-    *reinterpret_cast<uint16_t*>(dst) = *reinterpret_cast<const uint16_t*>(src);
-    return;
-  case 3: {
-    uint16_t part1 = *reinterpret_cast<const uint16_t*>(src);
-    byte part2 = *(src + 2);
-    *reinterpret_cast<uint16_t*>(dst) = part1;
-    *(dst + 2) = part2;
-    return;
-  }
-  case 4:
-    *reinterpret_cast<uint32_t*>(dst) = *reinterpret_cast<const uint32_t*>(src);
-    return;
-  case 5:
-  case 6:
-  case 7:
-  case 8: {
-    uint32_t part1 = *reinterpret_cast<const uint32_t*>(src);
-    uint32_t part2 = *reinterpret_cast<const uint32_t*>(src + num_bytes - 4);
-    *reinterpret_cast<uint32_t*>(dst) = part1;
-    *reinterpret_cast<uint32_t*>(dst + num_bytes - 4) = part2;
-    return;
-  }
-  case 9:
-  case 10:
-  case 11:
-  case 12:
-  case 13:
-  case 14:
-  case 15:
-  case 16: {
-    double part1 = *reinterpret_cast<const double*>(src);
-    double part2 = *reinterpret_cast<const double*>(src + num_bytes - 8);
-    *reinterpret_cast<double*>(dst) = part1;
-    *reinterpret_cast<double*>(dst + num_bytes - 8) = part2;
-    return;
-  }
-#endif
-  default:
-    OS::MemMove(dst, src, num_bytes);
-    return;
-  }
-}
-
-
 template <typename T, typename U>
 inline void MemsetPointer(T** dest, U* value, int counter) {
 #ifdef DEBUG
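
MoveBytes and its hand-unrolled small-size cases are deleted outright; the
remaining callers go through OS::MemMove instead. The replacement leans on
the fact that memmove already tolerates overlapping ranges and that modern
libc implementations special-case small copies. A trivial stand-in (plain
memmove in place of V8's OS::MemMove):

    #include <cstring>

    // Overlap-safe copy, as the deleted MoveBytes was.
    inline void CopyOverlapping(void* dst, const void* src,
                                size_t num_bytes) {
      memmove(dst, src, num_bytes);  // memmove, unlike memcpy, permits overlap
    }
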
diff --git a/src/version.cc b/src/version.cc
index 40a847b..be577e3 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     22
-#define BUILD_NUMBER      22
+#define BUILD_NUMBER      23
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 1502d79..dcb9fa5 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -2476,6 +2476,17 @@
 }
 
 
+// SSE operations.
+
+void Assembler::andps(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x54);
+  emit_sse_operand(dst, src);
+}
+
+
 // SSE 2 operations.
 
 void Assembler::movd(XMMRegister dst, Register src) {
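
The new andps emitter produces the SSE encoding [optional REX] 0F 54 /r;
with no mandatory 66 prefix it is one byte shorter than the SSE2 andpd used
before. A standalone sketch of the register-register form, with register
numbers 0-15 standing in for XMM registers:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Sketch of the bytes Assembler::andps emits for a reg-reg operand:
    // optional REX (only for xmm8-xmm15), opcode 0F 54, then a ModRM byte
    // with mod=11, reg=dst, rm=src.
    std::vector<uint8_t> EncodeAndps(int dst, int src) {
      std::vector<uint8_t> code;
      if (dst > 7 || src > 7) {
        // REX.R extends ModRM.reg (dst); REX.B extends ModRM.rm (src).
        code.push_back(0x40 | ((dst >> 3) << 2) | (src >> 3));
      }
      code.push_back(0x0F);
      code.push_back(0x54);
      code.push_back(0xC0 | ((dst & 7) << 3) | (src & 7));
      return code;
    }

    int main() {
      std::vector<uint8_t> code = EncodeAndps(0, 1);  // andps xmm0, xmm1
      for (size_t i = 0; i < code.size(); ++i) printf("%02x ", code[i]);
      printf("\n");  // prints: 0f 54 c1
      return 0;
    }
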
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index c715bce..508c622 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -1346,13 +1346,26 @@
 
   void sahf();
 
+  // SSE instructions
+  void movaps(XMMRegister dst, XMMRegister src);
+  void movss(XMMRegister dst, const Operand& src);
+  void movss(const Operand& dst, XMMRegister src);
+
+  void cvttss2si(Register dst, const Operand& src);
+  void cvttss2si(Register dst, XMMRegister src);
+  void cvtlsi2ss(XMMRegister dst, Register src);
+
+  void xorps(XMMRegister dst, XMMRegister src);
+  void andps(XMMRegister dst, XMMRegister src);
+
+  void movmskps(Register dst, XMMRegister src);
+
   // SSE2 instructions
   void movd(XMMRegister dst, Register src);
   void movd(Register dst, XMMRegister src);
   void movq(XMMRegister dst, Register src);
   void movq(Register dst, XMMRegister src);
   void movq(XMMRegister dst, XMMRegister src);
-  void extractps(Register dst, XMMRegister src, byte imm8);
 
   // Don't use this unless it's important to keep the
   // top half of the destination register unchanged.
@@ -1370,13 +1383,7 @@
   void movdqu(XMMRegister dst, const Operand& src);
 
   void movapd(XMMRegister dst, XMMRegister src);
-  void movaps(XMMRegister dst, XMMRegister src);
 
-  void movss(XMMRegister dst, const Operand& src);
-  void movss(const Operand& dst, XMMRegister src);
-
-  void cvttss2si(Register dst, const Operand& src);
-  void cvttss2si(Register dst, XMMRegister src);
   void cvttsd2si(Register dst, const Operand& src);
   void cvttsd2si(Register dst, XMMRegister src);
   void cvttsd2siq(Register dst, XMMRegister src);
@@ -1386,7 +1393,6 @@
   void cvtqsi2sd(XMMRegister dst, const Operand& src);
   void cvtqsi2sd(XMMRegister dst, Register src);
 
-  void cvtlsi2ss(XMMRegister dst, Register src);
 
   void cvtss2sd(XMMRegister dst, XMMRegister src);
   void cvtss2sd(XMMRegister dst, const Operand& src);
@@ -1405,11 +1411,16 @@
   void andpd(XMMRegister dst, XMMRegister src);
   void orpd(XMMRegister dst, XMMRegister src);
   void xorpd(XMMRegister dst, XMMRegister src);
-  void xorps(XMMRegister dst, XMMRegister src);
   void sqrtsd(XMMRegister dst, XMMRegister src);
 
   void ucomisd(XMMRegister dst, XMMRegister src);
   void ucomisd(XMMRegister dst, const Operand& src);
+  void cmpltsd(XMMRegister dst, XMMRegister src);
+
+  void movmskpd(Register dst, XMMRegister src);
+
+  // SSE 4.1 instruction
+  void extractps(Register dst, XMMRegister src, byte imm8);
 
   enum RoundingMode {
     kRoundToNearest = 0x0,
@@ -1420,17 +1431,6 @@
 
   void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
 
-  void movmskpd(Register dst, XMMRegister src);
-  void movmskps(Register dst, XMMRegister src);
-
-  void cmpltsd(XMMRegister dst, XMMRegister src);
-
-  // The first argument is the reg field, the second argument is the r/m field.
-  void emit_sse_operand(XMMRegister dst, XMMRegister src);
-  void emit_sse_operand(XMMRegister reg, const Operand& adr);
-  void emit_sse_operand(XMMRegister dst, Register src);
-  void emit_sse_operand(Register dst, XMMRegister src);
-
   // Debugging
   void Print();
 
@@ -1611,6 +1611,12 @@
   // Emit the code-object-relative offset of the label's position
   inline void emit_code_relative_offset(Label* label);
 
+  // The first argument is the reg field, the second argument is the r/m field.
+  void emit_sse_operand(XMMRegister dst, XMMRegister src);
+  void emit_sse_operand(XMMRegister reg, const Operand& adr);
+  void emit_sse_operand(XMMRegister dst, Register src);
+  void emit_sse_operand(Register dst, XMMRegister src);
+
   // Emit machine code for one of the operations ADD, ADC, SUB, SBC,
   // AND, OR, XOR, or CMP.  The encodings of these operations are all
   // similar, differing just in the opcode or in the reg field of the
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 0f879d1..7735b55 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -1260,6 +1260,13 @@
     byte_size_operand_ = idesc.byte_size_operation;
     current += PrintOperands(idesc.mnem, idesc.op_order_, current);
 
+  } else if (opcode == 0x54) {
+    // andps xmm, xmm/m128
+    int mod, regop, rm;
+    get_modrm(*current, &mod, &regop, &rm);
+    AppendToBuffer("andps %s,", NameOfXMMRegister(regop));
+    current += PrintRightXMMOperand(current);
+
   } else if (opcode == 0x57) {
     // xorps xmm, xmm/m128
     int mod, regop, rm;
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 508ce5c..85895b3 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -3369,7 +3369,7 @@
     XMMRegister input_reg = ToDoubleRegister(instr->value());
     __ xorps(scratch, scratch);
     __ subsd(scratch, input_reg);
-    __ andpd(input_reg, scratch);
+    __ andps(input_reg, scratch);
   } else if (r.IsInteger32()) {
     EmitIntegerMathAbs(instr);
   } else if (r.IsSmi()) {
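
The sequence above computes Math.abs on doubles without branches: xorps
zeroes the scratch register, subsd forms -x, and ANDing the bit patterns of
x and -x clears exactly the sign bit, the only bit in which they differ.
Swapping andpd for andps changes nothing semantically, since both are plain
128-bit bitwise ANDs; andps merely encodes one byte shorter. The same trick
in portable C++:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Branch-free |x|: 0.0 - x flips only the sign bit of x, so the
    // bitwise AND of x and -x drops the sign bit and keeps all others.
    double BitwiseAbs(double x) {
      double neg = 0.0 - x;
      uint64_t a, b, r;
      memcpy(&a, &x, sizeof a);
      memcpy(&b, &neg, sizeof b);
      r = a & b;
      double result;
      memcpy(&result, &r, sizeof result);
      return result;
    }

    int main() {
      // prints: 2.5 2.5 0
      printf("%g %g %g\n", BitwiseAbs(-2.5), BitwiseAbs(2.5),
             BitwiseAbs(-0.0));
      return 0;
    }
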
diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc
index 8b9da49..301545c 100644
--- a/test/cctest/test-disasm-ia32.cc
+++ b/test/cctest/test-disasm-ia32.cc
@@ -354,19 +354,29 @@
       CpuFeatureScope fscope(&assm, SSE2);
       __ cvttss2si(edx, Operand(ebx, ecx, times_4, 10000));
       __ cvtsi2sd(xmm1, Operand(ebx, ecx, times_4, 10000));
-      __ addsd(xmm1, xmm0);
-      __ mulsd(xmm1, xmm0);
-      __ subsd(xmm1, xmm0);
-      __ divsd(xmm1, xmm0);
       __ movsd(xmm1, Operand(ebx, ecx, times_4, 10000));
       __ movsd(Operand(ebx, ecx, times_4, 10000), xmm1);
-      __ ucomisd(xmm0, xmm1);
-
+      __ movaps(xmm0, xmm1);
       // 128 bit move instructions.
       __ movdqa(xmm0, Operand(ebx, ecx, times_4, 10000));
       __ movdqa(Operand(ebx, ecx, times_4, 10000), xmm0);
       __ movdqu(xmm0, Operand(ebx, ecx, times_4, 10000));
       __ movdqu(Operand(ebx, ecx, times_4, 10000), xmm0);
+
+      __ addsd(xmm1, xmm0);
+      __ mulsd(xmm1, xmm0);
+      __ subsd(xmm1, xmm0);
+      __ divsd(xmm1, xmm0);
+      __ ucomisd(xmm0, xmm1);
+      __ cmpltsd(xmm0, xmm1);
+
+      __ andps(xmm0, xmm1);
+      __ andpd(xmm0, xmm1);
+      __ psllq(xmm0, 17);
+      __ psllq(xmm0, xmm1);
+      __ psrlq(xmm0, 17);
+      __ psrlq(xmm0, xmm1);
+      __ por(xmm0, xmm1);
     }
   }
 
@@ -393,36 +403,6 @@
     }
   }
 
-  // andpd, cmpltsd, movaps, psllq, psrlq, por.
-  {
-    if (CpuFeatures::IsSupported(SSE2)) {
-      CpuFeatureScope fscope(&assm, SSE2);
-      __ andpd(xmm0, xmm1);
-      __ andpd(xmm1, xmm2);
-
-      __ cmpltsd(xmm0, xmm1);
-      __ cmpltsd(xmm1, xmm2);
-
-      __ movaps(xmm0, xmm1);
-      __ movaps(xmm1, xmm2);
-
-      __ psllq(xmm0, 17);
-      __ psllq(xmm1, 42);
-
-      __ psllq(xmm0, xmm1);
-      __ psllq(xmm1, xmm2);
-
-      __ psrlq(xmm0, 17);
-      __ psrlq(xmm1, 42);
-
-      __ psrlq(xmm0, xmm1);
-      __ psrlq(xmm1, xmm2);
-
-      __ por(xmm0, xmm1);
-      __ por(xmm1, xmm2);
-    }
-  }
-
   {
     if (CpuFeatures::IsSupported(SSE2) &&
         CpuFeatures::IsSupported(SSE4_1)) {
diff --git a/test/cctest/test-disasm-x64.cc b/test/cctest/test-disasm-x64.cc
index 3a6c458..8fd0369 100644
--- a/test/cctest/test-disasm-x64.cc
+++ b/test/cctest/test-disasm-x64.cc
@@ -335,61 +335,53 @@
   __ fcompp();
   __ fwait();
   __ nop();
-  {
-    if (CpuFeatures::IsSupported(SSE2)) {
-      CpuFeatures::Scope fscope(SSE2);
-      __ cvttss2si(rdx, Operand(rbx, rcx, times_4, 10000));
-      __ cvttss2si(rdx, xmm1);
-      __ cvttsd2si(rdx, Operand(rbx, rcx, times_4, 10000));
-      __ cvttsd2si(rdx, xmm1);
-      __ cvttsd2siq(rdx, xmm1);
-      __ addsd(xmm1, xmm0);
-      __ mulsd(xmm1, xmm0);
-      __ subsd(xmm1, xmm0);
-      __ divsd(xmm1, xmm0);
-      __ movsd(xmm1, Operand(rbx, rcx, times_4, 10000));
-      __ movsd(Operand(rbx, rcx, times_4, 10000), xmm1);
-      __ ucomisd(xmm0, xmm1);
 
-      // 128 bit move instructions.
-      __ movdqa(xmm0, Operand(rbx, rcx, times_4, 10000));
-      __ movdqa(Operand(rbx, rcx, times_4, 10000), xmm0);
-    }
+  // SSE instructions
+  {
+    __ cvttss2si(rdx, Operand(rbx, rcx, times_4, 10000));
+    __ cvttss2si(rdx, xmm1);
+    __ movaps(xmm0, xmm1);
+
+    __ andps(xmm0, xmm1);
+  }
+  // SSE 2 instructions
+  {
+    __ cvttsd2si(rdx, Operand(rbx, rcx, times_4, 10000));
+    __ cvttsd2si(rdx, xmm1);
+    __ cvttsd2siq(rdx, xmm1);
+    __ movsd(xmm1, Operand(rbx, rcx, times_4, 10000));
+    __ movsd(Operand(rbx, rcx, times_4, 10000), xmm1);
+    // 128 bit move instructions.
+    __ movdqa(xmm0, Operand(rbx, rcx, times_4, 10000));
+    __ movdqa(Operand(rbx, rcx, times_4, 10000), xmm0);
+
+    __ addsd(xmm1, xmm0);
+    __ mulsd(xmm1, xmm0);
+    __ subsd(xmm1, xmm0);
+    __ divsd(xmm1, xmm0);
+    __ ucomisd(xmm0, xmm1);
+
+    __ andpd(xmm0, xmm1);
   }
 
   // cmov.
   {
-    if (CpuFeatures::IsSupported(CMOV)) {
-      CpuFeatures::Scope use_cmov(CMOV);
-      __ cmovq(overflow, rax, Operand(rax, 0));
-      __ cmovq(no_overflow, rax, Operand(rax, 1));
-      __ cmovq(below, rax, Operand(rax, 2));
-      __ cmovq(above_equal, rax, Operand(rax, 3));
-      __ cmovq(equal, rax, Operand(rbx, 0));
-      __ cmovq(not_equal, rax, Operand(rbx, 1));
-      __ cmovq(below_equal, rax, Operand(rbx, 2));
-      __ cmovq(above, rax, Operand(rbx, 3));
-      __ cmovq(sign, rax, Operand(rcx, 0));
-      __ cmovq(not_sign, rax, Operand(rcx, 1));
-      __ cmovq(parity_even, rax, Operand(rcx, 2));
-      __ cmovq(parity_odd, rax, Operand(rcx, 3));
-      __ cmovq(less, rax, Operand(rdx, 0));
-      __ cmovq(greater_equal, rax, Operand(rdx, 1));
-      __ cmovq(less_equal, rax, Operand(rdx, 2));
-      __ cmovq(greater, rax, Operand(rdx, 3));
-    }
-  }
-
-  // andpd, etc.
-  {
-    if (CpuFeatures::IsSupported(SSE2)) {
-      CpuFeatures::Scope fscope(SSE2);
-      __ andpd(xmm0, xmm1);
-      __ andpd(xmm1, xmm2);
-
-      __ movaps(xmm0, xmm1);
-      __ movaps(xmm1, xmm2);
-    }
+    __ cmovq(overflow, rax, Operand(rax, 0));
+    __ cmovq(no_overflow, rax, Operand(rax, 1));
+    __ cmovq(below, rax, Operand(rax, 2));
+    __ cmovq(above_equal, rax, Operand(rax, 3));
+    __ cmovq(equal, rax, Operand(rbx, 0));
+    __ cmovq(not_equal, rax, Operand(rbx, 1));
+    __ cmovq(below_equal, rax, Operand(rbx, 2));
+    __ cmovq(above, rax, Operand(rbx, 3));
+    __ cmovq(sign, rax, Operand(rcx, 0));
+    __ cmovq(not_sign, rax, Operand(rcx, 1));
+    __ cmovq(parity_even, rax, Operand(rcx, 2));
+    __ cmovq(parity_odd, rax, Operand(rcx, 3));
+    __ cmovq(less, rax, Operand(rdx, 0));
+    __ cmovq(greater_equal, rax, Operand(rdx, 1));
+    __ cmovq(less_equal, rax, Operand(rdx, 2));
+    __ cmovq(greater, rax, Operand(rdx, 3));
   }
 
   {
diff --git a/test/cctest/test-mark-compact.cc b/test/cctest/test-mark-compact.cc
index 624969f..e62bdeb 100644
--- a/test/cctest/test-mark-compact.cc
+++ b/test/cctest/test-mark-compact.cc
@@ -73,33 +73,23 @@
 
 
 TEST(Promotion) {
-  // This test requires compaction. If compaction is turned off, we
-  // skip the entire test.
-  if (FLAG_never_compact) return;
-
   CcTest::InitializeVM();
-
-  // Ensure that we get a compacting collection so that objects are promoted
-  // from new space.
-  FLAG_gc_global = true;
-  FLAG_always_compact = true;
   Heap* heap = CcTest::heap();
-  heap->ConfigureHeap(2*256*KB, 8*MB, 8*MB);
+  heap->ConfigureHeap(2*256*KB, 1*MB, 1*MB);
 
   v8::HandleScope sc(CcTest::isolate());
 
   // Allocate a fixed array in the new space.
-  int array_size =
+  int array_length =
       (Page::kMaxNonCodeHeapObjectSize - FixedArray::kHeaderSize) /
-      (kPointerSize * 4);
-  Object* obj = heap->AllocateFixedArray(array_size)->ToObjectChecked();
-
+      (4 * kPointerSize);
+  Object* obj = heap->AllocateFixedArray(array_length)->ToObjectChecked();
   Handle<FixedArray> array(FixedArray::cast(obj));
 
   // Array should be in the new space.
   CHECK(heap->InSpace(*array, NEW_SPACE));
 
-  // Call the m-c collector, so array becomes an old object.
+  // Call the mark-compact GC, so the array becomes an old object.
   heap->CollectGarbage(OLD_POINTER_SPACE);
 
   // Array now sits in the old space
@@ -108,42 +98,27 @@
 
 
 TEST(NoPromotion) {
-  // Test the situation that some objects in new space are promoted to
-  // the old space
   CcTest::InitializeVM();
-
-  CcTest::heap()->ConfigureHeap(2*256*KB, 8*MB, 8*MB);
+  Heap* heap = CcTest::heap();
+  heap->ConfigureHeap(2*256*KB, 1*MB, 1*MB);
 
   v8::HandleScope sc(CcTest::isolate());
 
-  // Do a mark compact GC to shrink the heap.
-  CcTest::heap()->CollectGarbage(OLD_POINTER_SPACE);
-
-  // Allocate a big Fixed array in the new space.
-  int length = (Page::kMaxNonCodeHeapObjectSize -
-      FixedArray::kHeaderSize) / (2 * kPointerSize);
-  Object* obj = CcTest::heap()->AllocateFixedArray(length)->
-      ToObjectChecked();
-
+  // Allocate a big fixed array in the new space.
+  int array_length =
+      (Page::kMaxNonCodeHeapObjectSize - FixedArray::kHeaderSize) /
+      (2 * kPointerSize);
+  Object* obj = heap->AllocateFixedArray(array_length)->ToObjectChecked();
   Handle<FixedArray> array(FixedArray::cast(obj));
 
-  // Array still stays in the new space.
-  CHECK(CcTest::heap()->InSpace(*array, NEW_SPACE));
+  // Array should be in the new space.
+  CHECK(heap->InSpace(*array, NEW_SPACE));
 
-  // Allocate objects in the old space until out of memory.
-  FixedArray* host = *array;
-  while (true) {
-    Object* obj;
-    { MaybeObject* maybe_obj = CcTest::heap()->AllocateFixedArray(100, TENURED);
-      if (!maybe_obj->ToObject(&obj)) break;
-    }
-
-    host->set(0, obj);
-    host = FixedArray::cast(obj);
-  }
+  // Simulate a full old space to make promotion fail.
+  SimulateFullSpace(heap->old_pointer_space());
 
   // Call mark compact GC, and it should pass.
-  CcTest::heap()->CollectGarbage(OLD_POINTER_SPACE);
+  heap->CollectGarbage(OLD_POINTER_SPACE);
 }
 
 
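TEST(NoPromotion) previously filled old space with a hand-rolled
allocate-until-failure loop; the cctest helper SimulateFullSpace now does
that deterministically. The idea, sketched on a toy bump allocator (this is
not V8's implementation of the helper):

    #include <cstddef>
    #include <cstdint>

    // Toy bump allocator: exhausting the linear allocation area makes the
    // next allocation fail deterministically, so a test can provoke a
    // failed promotion without looping until OOM.
    struct BumpSpace {
      uintptr_t top;
      uintptr_t limit;

      void* Allocate(size_t bytes) {
        if (top + bytes > limit) return NULL;  // promotion would fail here
        void* result = reinterpret_cast<void*>(top);
        top += bytes;
        return result;
      }

      void SimulateFull() { top = limit; }  // leave no room, as the helper does
    };
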
diff --git a/test/cctest/test-utils.cc b/test/cctest/test-utils.cc
index feff477..86d52fa 100644
--- a/test/cctest/test-utils.cc
+++ b/test/cctest/test-utils.cc
@@ -103,35 +103,22 @@
 
 void TestMemMove(byte* area1,
                  byte* area2,
-                 byte* area3,
                  int src_offset,
                  int dest_offset,
                  int length) {
   for (int i = 0; i < kAreaSize; i++) {
     area1[i] = i & 0xFF;
     area2[i] = i & 0xFF;
-    area3[i] = i & 0xFF;
   }
   OS::MemMove(area1 + dest_offset, area1 + src_offset, length);
-  MoveBytes(area2 + dest_offset, area2 + src_offset, length);
-  memmove(area3 + dest_offset, area3 + src_offset, length);
-  if (memcmp(area1, area3, kAreaSize) != 0) {
+  memmove(area2 + dest_offset, area2 + src_offset, length);
+  if (memcmp(area1, area2, kAreaSize) != 0) {
     printf("OS::MemMove(): src_offset: %d, dest_offset: %d, length: %d\n",
            src_offset, dest_offset, length);
     for (int i = 0; i < kAreaSize; i++) {
-      if (area1[i] == area3[i]) continue;
+      if (area1[i] == area2[i]) continue;
       printf("diff at offset %d (%p): is %d, should be %d\n",
-             i, reinterpret_cast<void*>(area1 + i), area1[i], area3[i]);
-    }
-    CHECK(false);
-  }
-  if (memcmp(area2, area3, kAreaSize) != 0) {
-    printf("MoveBytes(): src_offset: %d, dest_offset: %d, length: %d\n",
-           src_offset, dest_offset, length);
-    for (int i = 0; i < kAreaSize; i++) {
-      if (area2[i] == area3[i]) continue;
-      printf("diff at offset %d (%p): is %d, should be %d\n",
-             i, reinterpret_cast<void*>(area2 + i), area2[i], area3[i]);
+             i, reinterpret_cast<void*>(area1 + i), area1[i], area2[i]);
     }
     CHECK(false);
   }
@@ -142,7 +129,6 @@
   v8::V8::Initialize();
   byte* area1 = new byte[kAreaSize];
   byte* area2 = new byte[kAreaSize];
-  byte* area3 = new byte[kAreaSize];
 
   static const int kMinOffset = 32;
   static const int kMaxOffset = 64;
@@ -152,13 +138,12 @@
   for (int src_offset = kMinOffset; src_offset <= kMaxOffset; src_offset++) {
     for (int dst_offset = kMinOffset; dst_offset <= kMaxOffset; dst_offset++) {
       for (int length = 0; length <= kMaxLength; length++) {
-        TestMemMove(area1, area2, area3, src_offset, dst_offset, length);
+        TestMemMove(area1, area2, src_offset, dst_offset, length);
       }
     }
   }
   delete[] area1;
   delete[] area2;
-  delete[] area3;
 }
 
 
diff --git a/test/mjsunit/array-functions-prototype-misc.js b/test/mjsunit/array-functions-prototype-misc.js
index 0543c32..74dc9a6 100644
--- a/test/mjsunit/array-functions-prototype-misc.js
+++ b/test/mjsunit/array-functions-prototype-misc.js
@@ -31,7 +31,7 @@
  * should work on other objects too, so we test that too.
  */
 
-var LARGE = 40000000;
+var LARGE = 4000000;
 var VERYLARGE = 4000000000;
 
 // Nicer for firefox 1.5.  Unless you uncomment the following two lines,
@@ -276,7 +276,7 @@
 }
 
 // Take something near the end of the array.
-for (var i = 0; i < 100; i++) {
+for (var i = 0; i < 10; i++) {
   var top = a.splice(LARGE, 5);
   assertEquals(5, top.length);
   assertEquals(LARGE, top[0]);
diff --git a/test/mjsunit/compiler/expression-trees.js b/test/mjsunit/compiler/expression-trees.js
index fac6b4c..0d971a9 100644
--- a/test/mjsunit/compiler/expression-trees.js
+++ b/test/mjsunit/compiler/expression-trees.js
@@ -55,46 +55,43 @@
   }
 }
 
-// All 429 possible bitwise OR trees with eight leaves.
-var identifiers = ['a','b','c','d','e','f','g','h'];
+// All possible bitwise OR trees with six leaves, of which there are
+// CatalanNumber[5] = 42; see http://mathworld.wolfram.com/CatalanNumber.html.
+var identifiers = ['a','b','c','d','e','f'];
 var or_trees = makeTrees("|", identifiers);
 var and_trees = makeTrees("&", identifiers);
 
-// Set up leaf masks to set 8 least-significant bits.
+// Set up leaf masks to set 6 least-significant bits.
 var a = 1 << 0;
 var b = 1 << 1;
 var c = 1 << 2;
 var d = 1 << 3;
 var e = 1 << 4;
 var f = 1 << 5;
-var g = 1 << 6;
-var h = 1 << 7;
 
 for (var i = 0; i < or_trees.length; ++i) {
-  for (var j = 0; j < 8; ++j) {
+  for (var j = 0; j < 6; ++j) {
     var or_fun = new Function("return " + or_trees[i]);
-    if (j == 0) assertEquals(255, or_fun());
+    if (j == 0) assertEquals(63, or_fun());
 
     // Set the j'th variable to a string to force a bailout.
     eval(identifiers[j] + "+= ''");
-    assertEquals(255, or_fun());
+    assertEquals(63, or_fun());
     // Set it back to a number for the next iteration.
     eval(identifiers[j] + "= +" + identifiers[j]);
   }
 }
 
-// Set up leaf masks to clear 8 least-significant bits.
-a ^= 255;
-b ^= 255;
-c ^= 255;
-d ^= 255;
-e ^= 255;
-f ^= 255;
-g ^= 255;
-h ^= 255;
+// Set up leaf masks to clear 6 least-significant bits.
+a ^= 63;
+b ^= 63;
+c ^= 63;
+d ^= 63;
+e ^= 63;
+f ^= 63;
 
 for (i = 0; i < and_trees.length; ++i) {
-  for (var j = 0; j < 8; ++j) {
+  for (var j = 0; j < 6; ++j) {
     var and_fun = new Function("return " + and_trees[i]);
     if (j == 0) assertEquals(0, and_fun());
 
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index 8eb060e..3cd5125 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -33,9 +33,6 @@
   # TODO(mvstanton) Re-enable when the performance is bearable again.
   'regress/regress-2185-2': [SKIP],
 
-  # TODO(mvstanton) Re-enable when the bug is fixed.
-  'regress/regress-2612': [PASS, ['mode == debug', SKIP]],
-
   ##############################################################################
   # Fails.
   'regress/regress-1119': [FAIL],
diff --git a/test/mjsunit/regress/regress-array-pop-nonconfigurable.js b/test/mjsunit/regress/regress-array-pop-nonconfigurable.js
new file mode 100644
index 0000000..129e198
--- /dev/null
+++ b/test/mjsunit/regress/regress-array-pop-nonconfigurable.js
@@ -0,0 +1,31 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var a = [];
+Object.defineProperty(a, 0, {});
+assertThrows(function() { a.pop(); });
+
diff --git a/tools/presubmit.py b/tools/presubmit.py
index 780dab9..1ab6347 100755
--- a/tools/presubmit.py
+++ b/tools/presubmit.py
@@ -282,8 +282,8 @@
   Check that all files include a copyright notice and no trailing whitespaces.
   """
 
-  RELEVANT_EXTENSIONS = ['.js', '.cc', '.h', '.py', '.c', 'SConscript',
-      'SConstruct', '.status', '.gyp', '.gypi']
+  RELEVANT_EXTENSIONS = ['.js', '.cc', '.h', '.py', '.c',
+                         '.status', '.gyp', '.gypi']
 
   # Overwriting the one in the parent class.
   def FindFilesIn(self, path):
@@ -292,7 +292,7 @@
                                 stdout=PIPE, cwd=path, shell=True)
       result = []
       for file in output.stdout.read().split():
-        for dir_part in os.path.dirname(file).split(os.sep):
+        for dir_part in os.path.dirname(file).replace(os.sep, '/').split('/'):
           if self.IgnoreDir(dir_part):
             break
         else: