Version 3.22.2

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@16953 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index f3e6ea7..f3498c7 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,8 @@
+2013-09-26: Version 3.22.2
+
+        Performance and stability improvements on all platforms.
+
+
 2013-09-25: Version 3.22.1
 
         Sped up creating typed arrays from array-like objects.
diff --git a/src/accessors.cc b/src/accessors.cc
index 669c02b..5023266 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -78,6 +78,61 @@
 }
 
 
+static V8_INLINE bool CheckForName(Handle<String> name,
+                                   String* property_name,
+                                   int offset,
+                                   int* object_offset) {
+  if (name->Equals(property_name)) {
+    *object_offset = offset;
+    return true;
+  }
+  return false;
+}
+
+
+bool Accessors::IsJSObjectFieldAccessor(
+      Handle<Map> map, Handle<String> name,
+      int* object_offset) {
+  Isolate* isolate = map->GetIsolate();
+  switch (map->instance_type()) {
+    case JS_ARRAY_TYPE:
+      return
+        CheckForName(name, isolate->heap()->length_string(),
+                     JSArray::kLengthOffset, object_offset);
+    case JS_TYPED_ARRAY_TYPE:
+      return
+        CheckForName(name, isolate->heap()->length_string(),
+                     JSTypedArray::kLengthOffset, object_offset) ||
+        CheckForName(name, isolate->heap()->byte_length_string(),
+                     JSTypedArray::kByteLengthOffset, object_offset) ||
+        CheckForName(name, isolate->heap()->byte_offset_string(),
+                     JSTypedArray::kByteOffsetOffset, object_offset) ||
+        CheckForName(name, isolate->heap()->buffer_string(),
+                     JSTypedArray::kBufferOffset, object_offset);
+    case JS_ARRAY_BUFFER_TYPE:
+      return
+        CheckForName(name, isolate->heap()->byte_length_string(),
+                     JSArrayBuffer::kByteLengthOffset, object_offset);
+    case JS_DATA_VIEW_TYPE:
+      return
+        CheckForName(name, isolate->heap()->byte_length_string(),
+                     JSDataView::kByteLengthOffset, object_offset) ||
+        CheckForName(name, isolate->heap()->byte_offset_string(),
+                     JSDataView::kByteOffsetOffset, object_offset) ||
+        CheckForName(name, isolate->heap()->buffer_string(),
+                     JSDataView::kBufferOffset, object_offset);
+    default: {
+      if (map->instance_type() < FIRST_NONSTRING_TYPE) {
+        return
+          CheckForName(name, isolate->heap()->length_string(),
+                       String::kLengthOffset, object_offset);
+      }
+      return false;
+    }
+  }
+}
+
+
 //
 // Accessors::ArrayLength
 //
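
The new predicate gives the compiler a way to treat accessor properties with a
fixed object layout (array lengths, typed-array views, and so on) as plain
field loads. A minimal sketch of a hypothetical call site, assuming a
Crankshaft-style lowering context (EmitFieldLoad and the surrounding variables
are illustrative, not part of this change):

  // 'map' and 'name' describe the property access being compiled.
  int object_offset;
  if (Accessors::IsJSObjectFieldAccessor(map, name, &object_offset)) {
    // The property is backed by a field at a fixed offset, so a direct
    // load can be emitted instead of an accessor call.
    EmitFieldLoad(receiver, object_offset);  // hypothetical helper
  }
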
diff --git a/src/accessors.h b/src/accessors.h
index d9a2130..b2dee27 100644
--- a/src/accessors.h
+++ b/src/accessors.h
@@ -86,6 +86,13 @@
   static Handle<AccessorInfo> MakeModuleExport(
       Handle<String> name, int index, PropertyAttributes attributes);
 
+  // Returns true for properties that are accessors to object fields.
+  // If true, *object_offset contains the offset of the object field.
+  static bool IsJSObjectFieldAccessor(
+      Handle<Map> map, Handle<String> name,
+      int* object_offset);
+
+
  private:
   // Accessor functions only used through the descriptor.
   static MaybeObject* FunctionSetPrototype(Isolate* isolate,
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 530c1d2..13c0e44 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -937,6 +937,24 @@
 }
 
 
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+  // We check the stack limit as an indicator that recompilation might be done.
+  Label ok;
+  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+  __ cmp(sp, Operand(ip));
+  __ b(hs, &ok);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kStackGuard, 0);
+  }
+  __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
+          RelocInfo::CODE_TARGET);
+
+  __ bind(&ok);
+  __ Ret();
+}
+
+
 void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   // 1. Make sure we have at least one argument.
   // r0: actual number of arguments
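
The new builtin above is the interrupt target for back edges that have already
requested OSR (see the OSR_AFTER_STACK_CHECK state introduced below). A hedged
C++-style sketch of what the generated ARM code does, not a literal
translation:

  void OsrAfterStackCheck() {
    // If the stack limit has not been tripped, no interrupt is pending:
    // return and keep looping in unoptimized code.
    if (sp >= stack_limit) return;
    // Otherwise run the stack guard (which may install freshly compiled
    // code), then attempt on-stack replacement.
    CallRuntime(Runtime::kStackGuard);
    TailCall(Builtins::kOnStackReplacement);
  }
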
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index d49f842..3becc96 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -4893,96 +4893,86 @@
 
 static const int32_t kBranchBeforeInterrupt =  0x5a000004;
 
-// The back edge bookkeeping code matches the pattern:
-//
-//  <decrement profiling counter>
-//  2a 00 00 01       bpl ok
-//  e5 9f c? ??       ldr ip, [pc, <interrupt stub address>]
-//  e1 2f ff 3c       blx ip
-//  ok-label
-//
-// We patch the code to the following form:
-//
-//  <decrement profiling counter>
-//  e1 a0 00 00       mov r0, r0 (NOP)
-//  e5 9f c? ??       ldr ip, [pc, <on-stack replacement address>]
-//  e1 2f ff 3c       blx ip
-//  ok-label
 
 void BackEdgeTable::PatchAt(Code* unoptimized_code,
-                            Address pc_after,
+                            Address pc,
+                            BackEdgeState target_state,
                             Code* replacement_code) {
   static const int kInstrSize = Assembler::kInstrSize;
-  // Turn the jump into nops.
-  CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
-  patcher.masm()->nop();
+  Address branch_address = pc - 3 * kInstrSize;
+  CodePatcher patcher(branch_address, 1);
+
+  switch (target_state) {
+    case INTERRUPT:
+      //  <decrement profiling counter>
+      //  2a 00 00 01       bpl ok
+      //  e5 9f c? ??       ldr ip, [pc, <interrupt stub address>]
+      //  e1 2f ff 3c       blx ip
+      //  ok-label
+      patcher.masm()->b(4 * kInstrSize, pl);  // Jump offset is 4 instructions.
+      ASSERT_EQ(kBranchBeforeInterrupt, Memory::int32_at(branch_address));
+      break;
+    case ON_STACK_REPLACEMENT:
+    case OSR_AFTER_STACK_CHECK:
+      //  <decrement profiling counter>
+      //  e1 a0 00 00       mov r0, r0 (NOP)
+      //  e5 9f c? ??       ldr ip, [pc, <on-stack replacement address>]
+      //  e1 2f ff 3c       blx ip
+      //  ok-label
+      patcher.masm()->nop();
+      break;
+  }
+
+  Address pc_immediate_load_address = pc - 2 * kInstrSize;
   // Replace the call address.
-  uint32_t interrupt_address_offset = Memory::uint16_at(pc_after -
-      2 * kInstrSize) & 0xfff;
-  Address interrupt_address_pointer = pc_after + interrupt_address_offset;
+  uint32_t interrupt_address_offset =
+      Memory::uint16_at(pc_immediate_load_address) & 0xfff;
+  Address interrupt_address_pointer = pc + interrupt_address_offset;
   Memory::uint32_at(interrupt_address_pointer) =
       reinterpret_cast<uint32_t>(replacement_code->entry());
 
   unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
-      unoptimized_code, pc_after - 2 * kInstrSize, replacement_code);
+      unoptimized_code, pc_immediate_load_address, replacement_code);
 }
 
 
-void BackEdgeTable::RevertAt(Code* unoptimized_code,
-                             Address pc_after,
-                             Code* interrupt_code) {
-  static const int kInstrSize = Assembler::kInstrSize;
-  // Restore the original jump.
-  CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
-  patcher.masm()->b(4 * kInstrSize, pl);  // ok-label is 4 instructions later.
-  ASSERT_EQ(kBranchBeforeInterrupt,
-            Memory::int32_at(pc_after - 3 * kInstrSize));
-  // Restore the original call address.
-  uint32_t interrupt_address_offset = Memory::uint16_at(pc_after -
-      2 * kInstrSize) & 0xfff;
-  Address interrupt_address_pointer = pc_after + interrupt_address_offset;
-  Memory::uint32_at(interrupt_address_pointer) =
-      reinterpret_cast<uint32_t>(interrupt_code->entry());
-
-  interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
-      unoptimized_code, pc_after - 2 * kInstrSize, interrupt_code);
-}
-
-
-#ifdef DEBUG
 BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
     Isolate* isolate,
     Code* unoptimized_code,
-    Address pc_after) {
+    Address pc) {
   static const int kInstrSize = Assembler::kInstrSize;
-  ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
+  ASSERT(Memory::int32_at(pc - kInstrSize) == kBlxIp);
 
+  Address branch_address = pc - 3 * kInstrSize;
+  Address pc_immediate_load_address = pc - 2 * kInstrSize;
   uint32_t interrupt_address_offset =
-      Memory::uint16_at(pc_after - 2 * kInstrSize) & 0xfff;
-  Address interrupt_address_pointer = pc_after + interrupt_address_offset;
+      Memory::uint16_at(pc_immediate_load_address) & 0xfff;
+  Address interrupt_address_pointer = pc + interrupt_address_offset;
 
-  if (Assembler::IsNop(Assembler::instr_at(pc_after - 3 * kInstrSize))) {
+  if (Memory::int32_at(branch_address) == kBranchBeforeInterrupt) {
+    ASSERT(Memory::uint32_at(interrupt_address_pointer) ==
+           reinterpret_cast<uint32_t>(
+               isolate->builtins()->InterruptCheck()->entry()));
     ASSERT(Assembler::IsLdrPcImmediateOffset(
-        Assembler::instr_at(pc_after - 2 * kInstrSize)));
-    Code* osr_builtin =
-        isolate->builtins()->builtin(Builtins::kOnStackReplacement);
-    ASSERT(reinterpret_cast<uint32_t>(osr_builtin->entry()) ==
-           Memory::uint32_at(interrupt_address_pointer));
-    return ON_STACK_REPLACEMENT;
-  } else {
-    // Get the interrupt stub code object to match against from cache.
-    Code* interrupt_builtin =
-        isolate->builtins()->builtin(Builtins::kInterruptCheck);
-    ASSERT(Assembler::IsLdrPcImmediateOffset(
-        Assembler::instr_at(pc_after - 2 * kInstrSize)));
-    ASSERT_EQ(kBranchBeforeInterrupt,
-              Memory::int32_at(pc_after - 3 * kInstrSize));
-    ASSERT(reinterpret_cast<uint32_t>(interrupt_builtin->entry()) ==
-           Memory::uint32_at(interrupt_address_pointer));
+               Assembler::instr_at(pc_immediate_load_address)));
     return INTERRUPT;
   }
+
+  ASSERT(Assembler::IsNop(Assembler::instr_at(branch_address)));
+  ASSERT(Assembler::IsLdrPcImmediateOffset(
+             Assembler::instr_at(pc_immediate_load_address)));
+
+  if (Memory::uint32_at(interrupt_address_pointer) ==
+      reinterpret_cast<uint32_t>(
+          isolate->builtins()->OnStackReplacement()->entry())) {
+    return ON_STACK_REPLACEMENT;
+  }
+
+  ASSERT(Memory::uint32_at(interrupt_address_pointer) ==
+         reinterpret_cast<uint32_t>(
+             isolate->builtins()->OsrAfterStackCheck()->entry()));
+  return OSR_AFTER_STACK_CHECK;
 }
-#endif  // DEBUG
 
 
 } }  // namespace v8::internal
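
GetBackEdgeState() is now compiled unconditionally (it was DEBUG-only before)
and recovers the state purely from the patched instructions. An
architecture-neutral sketch of the decision it makes; the helper names are
placeholders, not V8 API:

  BackEdgeState GetState(Instr branch, Address call_target) {
    // An intact conditional branch means the edge was never patched.
    if (IsConditionalBranch(branch)) return INTERRUPT;
    // The branch was patched to a nop, so this is one of the two OSR
    // states; the call target tells them apart.
    if (call_target == entry_of_OnStackReplacement) return ON_STACK_REPLACEMENT;
    return OSR_AFTER_STACK_CHECK;  // must be OsrAfterStackCheck's entry
  }
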
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 0756aef..332131a 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -1043,7 +1043,7 @@
   }
 
   {  // -- J S O N
-    Handle<String> name = factory->NewStringFromAscii(CStrVector("JSON"));
+    Handle<String> name = factory->InternalizeUtf8String("JSON");
     Handle<JSFunction> cons = factory->NewFunction(name,
                                                    factory->the_hole_value());
     JSFunction::SetInstancePrototype(cons,
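
Unlike NewStringFromAscii(), InternalizeUtf8String() returns the canonical
copy of "JSON" from the isolate's string table. A hedged illustration of the
property this buys (not part of the change):

  // Internalized strings are unique per isolate: asking twice yields the
  // very same object, so identity comparison suffices later on.
  Handle<String> a = factory->InternalizeUtf8String("JSON");
  Handle<String> b = factory->InternalizeUtf8String("JSON");
  ASSERT(*a == *b);  // same String object, not merely equal contents
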
diff --git a/src/builtins.h b/src/builtins.h
index 54787f7..682fa1c 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -214,6 +214,8 @@
                                     Code::kNoExtraICState)              \
   V(InterruptCheck,                 BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)              \
+  V(OsrAfterStackCheck,             BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
   V(StackCheck,                     BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)              \
   CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
@@ -397,7 +399,7 @@
 
   static void Generate_StringConstructCode(MacroAssembler* masm);
   static void Generate_OnStackReplacement(MacroAssembler* masm);
-
+  static void Generate_OsrAfterStackCheck(MacroAssembler* masm);
   static void Generate_InterruptCheck(MacroAssembler* masm);
   static void Generate_StackCheck(MacroAssembler* masm);
 
diff --git a/src/compiler.cc b/src/compiler.cc
index 47634ec..01e261a 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -260,7 +260,7 @@
 }
 
 
-void OptimizingCompiler::RecordOptimizationStats() {
+void RecompileJob::RecordOptimizationStats() {
   Handle<JSFunction> function = info()->closure();
   int opt_count = function->shared()->opt_count();
   function->shared()->set_opt_count(opt_count + 1);
@@ -297,23 +297,23 @@
 // A return value of true indicates the compilation pipeline is still
 // going, not necessarily that we optimized the code.
 static bool MakeCrankshaftCode(CompilationInfo* info) {
-  OptimizingCompiler compiler(info);
-  OptimizingCompiler::Status status = compiler.CreateGraph();
+  RecompileJob job(info);
+  RecompileJob::Status status = job.CreateGraph();
 
-  if (status != OptimizingCompiler::SUCCEEDED) {
-    return status != OptimizingCompiler::FAILED;
+  if (status != RecompileJob::SUCCEEDED) {
+    return status != RecompileJob::FAILED;
   }
-  status = compiler.OptimizeGraph();
-  if (status != OptimizingCompiler::SUCCEEDED) {
-    status = compiler.AbortOptimization();
-    return status != OptimizingCompiler::FAILED;
+  status = job.OptimizeGraph();
+  if (status != RecompileJob::SUCCEEDED) {
+    status = job.AbortOptimization();
+    return status != RecompileJob::FAILED;
   }
-  status = compiler.GenerateAndInstallCode();
-  return status != OptimizingCompiler::FAILED;
+  status = job.GenerateAndInstallCode();
+  return status != RecompileJob::FAILED;
 }
 
 
-OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
+RecompileJob::Status RecompileJob::CreateGraph() {
   ASSERT(isolate()->use_crankshaft());
   ASSERT(info()->IsOptimizing());
   ASSERT(!info()->IsCompilingForDebugging());
@@ -452,7 +452,7 @@
 }
 
 
-OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
+RecompileJob::Status RecompileJob::OptimizeGraph() {
   DisallowHeapAllocation no_allocation;
   DisallowHandleAllocation no_handles;
   DisallowHandleDereference no_deref;
@@ -475,7 +475,7 @@
 }
 
 
-OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
+RecompileJob::Status RecompileJob::GenerateAndInstallCode() {
   ASSERT(last_status() == SUCCEEDED);
   ASSERT(!info()->HasAbortedDueToDependencyChange());
   DisallowCodeDependencyChange no_dependency_change;
@@ -1032,16 +1032,15 @@
       info->SaveHandles();
 
       if (Rewriter::Rewrite(*info) && Scope::Analyze(*info)) {
-        OptimizingCompiler* compiler =
-            new(info->zone()) OptimizingCompiler(*info);
-        OptimizingCompiler::Status status = compiler->CreateGraph();
-        if (status == OptimizingCompiler::SUCCEEDED) {
+        RecompileJob* job = new(info->zone()) RecompileJob(*info);
+        RecompileJob::Status status = job->CreateGraph();
+        if (status == RecompileJob::SUCCEEDED) {
           info.Detach();
           shared->code()->set_profiler_ticks(0);
-          isolate->optimizing_compiler_thread()->QueueForOptimization(compiler);
+          isolate->optimizing_compiler_thread()->QueueForOptimization(job);
           ASSERT(!isolate->has_pending_exception());
           return true;
-        } else if (status == OptimizingCompiler::BAILED_OUT) {
+        } else if (status == RecompileJob::BAILED_OUT) {
           isolate->clear_pending_exception();
           InstallFullCode(*info);
         }
@@ -1054,9 +1053,8 @@
 }
 
 
-Handle<Code> Compiler::InstallOptimizedCode(
-    OptimizingCompiler* optimizing_compiler) {
-  SmartPointer<CompilationInfo> info(optimizing_compiler->info());
+Handle<Code> Compiler::InstallOptimizedCode(RecompileJob* job) {
+  SmartPointer<CompilationInfo> info(job->info());
   // The function may have already been optimized by OSR.  Simply continue.
   // Except when OSR already disabled optimization for some reason.
   if (info->shared_info()->optimization_disabled()) {
@@ -1077,24 +1075,24 @@
       isolate, Logger::TimerEventScope::v8_recompile_synchronous);
   // If crankshaft succeeded, install the optimized code else install
   // the unoptimized code.
-  OptimizingCompiler::Status status = optimizing_compiler->last_status();
+  RecompileJob::Status status = job->last_status();
   if (info->HasAbortedDueToDependencyChange()) {
     info->set_bailout_reason(kBailedOutDueToDependencyChange);
-    status = optimizing_compiler->AbortOptimization();
-  } else if (status != OptimizingCompiler::SUCCEEDED) {
+    status = job->AbortOptimization();
+  } else if (status != RecompileJob::SUCCEEDED) {
     info->set_bailout_reason(kFailedBailedOutLastTime);
-    status = optimizing_compiler->AbortOptimization();
+    status = job->AbortOptimization();
   } else if (isolate->DebuggerHasBreakPoints()) {
     info->set_bailout_reason(kDebuggerIsActive);
-    status = optimizing_compiler->AbortOptimization();
+    status = job->AbortOptimization();
   } else {
-    status = optimizing_compiler->GenerateAndInstallCode();
-    ASSERT(status == OptimizingCompiler::SUCCEEDED ||
-           status == OptimizingCompiler::BAILED_OUT);
+    status = job->GenerateAndInstallCode();
+    ASSERT(status == RecompileJob::SUCCEEDED ||
+           status == RecompileJob::BAILED_OUT);
   }
 
   InstallCodeCommon(*info);
-  if (status == OptimizingCompiler::SUCCEEDED) {
+  if (status == RecompileJob::SUCCEEDED) {
     Handle<Code> code = info->code();
     ASSERT(info->shared_info()->scope_info() != ScopeInfo::Empty(isolate));
     info->closure()->ReplaceCode(*code);
@@ -1115,8 +1113,8 @@
   // profiler ticks to prevent too soon re-opt after a deopt.
   info->shared_info()->code()->set_profiler_ticks(0);
   ASSERT(!info->closure()->IsInRecompileQueue());
-  return (status == OptimizingCompiler::SUCCEEDED) ? info->code()
-                                                   : Handle<Code>::null();
+  return (status == RecompileJob::SUCCEEDED) ? info->code()
+                                             : Handle<Code>::null();
 }
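
After the rename, the job reads as a small three-phase pipeline: CreateGraph(),
then OptimizeGraph(), then GenerateAndInstallCode(), where each phase may also
return FAILED or BAILED_OUT. A condensed sketch of the synchronous driver,
folding the status plumbing of MakeCrankshaftCode above into early returns:

  RecompileJob job(info);
  if (job.CreateGraph() != RecompileJob::SUCCEEDED) return;
  if (job.OptimizeGraph() != RecompileJob::SUCCEEDED) {
    job.AbortOptimization();  // fall back to full (unoptimized) code
    return;
  }
  job.GenerateAndInstallCode();
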
 
 
diff --git a/src/compiler.h b/src/compiler.h
index 8ceb61d..dc7c19f 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -86,6 +86,7 @@
   ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
   Handle<Context> context() const { return context_; }
   BailoutId osr_ast_id() const { return osr_ast_id_; }
+  uint32_t osr_pc_offset() const { return osr_pc_offset_; }
   int opt_count() const { return opt_count_; }
   int num_parameters() const;
   int num_heap_slots() const;
@@ -505,9 +506,9 @@
 // fail, bail-out to the full code generator or succeed.  Apart from
 // their return value, the status of the phase last run can be checked
 // using last_status().
-class OptimizingCompiler: public ZoneObject {
+class RecompileJob: public ZoneObject {
  public:
-  explicit OptimizingCompiler(CompilationInfo* info)
+  explicit RecompileJob(CompilationInfo* info)
       : info_(info),
         graph_builder_(NULL),
         graph_(NULL),
@@ -532,6 +533,13 @@
     return SetLastStatus(BAILED_OUT);
   }
 
+  void WaitForInstall() {
+    ASSERT(!info_->osr_ast_id().IsNone());
+    awaiting_install_ = true;
+  }
+
+  bool IsWaitingForInstall() { return awaiting_install_; }
+
  private:
   CompilationInfo* info_;
   HOptimizedGraphBuilder* graph_builder_;
@@ -541,6 +549,7 @@
   TimeDelta time_taken_to_optimize_;
   TimeDelta time_taken_to_codegen_;
   Status last_status_;
+  bool awaiting_install_;
 
   MUST_USE_RESULT Status SetLastStatus(Status status) {
     last_status_ = status;
@@ -549,9 +558,8 @@
   void RecordOptimizationStats();
 
   struct Timer {
-    Timer(OptimizingCompiler* compiler, TimeDelta* location)
-        : compiler_(compiler),
-          location_(location) {
+    Timer(RecompileJob* job, TimeDelta* location)
+        : job_(job), location_(location) {
       ASSERT(location_ != NULL);
       timer_.Start();
     }
@@ -560,7 +568,7 @@
       *location_ += timer_.Elapsed();
     }
 
-    OptimizingCompiler* compiler_;
+    RecompileJob* job_;
     ElapsedTimer timer_;
     TimeDelta* location_;
   };
@@ -625,7 +633,7 @@
                               bool is_toplevel,
                               Handle<Script> script);
 
-  static Handle<Code> InstallOptimizedCode(OptimizingCompiler* info);
+  static Handle<Code> InstallOptimizedCode(RecompileJob* job);
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   static bool MakeCodeForLiveEdit(CompilationInfo* info);
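
WaitForInstall()/IsWaitingForInstall() let a concurrently compiled OSR job be
parked until the running function next reaches the matching back edge. A
hedged sketch of the intended hand-off; FindReadyOsrJob is an assumed helper,
not part of this change:

  // On the main thread, when an OSR entry is requested:
  RecompileJob* job = FindReadyOsrJob(function, osr_ast_id);  // hypothetical
  if (job != NULL && job->IsWaitingForInstall()) {
    Handle<Code> code = Compiler::InstallOptimizedCode(job);
    // ... if non-null, enter 'code' at the loop's OSR entry point.
  }
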
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index a8229ea..e745471 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -322,6 +322,7 @@
            "artificial compilation delay in ms")
 DEFINE_bool(concurrent_osr, false,
             "concurrent on-stack replacement")
+DEFINE_implication(concurrent_osr, concurrent_recompilation)
 
 DEFINE_bool(omit_map_checks_for_leaf_maps, true,
             "do not emit check maps for constant values that have a leaf map, "
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index f1877fb..c4ae1d7 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -1618,8 +1618,7 @@
 void BackEdgeTable::Patch(Isolate* isolate,
                           Code* unoptimized) {
   DisallowHeapAllocation no_gc;
-  Code* replacement_code =
-      isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+  Code* patch = isolate->builtins()->builtin(Builtins::kOnStackReplacement);
 
   // Iterate over the back edge table and patch every interrupt
   // call to an unconditional call to the replacement code.
@@ -1631,7 +1630,7 @@
       ASSERT_EQ(INTERRUPT, GetBackEdgeState(isolate,
                                             unoptimized,
                                             back_edges.pc(i)));
-      PatchAt(unoptimized, back_edges.pc(i), replacement_code);
+      PatchAt(unoptimized, back_edges.pc(i), ON_STACK_REPLACEMENT, patch);
     }
   }
 
@@ -1643,8 +1642,7 @@
 void BackEdgeTable::Revert(Isolate* isolate,
                            Code* unoptimized) {
   DisallowHeapAllocation no_gc;
-  Code* interrupt_code =
-      isolate->builtins()->builtin(Builtins::kInterruptCheck);
+  Code* patch = isolate->builtins()->builtin(Builtins::kInterruptCheck);
 
   // Iterate over the back edge table and revert the patched interrupt calls.
   ASSERT(unoptimized->back_edges_patched_for_osr());
@@ -1653,10 +1651,10 @@
   BackEdgeTable back_edges(unoptimized, &no_gc);
   for (uint32_t i = 0; i < back_edges.length(); i++) {
     if (static_cast<int>(back_edges.loop_depth(i)) <= loop_nesting_level) {
-      ASSERT_EQ(ON_STACK_REPLACEMENT, GetBackEdgeState(isolate,
-                                                       unoptimized,
-                                                       back_edges.pc(i)));
-      RevertAt(unoptimized, back_edges.pc(i), interrupt_code);
+      ASSERT_NE(INTERRUPT, GetBackEdgeState(isolate,
+                                            unoptimized,
+                                            back_edges.pc(i)));
+      PatchAt(unoptimized, back_edges.pc(i), INTERRUPT, patch);
     }
   }
 
@@ -1667,6 +1665,29 @@
 }
 
 
+void BackEdgeTable::AddStackCheck(CompilationInfo* info) {
+  DisallowHeapAllocation no_gc;
+  Isolate* isolate = info->isolate();
+  Code* code = info->shared_info()->code();
+  Address pc = code->instruction_start() + info->osr_pc_offset();
+  ASSERT_EQ(ON_STACK_REPLACEMENT, GetBackEdgeState(isolate, code, pc));
+  Code* patch = isolate->builtins()->builtin(Builtins::kOsrAfterStackCheck);
+  PatchAt(code, pc, OSR_AFTER_STACK_CHECK, patch);
+}
+
+
+void BackEdgeTable::RemoveStackCheck(CompilationInfo* info) {
+  DisallowHeapAllocation no_gc;
+  Isolate* isolate = info->isolate();
+  Code* code = info->shared_info()->code();
+  Address pc = code->instruction_start() + info->osr_pc_offset();
+  if (GetBackEdgeState(isolate, code, pc) == OSR_AFTER_STACK_CHECK) {
+    Code* patch = isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+    PatchAt(code, pc, ON_STACK_REPLACEMENT, patch);
+  }
+}
+
+
 #ifdef DEBUG
 bool BackEdgeTable::Verify(Isolate* isolate,
                            Code* unoptimized,
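
Together, AddStackCheck() and RemoveStackCheck() let a single back edge be
downgraded temporarily while a concurrent OSR compile is in flight. A hedged
sketch of the intended lifecycle; the calling code is assumed, not part of
this change:

  // OSR job queued: stop re-entering the runtime on every iteration;
  // from now on only a tripped stack guard triggers OSR for this edge.
  BackEdgeTable::AddStackCheck(info);     // -> OSR_AFTER_STACK_CHECK
  // ...
  // OSR job finished or discarded: restore eager OSR entry.
  BackEdgeTable::RemoveStackCheck(info);  // -> ON_STACK_REPLACEMENT
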
diff --git a/src/full-codegen.h b/src/full-codegen.h
index adfa1c1..93a2528 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -912,7 +912,8 @@
 
   enum BackEdgeState {
     INTERRUPT,
-    ON_STACK_REPLACEMENT
+    ON_STACK_REPLACEMENT,
+    OSR_AFTER_STACK_CHECK
   };
 
   // Patch all interrupts with allowed loop depth in the unoptimized code to
@@ -920,28 +921,29 @@
   static void Patch(Isolate* isolate,
                     Code* unoptimized_code);
 
-  // Patch the interrupt at the instruction before pc_after in
-  // the unoptimized code to unconditionally call replacement_code.
+  // Patch the back edge at pc to target_state, using replacement_code.
   static void PatchAt(Code* unoptimized_code,
-                      Address pc_after,
+                      Address pc,
+                      BackEdgeState target_state,
                       Code* replacement_code);
 
-  // Change all patched interrupts patched in the unoptimized code
-  // back to normal interrupts.
+  // Change all patched back edges back to normal interrupts.
   static void Revert(Isolate* isolate,
                      Code* unoptimized_code);
 
-  // Change patched interrupt in the unoptimized code
-  // back to a normal interrupt.
-  static void RevertAt(Code* unoptimized_code,
-                       Address pc_after,
-                       Code* interrupt_code);
+  // Change a back edge patched for on-stack replacement to perform a
+  // stack check first.
+  static void AddStackCheck(CompilationInfo* info);
 
-#ifdef DEBUG
+  // Remove the stack check, if present, and go back to on-stack replacement.
+  static void RemoveStackCheck(CompilationInfo* info);
+
+  // Return the current patch state of the back edge.
   static BackEdgeState GetBackEdgeState(Isolate* isolate,
                                         Code* unoptimized_code,
                                         Address pc_after);
 
+#ifdef DEBUG
   // Verify that all back edges of a certain loop depth are patched.
   static bool Verify(Isolate* isolate,
                      Code* unoptimized_code,
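
For reference, the states and the operations that move a back edge between
them (a summary of the methods above, not new behavior):

  INTERRUPT             --Patch-->            ON_STACK_REPLACEMENT
  ON_STACK_REPLACEMENT  --AddStackCheck-->    OSR_AFTER_STACK_CHECK
  OSR_AFTER_STACK_CHECK --RemoveStackCheck--> ON_STACK_REPLACEMENT
  ON_STACK_REPLACEMENT  --Revert-->           INTERRUPT
  OSR_AFTER_STACK_CHECK --Revert-->           INTERRUPT

Revert() accepting either OSR state is why full-codegen.cc now asserts
ASSERT_NE(INTERRUPT, ...) where it used to require ON_STACK_REPLACEMENT.
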
diff --git a/src/handles.cc b/src/handles.cc
index 033fdab..6b9025d 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -285,15 +285,6 @@
 }
 
 
-Handle<String> SubString(Handle<String> str,
-                         int start,
-                         int end,
-                         PretenureFlag pretenure) {
-  CALL_HEAP_FUNCTION(str->GetIsolate(),
-                     str->SubString(start, end, pretenure), String);
-}
-
-
 // Wrappers for scripts are kept alive and cached in weak global
 // handles referred from foreign objects held by the scripts as long as
 // they are used. When they are not used anymore, the garbage
diff --git a/src/handles.h b/src/handles.h
index 585f7b4..0f619d5 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -299,11 +299,6 @@
 Handle<FixedArray> UnionOfKeys(Handle<FixedArray> first,
                                Handle<FixedArray> second);
 
-Handle<String> SubString(Handle<String> str,
-                         int start,
-                         int end,
-                         PretenureFlag pretenure = NOT_TENURED);
-
 // Sets the expected number of properties for the function's instances.
 void SetExpectedNofProperties(Handle<JSFunction> func, int nof);
 
diff --git a/src/heap.h b/src/heap.h
index cb979a6..e059434 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -292,7 +292,10 @@
   V(throw_string, "throw")                                               \
   V(done_string, "done")                                                 \
   V(value_string, "value")                                               \
-  V(next_string, "next")
+  V(next_string, "next")                                                 \
+  V(byte_length_string, "byteLength")                                    \
+  V(byte_offset_string, "byteOffset")                                    \
+  V(buffer_string, "buffer")
 
 // Forward declarations.
 class GCTracer;
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index d301036..6125ca2 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1237,6 +1237,12 @@
 }
 
 
+bool HMul::MulMinusOne() {
+  return left()->EqualsInteger32Constant(-1) ||
+         right()->EqualsInteger32Constant(-1);
+}
+
+
 HValue* HMod::Canonicalize() {
   return this;
 }
@@ -1622,10 +1632,13 @@
     Range* a = left()->range();
     Range* b = right()->range();
     Range* res = a->Copy(zone);
-    if (!res->MulAndCheckOverflow(r, b)) {
-      // Clearing the kCanOverflow flag when kAllUsesAreTruncatingToInt32
-      // would be wrong, because truncated integer multiplication is too
-      // precise and therefore not the same as converting to Double and back.
+    if (!res->MulAndCheckOverflow(r, b) ||
+        (((r.IsInteger32() && CheckFlag(kAllUsesTruncatingToInt32)) ||
+         (r.IsSmi() && CheckFlag(kAllUsesTruncatingToSmi))) &&
+         MulMinusOne())) {
+      // Truncated int multiplication is too precise and therefore not the
+      // same as converting to Double and back; only truncated
+      // multiplication by -1 is safe to treat as non-overflowing.
       ClearFlag(kCanOverflow);
     }
     res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToSmi) &&
@@ -1647,7 +1660,10 @@
     result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
                                   (a->CanBeMinusZero() ||
                                    (a->CanBeZero() && b->CanBeNegative())));
-    if (!a->Includes(kMinInt) || !b->Includes(-1)) {
+    if (!a->Includes(kMinInt) ||
+        !b->Includes(-1) ||
+        CheckFlag(kAllUsesTruncatingToInt32)) {
+      // It is safe to clear kCanOverflow when kAllUsesTruncatingToInt32.
       ClearFlag(HValue::kCanOverflow);
     }
 
@@ -2596,6 +2612,12 @@
   ASSERT(CheckFlag(kFlexibleRepresentation));
   Representation new_rep = RepresentationFromInputs();
   UpdateRepresentation(new_rep, h_infer, "inputs");
+
+  if (representation().IsSmi() && HasNonSmiUse()) {
+    UpdateRepresentation(
+        Representation::Integer32(), h_infer, "use requirements");
+  }
+
   if (observed_output_representation_.IsNone()) {
     new_rep = RepresentationFromUses();
     UpdateRepresentation(new_rep, h_infer, "uses");
@@ -2603,11 +2625,6 @@
     new_rep = RepresentationFromOutput();
     UpdateRepresentation(new_rep, h_infer, "output");
   }
-
-  if (representation().IsSmi() && HasNonSmiUse()) {
-    UpdateRepresentation(
-        Representation::Integer32(), h_infer, "use requirements");
-  }
 }
 
 
@@ -2634,7 +2651,7 @@
   return ((current_rep.IsInteger32() && CheckUsesForFlag(kTruncatingToInt32)) ||
           (current_rep.IsSmi() && CheckUsesForFlag(kTruncatingToSmi))) &&
          // Mul in Integer32 mode would be too precise.
-         !this->IsMul();
+         (!this->IsMul() || HMul::cast(this)->MulMinusOne());
 }
 
 
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 71bdb33..3b6822d 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -1051,6 +1051,47 @@
     return new(zone) I(p1, p2, p3, p4, p5);                                    \
   }
 
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P0(I)                         \
+  static I* New(Zone* zone, HValue* context) {                                 \
+    return new(zone) I(context);                                               \
+  }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(I, P1)                     \
+  static I* New(Zone* zone, HValue* context, P1 p1) {                          \
+    return new(zone) I(context, p1);                                           \
+  }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(I, P1, P2)                 \
+  static I* New(Zone* zone, HValue* context, P1 p1, P2 p2) {                   \
+    return new(zone) I(context, p1, p2);                                       \
+  }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(I, P1, P2, P3)             \
+  static I* New(Zone* zone, HValue* context, P1 p1, P2 p2, P3 p3) {            \
+    return new(zone) I(context, p1, p2, p3);                                   \
+  }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(I, P1, P2, P3, P4)         \
+  static I* New(Zone* zone,                                                    \
+                HValue* context,                                               \
+                P1 p1,                                                         \
+                P2 p2,                                                         \
+                P3 p3,                                                         \
+                P4 p4) {                                                       \
+    return new(zone) I(context, p1, p2, p3, p4);                               \
+  }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P5(I, P1, P2, P3, P4, P5)     \
+  static I* New(Zone* zone,                                                    \
+                HValue* context,                                               \
+                P1 p1,                                                         \
+                P2 p2,                                                         \
+                P3 p3,                                                         \
+                P4 p4,                                                         \
+                P5 p5) {                                                       \
+    return new(zone) I(context, p1, p2, p3, p4, p5);                           \
+  }
+
 
 class HInstruction : public HValue {
  public:
@@ -1392,18 +1433,8 @@
 
 class HReturn V8_FINAL : public HTemplateControlInstruction<0, 3> {
  public:
-  static HInstruction* New(Zone* zone,
-                           HValue* context,
-                           HValue* value,
-                           HValue* parameter_count) {
-    return new(zone) HReturn(value, context, parameter_count);
-  }
-
-  static HInstruction* New(Zone* zone,
-                           HValue* context,
-                           HValue* value) {
-    return new(zone) HReturn(value, context, 0);
-  }
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HReturn, HValue*, HValue*);
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HReturn, HValue*);
 
   virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
     return Representation::Tagged();
@@ -1418,7 +1449,7 @@
   DECLARE_CONCRETE_INSTRUCTION(Return)
 
  private:
-  HReturn(HValue* value, HValue* context, HValue* parameter_count) {
+  HReturn(HValue* context, HValue* value, HValue* parameter_count = 0) {
     SetOperandAt(0, value);
     SetOperandAt(1, context);
     SetOperandAt(2, parameter_count);
@@ -1444,11 +1475,7 @@
 
 class HThrow V8_FINAL : public HTemplateInstruction<2> {
  public:
-  static HThrow* New(Zone* zone,
-                     HValue* context,
-                     HValue* value) {
-    return new(zone) HThrow(context, value);
-  }
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HThrow, HValue*);
 
   virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
     return Representation::Tagged();
@@ -1749,7 +1776,7 @@
     kBackwardsBranch
   };
 
-  DECLARE_INSTRUCTION_FACTORY_P2(HStackCheck, HValue*, Type);
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HStackCheck, Type);
 
   HValue* context() { return OperandAt(0); }
 
@@ -1939,22 +1966,9 @@
 
 class HDeclareGlobals V8_FINAL : public HUnaryOperation {
  public:
-  HDeclareGlobals(HValue* context,
-                  Handle<FixedArray> pairs,
-                  int flags)
-      : HUnaryOperation(context),
-        pairs_(pairs),
-        flags_(flags) {
-    set_representation(Representation::Tagged());
-    SetAllSideEffects();
-  }
-
-  static HDeclareGlobals* New(Zone* zone,
-                              HValue* context,
-                              Handle<FixedArray> pairs,
-                              int flags) {
-    return new(zone) HDeclareGlobals(context, pairs, flags);
-  }
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HDeclareGlobals,
+                                              Handle<FixedArray>,
+                                              int);
 
   HValue* context() { return OperandAt(0); }
   Handle<FixedArray> pairs() const { return pairs_; }
@@ -1967,6 +1981,16 @@
   }
 
  private:
+  HDeclareGlobals(HValue* context,
+                  Handle<FixedArray> pairs,
+                  int flags)
+      : HUnaryOperation(context),
+        pairs_(pairs),
+        flags_(flags) {
+    set_representation(Representation::Tagged());
+    SetAllSideEffects();
+  }
+
   Handle<FixedArray> pairs_;
   int flags_;
 };
@@ -2083,16 +2107,7 @@
 
 class HInvokeFunction V8_FINAL : public HBinaryCall {
  public:
-  HInvokeFunction(HValue* context, HValue* function, int argument_count)
-      : HBinaryCall(context, function, argument_count) {
-  }
-
-  static HInvokeFunction* New(Zone* zone,
-                              HValue* context,
-                              HValue* function,
-                              int argument_count) {
-    return new(zone) HInvokeFunction(context, function, argument_count);
-  }
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInvokeFunction, HValue*, int);
 
   HInvokeFunction(HValue* context,
                   HValue* function,
@@ -2121,6 +2136,10 @@
   DECLARE_CONCRETE_INSTRUCTION(InvokeFunction)
 
  private:
+  HInvokeFunction(HValue* context, HValue* function, int argument_count)
+      : HBinaryCall(context, function, argument_count) {
+  }
+
   Handle<JSFunction> known_function_;
   int formal_parameter_count_;
 };
@@ -2128,10 +2147,9 @@
 
 class HCallConstantFunction V8_FINAL : public HCall<0> {
  public:
-  HCallConstantFunction(Handle<JSFunction> function, int argument_count)
-      : HCall<0>(argument_count),
-        function_(function),
-        formal_parameter_count_(function->shared()->formal_parameter_count()) {}
+  DECLARE_INSTRUCTION_FACTORY_P2(HCallConstantFunction,
+                                 Handle<JSFunction>,
+                                 int);
 
   Handle<JSFunction> function() const { return function_; }
   int formal_parameter_count() const { return formal_parameter_count_; }
@@ -2150,6 +2168,11 @@
   DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction)
 
  private:
+  HCallConstantFunction(Handle<JSFunction> function, int argument_count)
+      : HCall<0>(argument_count),
+        function_(function),
+        formal_parameter_count_(function->shared()->formal_parameter_count()) {}
+
   Handle<JSFunction> function_;
   int formal_parameter_count_;
 };
@@ -2157,22 +2180,23 @@
 
 class HCallKeyed V8_FINAL : public HBinaryCall {
  public:
-  HCallKeyed(HValue* context, HValue* key, int argument_count)
-      : HBinaryCall(context, key, argument_count) {
-  }
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallKeyed, HValue*, int);
 
   HValue* context() { return first(); }
   HValue* key() { return second(); }
 
   DECLARE_CONCRETE_INSTRUCTION(CallKeyed)
+
+ private:
+  HCallKeyed(HValue* context, HValue* key, int argument_count)
+      : HBinaryCall(context, key, argument_count) {
+  }
 };
 
 
 class HCallNamed V8_FINAL : public HUnaryCall {
  public:
-  HCallNamed(HValue* context, Handle<String> name, int argument_count)
-      : HUnaryCall(context, argument_count), name_(name) {
-  }
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallNamed, Handle<String>, int);
 
   virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
 
@@ -2182,42 +2206,33 @@
   DECLARE_CONCRETE_INSTRUCTION(CallNamed)
 
  private:
+  HCallNamed(HValue* context, Handle<String> name, int argument_count)
+      : HUnaryCall(context, argument_count), name_(name) {
+  }
+
   Handle<String> name_;
 };
 
 
 class HCallFunction V8_FINAL : public HBinaryCall {
  public:
-  HCallFunction(HValue* context, HValue* function, int argument_count)
-      : HBinaryCall(context, function, argument_count) {
-  }
-
-  static HCallFunction* New(Zone* zone,
-                            HValue* context,
-                            HValue* function,
-                            int argument_count) {
-    return new(zone) HCallFunction(context, function, argument_count);
-  }
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallFunction, HValue*, int);
 
   HValue* context() { return first(); }
   HValue* function() { return second(); }
 
   DECLARE_CONCRETE_INSTRUCTION(CallFunction)
+
+ private:
+  HCallFunction(HValue* context, HValue* function, int argument_count)
+      : HBinaryCall(context, function, argument_count) {
+  }
 };
 
 
 class HCallGlobal V8_FINAL : public HUnaryCall {
  public:
-  HCallGlobal(HValue* context, Handle<String> name, int argument_count)
-      : HUnaryCall(context, argument_count), name_(name) {
-  }
-
-  static HCallGlobal* New(Zone* zone,
-                          HValue* context,
-                          Handle<String> name,
-                          int argument_count) {
-    return new(zone) HCallGlobal(context, name, argument_count);
-  }
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallGlobal, Handle<String>, int);
 
   virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
 
@@ -2227,6 +2242,10 @@
   DECLARE_CONCRETE_INSTRUCTION(CallGlobal)
 
  private:
+  HCallGlobal(HValue* context, Handle<String> name, int argument_count)
+      : HUnaryCall(context, argument_count), name_(name) {
+  }
+
   Handle<String> name_;
 };
 
@@ -2257,23 +2276,26 @@
 
 class HCallNew V8_FINAL : public HBinaryCall {
  public:
-  HCallNew(HValue* context, HValue* constructor, int argument_count)
-      : HBinaryCall(context, constructor, argument_count) {}
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallNew, HValue*, int);
 
   HValue* context() { return first(); }
   HValue* constructor() { return second(); }
 
   DECLARE_CONCRETE_INSTRUCTION(CallNew)
+
+ private:
+  HCallNew(HValue* context, HValue* constructor, int argument_count)
+      : HBinaryCall(context, constructor, argument_count) {}
 };
 
 
 class HCallNewArray V8_FINAL : public HBinaryCall {
  public:
-  HCallNewArray(HValue* context, HValue* constructor, int argument_count,
-                Handle<Cell> type_cell, ElementsKind elements_kind)
-      : HBinaryCall(context, constructor, argument_count),
-        elements_kind_(elements_kind),
-        type_cell_(type_cell) {}
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HCallNewArray,
+                                              HValue*,
+                                              int,
+                                              Handle<Cell>,
+                                              ElementsKind);
 
   HValue* context() { return first(); }
   HValue* constructor() { return second(); }
@@ -2289,6 +2311,12 @@
   DECLARE_CONCRETE_INSTRUCTION(CallNewArray)
 
  private:
+  HCallNewArray(HValue* context, HValue* constructor, int argument_count,
+                Handle<Cell> type_cell, ElementsKind elements_kind)
+      : HBinaryCall(context, constructor, argument_count),
+        elements_kind_(elements_kind),
+        type_cell_(type_cell) {}
+
   ElementsKind elements_kind_;
   Handle<Cell> type_cell_;
 };
@@ -2296,13 +2324,10 @@
 
 class HCallRuntime V8_FINAL : public HCall<1> {
  public:
-  static HCallRuntime* New(Zone* zone,
-                           HValue* context,
-                           Handle<String> name,
-                           const Runtime::Function* c_function,
-                           int argument_count) {
-    return new(zone) HCallRuntime(context, name, c_function, argument_count);
-  }
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HCallRuntime,
+                                              Handle<String>,
+                                              const Runtime::Function*,
+                                              int);
 
   virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
 
@@ -3877,12 +3902,9 @@
 
 class HMathFloorOfDiv V8_FINAL : public HBinaryOperation {
  public:
-  static HMathFloorOfDiv* New(Zone* zone,
-                              HValue* context,
-                              HValue* left,
-                              HValue* right) {
-    return new(zone) HMathFloorOfDiv(context, left, right);
-  }
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HMathFloorOfDiv,
+                                              HValue*,
+                                              HValue*);
 
   virtual HValue* EnsureAndPropagateNotMinusZero(
       BitVector* visited) V8_OVERRIDE;
@@ -3928,7 +3950,6 @@
   }
 
   DECLARE_ABSTRACT_INSTRUCTION(ArithmeticBinaryOperation)
-
  private:
   virtual bool IsDeletable() const V8_OVERRIDE { return true; }
 };
@@ -4159,18 +4180,10 @@
 
 class HStringCompareAndBranch : public HTemplateControlInstruction<2, 3> {
  public:
-  HStringCompareAndBranch(HValue* context,
-                           HValue* left,
-                           HValue* right,
-                           Token::Value token)
-      : token_(token) {
-    ASSERT(Token::IsCompareOp(token));
-    SetOperandAt(0, context);
-    SetOperandAt(1, left);
-    SetOperandAt(2, right);
-    set_representation(Representation::Tagged());
-    SetGVNFlag(kChangesNewSpacePromotion);
-  }
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HStringCompareAndBranch,
+                                              HValue*,
+                                              HValue*,
+                                              Token::Value);
 
   HValue* context() { return OperandAt(0); }
   HValue* left() { return OperandAt(1); }
@@ -4190,6 +4203,19 @@
   DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch)
 
  private:
+  HStringCompareAndBranch(HValue* context,
+                          HValue* left,
+                          HValue* right,
+                          Token::Value token)
+      : token_(token) {
+    ASSERT(Token::IsCompareOp(token));
+    SetOperandAt(0, context);
+    SetOperandAt(1, left);
+    SetOperandAt(2, right);
+    set_representation(Representation::Tagged());
+    SetGVNFlag(kChangesNewSpacePromotion);
+  }
+
   Token::Value token_;
 };
 
@@ -4325,15 +4351,9 @@
 
 class HInstanceOfKnownGlobal V8_FINAL : public HTemplateInstruction<2> {
  public:
-  HInstanceOfKnownGlobal(HValue* context,
-                         HValue* left,
-                         Handle<JSFunction> right)
-      : HTemplateInstruction<2>(HType::Boolean()), function_(right) {
-    SetOperandAt(0, context);
-    SetOperandAt(1, left);
-    set_representation(Representation::Tagged());
-    SetAllSideEffects();
-  }
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInstanceOfKnownGlobal,
+                                              HValue*,
+                                              Handle<JSFunction>);
 
   HValue* context() { return OperandAt(0); }
   HValue* left() { return OperandAt(1); }
@@ -4346,6 +4366,16 @@
   DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal)
 
  private:
+  HInstanceOfKnownGlobal(HValue* context,
+                         HValue* left,
+                         Handle<JSFunction> right)
+      : HTemplateInstruction<2>(HType::Boolean()), function_(right) {
+    SetOperandAt(0, context);
+    SetOperandAt(1, left);
+    set_representation(Representation::Tagged());
+    SetAllSideEffects();
+  }
+
   Handle<JSFunction> function_;
 };
 
@@ -4551,6 +4581,8 @@
     HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
   }
 
+  bool MulMinusOne();
+
   DECLARE_CONCRETE_INSTRUCTION(Mul)
 
  protected:
@@ -4982,12 +5014,7 @@
 
 class HCallStub V8_FINAL : public HUnaryCall {
  public:
-  HCallStub(HValue* context, CodeStub::Major major_key, int argument_count)
-      : HUnaryCall(context, argument_count),
-        major_key_(major_key),
-        transcendental_type_(TranscendentalCache::kNumberOfCaches) {
-  }
-
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallStub, CodeStub::Major, int);
   CodeStub::Major major_key() { return major_key_; }
 
   HValue* context() { return value(); }
@@ -5004,6 +5031,12 @@
   DECLARE_CONCRETE_INSTRUCTION(CallStub)
 
  private:
+  HCallStub(HValue* context, CodeStub::Major major_key, int argument_count)
+      : HUnaryCall(context, argument_count),
+        major_key_(major_key),
+        transcendental_type_(TranscendentalCache::kNumberOfCaches) {
+  }
+
   CodeStub::Major major_key_;
   TranscendentalCache::Type transcendental_type_;
 };
@@ -5626,13 +5659,6 @@
                 ? Representation::Smi() : Representation::Tagged());
   }
 
-  static HObjectAccess ForTypedArrayLength() {
-    return HObjectAccess(
-        kInobject,
-        JSTypedArray::kLengthOffset,
-        Representation::Tagged());
-  }
-
   static HObjectAccess ForAllocationSiteTransitionInfo() {
     return HObjectAccess(kInobject, AllocationSite::kTransitionInfoOffset);
   }
@@ -6545,12 +6571,9 @@
 
 class HStringCharCodeAt V8_FINAL : public HTemplateInstruction<3> {
  public:
-  static HStringCharCodeAt* New(Zone* zone,
-                                HValue* context,
-                                HValue* string,
-                                HValue* index) {
-    return new(zone) HStringCharCodeAt(context, string, index);
-  }
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HStringCharCodeAt,
+                                              HValue*,
+                                              HValue*);
 
   virtual Representation RequiredInputRepresentation(int index) {
     // The index is supposed to be Integer32.
@@ -6654,6 +6677,24 @@
 
 class HRegExpLiteral V8_FINAL : public HMaterializedLiteral<1> {
  public:
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HRegExpLiteral,
+                                              Handle<FixedArray>,
+                                              Handle<String>,
+                                              Handle<String>,
+                                              int);
+
+  HValue* context() { return OperandAt(0); }
+  Handle<FixedArray> literals() { return literals_; }
+  Handle<String> pattern() { return pattern_; }
+  Handle<String> flags() { return flags_; }
+
+  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral)
+
+ private:
   HRegExpLiteral(HValue* context,
                  Handle<FixedArray> literals,
                  Handle<String> pattern,
@@ -6668,18 +6709,6 @@
     set_type(HType::JSObject());
   }
 
-  HValue* context() { return OperandAt(0); }
-  Handle<FixedArray> literals() { return literals_; }
-  Handle<String> pattern() { return pattern_; }
-  Handle<String> flags() { return flags_; }
-
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
-    return Representation::Tagged();
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral)
-
- private:
   Handle<FixedArray> literals_;
   Handle<String> pattern_;
   Handle<String> flags_;
@@ -6688,20 +6717,9 @@
 
 class HFunctionLiteral V8_FINAL : public HTemplateInstruction<1> {
  public:
-  HFunctionLiteral(HValue* context,
-                   Handle<SharedFunctionInfo> shared,
-                   bool pretenure)
-      : HTemplateInstruction<1>(HType::JSObject()),
-        shared_info_(shared),
-        pretenure_(pretenure),
-        has_no_literals_(shared->num_literals() == 0),
-        is_generator_(shared->is_generator()),
-        language_mode_(shared->language_mode()) {
-    SetOperandAt(0, context);
-    set_representation(Representation::Tagged());
-    SetGVNFlag(kChangesNewSpacePromotion);
-  }
-
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HFunctionLiteral,
+                                              Handle<SharedFunctionInfo>,
+                                              bool);
   HValue* context() { return OperandAt(0); }
 
   virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
@@ -6717,6 +6735,20 @@
   LanguageMode language_mode() const { return language_mode_; }
 
  private:
+  HFunctionLiteral(HValue* context,
+                   Handle<SharedFunctionInfo> shared,
+                   bool pretenure)
+      : HTemplateInstruction<1>(HType::JSObject()),
+        shared_info_(shared),
+        pretenure_(pretenure),
+        has_no_literals_(shared->num_literals() == 0),
+        is_generator_(shared->is_generator()),
+        language_mode_(shared->language_mode()) {
+    SetOperandAt(0, context);
+    set_representation(Representation::Tagged());
+    SetGVNFlag(kChangesNewSpacePromotion);
+  }
+
   virtual bool IsDeletable() const V8_OVERRIDE { return true; }
 
   Handle<SharedFunctionInfo> shared_info_;
@@ -6729,11 +6761,7 @@
 
 class HTypeof V8_FINAL : public HTemplateInstruction<2> {
  public:
-  explicit HTypeof(HValue* context, HValue* value) {
-    SetOperandAt(0, context);
-    SetOperandAt(1, value);
-    set_representation(Representation::Tagged());
-  }
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HTypeof, HValue*);
 
   HValue* context() { return OperandAt(0); }
   HValue* value() { return OperandAt(1); }
@@ -6747,6 +6775,12 @@
   DECLARE_CONCRETE_INSTRUCTION(Typeof)
 
  private:
+  HTypeof(HValue* context, HValue* value) {
+    SetOperandAt(0, context);
+    SetOperandAt(1, value);
+    set_representation(Representation::Tagged());
+  }
+
   virtual bool IsDeletable() const V8_OVERRIDE { return true; }
 };
 
@@ -6904,11 +6938,7 @@
 
 class HForInPrepareMap V8_FINAL : public HTemplateInstruction<2> {
  public:
-  static HForInPrepareMap* New(Zone* zone,
-                               HValue* context,
-                               HValue* object) {
-    return new(zone) HForInPrepareMap(context, object);
-  }
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HForInPrepareMap, HValue*);
 
   virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
     return Representation::Tagged();
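
The factory macros above let call sites construct instructions through a
uniform New() while the constructors go private. For instance,
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallNew, HValue*, int) expands,
per the macro definitions, to roughly:

  static HCallNew* New(Zone* zone, HValue* context, HValue* p1, int p2) {
    return new(zone) HCallNew(context, p1, p2);
  }

This is what allows builder code to write New<HCallNew>(constructor, argc) and
have the context supplied implicitly, as the hydrogen.cc changes below show.
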
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 0a7e9f0..fe59b54 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -3061,8 +3061,7 @@
   VisitDeclarations(scope->declarations());
   Add<HSimulate>(BailoutId::Declarations());
 
-  HValue* context = environment()->context();
-  Add<HStackCheck>(context, HStackCheck::kFunctionEntry);
+  Add<HStackCheck>(HStackCheck::kFunctionEntry);
 
   VisitStatements(current_info()->function()->body());
   if (HasStackOverflow()) return false;
@@ -3518,8 +3517,6 @@
     return Bailout(kSwitchStatementMixedOrNonLiteralSwitchLabels);
   }
 
-  HValue* context = environment()->context();
-
   CHECK_ALIVE(VisitForValue(stmt->tag()));
   Add<HSimulate>(stmt->EntryId());
   HValue* tag_value = Pop();
@@ -3570,9 +3567,9 @@
           Representation::Smi(), Representation::Smi());
       compare = compare_;
     } else {
-      compare = new(zone()) HStringCompareAndBranch(context, tag_value,
-                                                    label_value,
-                                                    Token::EQ_STRICT);
+      compare = New<HStringCompareAndBranch>(tag_value,
+                                             label_value,
+                                             Token::EQ_STRICT);
     }
 
     compare->SetSuccessorAt(0, body_block);
@@ -3664,9 +3661,8 @@
                                            BreakAndContinueInfo* break_info) {
   BreakAndContinueScope push(break_info, this);
   Add<HSimulate>(stmt->StackCheckId());
-  HValue* context = environment()->context();
-  HStackCheck* stack_check = HStackCheck::cast(Add<HStackCheck>(
-      context, HStackCheck::kBackwardsBranch));
+  HStackCheck* stack_check =
+      HStackCheck::cast(Add<HStackCheck>(HStackCheck::kBackwardsBranch));
   ASSERT(loop_entry->IsLoopHeader());
   loop_entry->loop_information()->set_stack_check(stack_check);
   CHECK_BAILOUT(Visit(stmt->body()));
@@ -3967,9 +3963,8 @@
   }
   // We also have a stack overflow if the recursive compilation did.
   if (HasStackOverflow()) return;
-  HValue* context = environment()->context();
   HFunctionLiteral* instr =
-      new(zone()) HFunctionLiteral(context, shared_info, expr->pretenure());
+      New<HFunctionLiteral>(shared_info, expr->pretenure());
   return ast_context()->ReturnInstruction(instr, expr->id());
 }
 
@@ -4148,13 +4143,10 @@
   ASSERT(current_block()->HasPredecessor());
   Handle<JSFunction> closure = function_state()->compilation_info()->closure();
   Handle<FixedArray> literals(closure->literals());
-  HValue* context = environment()->context();
-
-  HRegExpLiteral* instr = new(zone()) HRegExpLiteral(context,
-                                                     literals,
-                                                     expr->pattern(),
-                                                     expr->flags(),
-                                                     expr->literal_index());
+  HRegExpLiteral* instr = New<HRegExpLiteral>(literals,
+                                              expr->pattern(),
+                                              expr->flags(),
+                                              expr->literal_index());
   return ast_context()->ReturnInstruction(instr, expr->id());
 }
 
@@ -4766,7 +4758,7 @@
 
 bool HOptimizedGraphBuilder::PropertyAccessInfo::CanLoadMonomorphic() {
   if (!CanInlinePropertyAccess(*map_)) return IsStringLength();
-  if (IsArrayLength()) return true;
+  if (IsJSObjectFieldAccessor()) return true;
   if (!LookupDescriptor()) return false;
   if (lookup_.IsFound()) return true;
   return LookupInPrototypes();
@@ -4798,9 +4790,10 @@
     return true;
   }
 
-  if (IsTypedArrayLength()) {
+  if (IsJSObjectFieldAccessor()) {
+    InstanceType instance_type = map_->instance_type();
     for (int i = 1; i < types->length(); ++i) {
-      if (types->at(i)->instance_type() != JS_TYPED_ARRAY_TYPE) return false;
+      if (types->at(i)->instance_type() != instance_type) return false;
     }
     return true;
   }
@@ -4821,20 +4814,10 @@
     BailoutId ast_id,
     BailoutId return_id,
     bool can_inline_accessor) {
-  if (info->IsStringLength()) {
-    return New<HLoadNamedField>(
-        checked_object, HObjectAccess::ForStringLength());
-  }
 
-  if (info->IsArrayLength()) {
-    return New<HLoadNamedField>(
-        checked_object, HObjectAccess::ForArrayLength(
-            info->map()->elements_kind()));
-  }
-
-  if (info->IsTypedArrayLength()) {
-    return New<HLoadNamedField>(
-        checked_object, HObjectAccess::ForTypedArrayLength());
+  HObjectAccess access = HObjectAccess::ForMap();  // bogus default
+  if (info->GetJSObjectFieldAccess(&access)) {
+    return New<HLoadNamedField>(checked_object, access);
   }
 
   HValue* checked_holder = checked_object;
@@ -4857,7 +4840,7 @@
       return NULL;
     }
     Add<HPushArgument>(Pop());
-    return new(zone()) HCallConstantFunction(info->accessor(), 1);
+    return New<HCallConstantFunction>(info->accessor(), 1);
   }
 
   ASSERT(info->lookup()->IsConstant());
@@ -5153,7 +5136,7 @@
       Drop(2);
       Add<HPushArgument>(object);
       Add<HPushArgument>(value);
-      instr = new(zone()) HCallConstantFunction(setter, 2);
+      instr = New<HCallConstantFunction>(setter, 2);
     } else {
       Drop(2);
       CHECK_ALIVE(instr = BuildStoreNamedMonomorphic(object,
@@ -5537,16 +5520,6 @@
 }
 
 
-HInstruction* HOptimizedGraphBuilder::BuildCallGetter(
-    HValue* object,
-    Handle<Map> map,
-    Handle<JSFunction> getter,
-    Handle<JSObject> holder) {
-  AddCheckConstantFunction(holder, object, map);
-  Add<HPushArgument>(object);
-  return new(zone()) HCallConstantFunction(getter, 1);
-}
-
 
 HInstruction* HOptimizedGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
                                                             HValue* key) {
@@ -6153,7 +6126,7 @@
   if (!TryInlineCall(expr)) {
     int argument_count = expr->arguments()->length() + 1;  // Includes receiver.
     HCallConstantFunction* call =
-        new(zone()) HCallConstantFunction(expr->target(), argument_count);
+      New<HCallConstantFunction>(expr->target(), argument_count);
     call->set_position(expr->position());
     PreProcessCall(call);
     AddInstruction(call);
@@ -6270,7 +6243,7 @@
       if (HasStackOverflow()) return;
     } else {
       HCallConstantFunction* call =
-          new(zone()) HCallConstantFunction(expr->target(), argument_count);
+          New<HCallConstantFunction>(expr->target(), argument_count);
       call->set_position(expr->position());
       PreProcessCall(call);
       AddInstruction(call);
@@ -6292,8 +6265,7 @@
     if (!ast_context()->IsEffect()) Push(graph()->GetConstant0());
     FinishExitWithHardDeoptimization("Unknown map in polymorphic call", join);
   } else {
-    HValue* context = environment()->context();
-    HCallNamed* call = new(zone()) HCallNamed(context, name, argument_count);
+    HCallNamed* call = New<HCallNamed>(name, argument_count);
     call->set_position(expr->position());
     PreProcessCall(call);
 
@@ -7052,8 +7024,7 @@
 
       CHECK_ALIVE(VisitArgumentList(expr->arguments()));
 
-      HValue* context = environment()->context();
-      call = new(zone()) HCallKeyed(context, key, argument_count);
+      call = New<HCallKeyed>(key, argument_count);
       call->set_position(expr->position());
       Drop(argument_count + 1);  // 1 is the key.
       return ast_context()->ReturnInstruction(call, expr->id());
@@ -7092,16 +7063,13 @@
         // When the target has a custom call IC generator, use the IC,
         // because it is likely to generate better code.  Also use the IC
         // when a primitive receiver check is required.
-        HValue* context = environment()->context();
-        call = PreProcessCall(
-            new(zone()) HCallNamed(context, name, argument_count));
+        call = PreProcessCall(New<HCallNamed>(name, argument_count));
       } else {
         AddCheckConstantFunction(expr->holder(), receiver, map);
 
         if (TryInlineCall(expr)) return;
         call = PreProcessCall(
-            new(zone()) HCallConstantFunction(expr->target(),
-                                              argument_count));
+            New<HCallConstantFunction>(expr->target(), argument_count));
       }
     } else if (types != NULL && types->length() > 1) {
       ASSERT(expr->check_type() == RECEIVER_MAP_CHECK);
@@ -7109,9 +7077,7 @@
       return;
 
     } else {
-      HValue* context = environment()->context();
-      call = PreProcessCall(
-          new(zone()) HCallNamed(context, name, argument_count));
+      call = PreProcessCall(New<HCallNamed>(name, argument_count));
     }
 
   } else {
@@ -7171,9 +7137,7 @@
         if (CallStubCompiler::HasCustomCallGenerator(expr->target())) {
           // When the target has a custom call IC generator, use the IC,
           // because it is likely to generate better code.
-          HValue* context = environment()->context();
-          call = PreProcessCall(
-              new(zone()) HCallNamed(context, var->name(), argument_count));
+          call = PreProcessCall(New<HCallNamed>(var->name(), argument_count));
         } else {
           call = PreProcessCall(new(zone()) HCallKnownGlobal(expr->target(),
                                                            argument_count));
@@ -7247,7 +7211,6 @@
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
   int argument_count = expr->arguments()->length() + 1;  // Plus constructor.
-  HValue* context = environment()->context();
   Factory* factory = isolate()->factory();
 
   if (FLAG_inline_construct &&
@@ -7336,8 +7299,8 @@
     receiver->DeleteAndReplaceWith(NULL);
     check->DeleteAndReplaceWith(NULL);
     environment()->SetExpressionStackAt(receiver_index, function);
-    HInstruction* call = PreProcessCall(
-        new(zone()) HCallNew(context, function, argument_count));
+    HInstruction* call =
+      PreProcessCall(New<HCallNew>(function, argument_count));
     call->set_position(expr->position());
     return ast_context()->ReturnInstruction(call, expr->id());
   } else {
@@ -7352,10 +7315,10 @@
     if (expr->target().is_identical_to(array_function)) {
       Handle<Cell> cell = expr->allocation_info_cell();
       Add<HCheckValue>(constructor, array_function);
-      call = new(zone()) HCallNewArray(context, constructor, argument_count,
-                                       cell, expr->elements_kind());
+      call = New<HCallNewArray>(constructor, argument_count,
+                                cell, expr->elements_kind());
     } else {
-      call = new(zone()) HCallNew(context, constructor, argument_count);
+      call = New<HCallNew>(constructor, argument_count);
     }
     Drop(argument_count);
     call->set_position(expr->position());
@@ -7480,8 +7443,7 @@
 void HOptimizedGraphBuilder::VisitTypeof(UnaryOperation* expr) {
   CHECK_ALIVE(VisitForTypeOf(expr->expression()));
   HValue* value = Pop();
-  HValue* context = environment()->context();
-  HInstruction* instr = new(zone()) HTypeof(context, value);
+  HInstruction* instr = New<HTypeof>(value);
   return ast_context()->ReturnInstruction(instr, expr->id());
 }
 
@@ -8270,7 +8232,7 @@
     } else {
       Add<HCheckValue>(right, target);
       HInstanceOfKnownGlobal* result =
-          new(zone()) HInstanceOfKnownGlobal(context, left, target);
+        New<HInstanceOfKnownGlobal>(left, target);
       result->set_position(expr->position());
       return ast_context()->ReturnInstruction(result, expr->id());
     }
@@ -8283,7 +8245,7 @@
     Add<HPushArgument>(right);
     // TODO(olivf) InvokeFunction produces a check for the parameter count,
     // even though we are certain to pass the correct number of arguments here.
-    HInstruction* result = new(zone()) HInvokeFunction(context, function, 2);
+    HInstruction* result = New<HInvokeFunction>(function, 2);
     result->set_position(expr->position());
     return ast_context()->ReturnInstruction(result, expr->id());
   }
@@ -9082,9 +9044,7 @@
   CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
   HValue* right = Pop();
   HValue* left = Pop();
-  HValue* context = environment()->context();
-  HInstruction* result = HStringAdd::New(
-      zone(), context, left, right, STRING_ADD_CHECK_BOTH);
+  HInstruction* result = New<HStringAdd>(left, right, STRING_ADD_CHECK_BOTH);
   return ast_context()->ReturnInstruction(result, call->id());
 }
 
@@ -9093,8 +9053,7 @@
 void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
   ASSERT_EQ(3, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
-  HValue* context = environment()->context();
-  HCallStub* result = new(zone()) HCallStub(context, CodeStub::SubString, 3);
+  HCallStub* result = New<HCallStub>(CodeStub::SubString, 3);
   Drop(3);
   return ast_context()->ReturnInstruction(result, call->id());
 }
@@ -9104,9 +9063,7 @@
 void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) {
   ASSERT_EQ(2, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
-  HValue* context = environment()->context();
-  HCallStub* result =
-      new(zone()) HCallStub(context, CodeStub::StringCompare, 2);
+  HCallStub* result = New<HCallStub>(CodeStub::StringCompare, 2);
   Drop(2);
   return ast_context()->ReturnInstruction(result, call->id());
 }
@@ -9116,8 +9073,7 @@
 void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
   ASSERT_EQ(4, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
-  HValue* context = environment()->context();
-  HCallStub* result = new(zone()) HCallStub(context, CodeStub::RegExpExec, 4);
+  HCallStub* result = New<HCallStub>(CodeStub::RegExpExec, 4);
   Drop(4);
   return ast_context()->ReturnInstruction(result, call->id());
 }
@@ -9127,9 +9083,7 @@
 void HOptimizedGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
   ASSERT_EQ(3, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
-  HValue* context = environment()->context();
-  HCallStub* result =
-      new(zone()) HCallStub(context, CodeStub::RegExpConstructResult, 3);
+  HCallStub* result = New<HCallStub>(CodeStub::RegExpConstructResult, 3);
   Drop(3);
   return ast_context()->ReturnInstruction(result, call->id());
 }
@@ -9207,9 +9161,7 @@
 void HOptimizedGraphBuilder::GenerateMathSin(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
-  HValue* context = environment()->context();
-  HCallStub* result =
-      new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
+  HCallStub* result = New<HCallStub>(CodeStub::TranscendentalCache, 1);
   result->set_transcendental_type(TranscendentalCache::SIN);
   Drop(1);
   return ast_context()->ReturnInstruction(result, call->id());
@@ -9219,9 +9171,7 @@
 void HOptimizedGraphBuilder::GenerateMathCos(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
-  HValue* context = environment()->context();
-  HCallStub* result =
-      new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
+  HCallStub* result = New<HCallStub>(CodeStub::TranscendentalCache, 1);
   result->set_transcendental_type(TranscendentalCache::COS);
   Drop(1);
   return ast_context()->ReturnInstruction(result, call->id());
@@ -9231,9 +9181,7 @@
 void HOptimizedGraphBuilder::GenerateMathTan(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
-  HValue* context = environment()->context();
-  HCallStub* result =
-      new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
+  HCallStub* result = New<HCallStub>(CodeStub::TranscendentalCache, 1);
   result->set_transcendental_type(TranscendentalCache::TAN);
   Drop(1);
   return ast_context()->ReturnInstruction(result, call->id());
@@ -9243,9 +9191,7 @@
 void HOptimizedGraphBuilder::GenerateMathLog(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
-  HValue* context = environment()->context();
-  HCallStub* result =
-      new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
+  HCallStub* result = New<HCallStub>(CodeStub::TranscendentalCache, 1);
   result->set_transcendental_type(TranscendentalCache::LOG);
   Drop(1);
   return ast_context()->ReturnInstruction(result, call->id());
@@ -9256,9 +9202,7 @@
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
-  HValue* context = environment()->context();
-  HInstruction* result =
-      HUnaryMathOperation::New(zone(), context, value, kMathSqrt);
+  HInstruction* result = New<HUnaryMathOperation>(value, kMathSqrt);
   return ast_context()->ReturnInstruction(result, call->id());
 }
 
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 6aa8217..be23fa8 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -30,6 +30,7 @@
 
 #include "v8.h"
 
+#include "accessors.h"
 #include "allocation.h"
 #include "ast.h"
 #include "compiler.h"
@@ -2046,19 +2047,26 @@
     // PropertyAccessInfo is built for types->first().
     bool CanLoadAsMonomorphic(SmallMapList* types);
 
-    bool IsStringLength() {
-      return map_->instance_type() < FIRST_NONSTRING_TYPE &&
-          name_->Equals(isolate()->heap()->length_string());
+    bool IsJSObjectFieldAccessor() {
+      int offset;  // unused
+      return Accessors::IsJSObjectFieldAccessor(map_, name_, &offset);
     }
 
-    bool IsArrayLength() {
-      return map_->instance_type() == JS_ARRAY_TYPE &&
-          name_->Equals(isolate()->heap()->length_string());
-    }
-
-    bool IsTypedArrayLength() {
-      return map_->instance_type() == JS_TYPED_ARRAY_TYPE &&
-          name_->Equals(isolate()->heap()->length_string());
+    bool GetJSObjectFieldAccess(HObjectAccess* access) {
+      if (IsStringLength()) {
+        *access = HObjectAccess::ForStringLength();
+        return true;
+      } else if (IsArrayLength()) {
+        *access = HObjectAccess::ForArrayLength(map_->elements_kind());
+        return true;
+      } else {
+        int offset;
+        if (Accessors::IsJSObjectFieldAccessor(map_, name_, &offset)) {
+          *access = HObjectAccess::ForJSObjectOffset(offset);
+          return true;
+        }
+        return false;
+      }
     }
 
     bool has_holder() { return !holder_.is_null(); }
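
Field-accessor loads are now two-tiered: string length and JS array length keep their specialized HObjectAccess encodings (array length depends on the elements kind), while every other well-known accessor recognized by Accessors::IsJSObjectFieldAccessor reduces to a plain in-object offset. The BuildLoadMonomorphic hunk earlier in this patch then collapses all of these cases into a single field load, roughly:

    // Mirrors the call site patched above; ForMap() is only a dummy initial
    // value that is overwritten whenever the fast path applies.
    HObjectAccess access = HObjectAccess::ForMap();
    if (info->GetJSObjectFieldAccess(&access)) {
      return New<HLoadNamedField>(checked_object, access);
    }
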
@@ -2073,6 +2081,16 @@
    private:
     Isolate* isolate() { return lookup_.isolate(); }
 
+    bool IsStringLength() {
+      return map_->instance_type() < FIRST_NONSTRING_TYPE &&
+          name_->Equals(isolate()->heap()->length_string());
+    }
+
+    bool IsArrayLength() {
+      return map_->instance_type() == JS_ARRAY_TYPE &&
+          name_->Equals(isolate()->heap()->length_string());
+    }
+
     bool LoadResult(Handle<Map> map);
     bool LookupDescriptor();
     bool LookupInPrototypes();
@@ -2173,10 +2191,6 @@
   HInstruction* BuildLoadNamedGeneric(HValue* object,
                                       Handle<String> name,
                                       Property* expr);
-  HInstruction* BuildCallGetter(HValue* object,
-                                Handle<Map> map,
-                                Handle<JSFunction> getter,
-                                Handle<JSObject> holder);
 
   HCheckMaps* AddCheckMap(HValue* object, Handle<Map> map);
 
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 91ff72c..24a2e6e 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -1307,6 +1307,24 @@
 }
 
 
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+  // We check the stack limit as an indicator that recompilation might be done.
+  Label ok;
+  ExternalReference stack_limit =
+      ExternalReference::address_of_stack_limit(masm->isolate());
+  __ cmp(esp, Operand::StaticVariable(stack_limit));
+  __ j(above_equal, &ok, Label::kNear);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kStackGuard, 0);
+  }
+  __ jmp(masm->isolate()->builtins()->OnStackReplacement(),
+         RelocInfo::CODE_TARGET);
+
+  __ bind(&ok);
+  __ ret(0);
+}
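
In pseudocode, the new builtin behaves as follows (a sketch using the names from this patch; the MIPS version further down has the same shape):

    // Not real code: C++-level rendering of Generate_OsrAfterStackCheck.
    void OsrAfterStackCheck() {
      if (sp >= stack_limit) return;  // No interrupt pending; stay unoptimized.
      Runtime_StackGuard();           // Service the interrupt; the concurrent
                                      // OSR compile may be installed here.
      TailJump(OnStackReplacement);   // Then attempt the actual OSR entry.
    }
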
+
 #undef __
 }
 }  // namespace v8::internal
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 5250f4b..d284f53 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -4898,79 +4898,70 @@
 static const byte kNopByteOne = 0x66;
 static const byte kNopByteTwo = 0x90;
 
-// The back edge bookkeeping code matches the pattern:
-//
-//     sub <profiling_counter>, <delta>
-//     jns ok
-//     call <interrupt stub>
-//   ok:
-//
-// The patched back edge looks like this:
-//
-//     sub <profiling_counter>, <delta>  ;; Not changed
-//     nop
-//     nop
-//     call <on-stack replacment>
-//   ok:
 
 void BackEdgeTable::PatchAt(Code* unoptimized_code,
                             Address pc,
+                            BackEdgeState target_state,
                             Code* replacement_code) {
-  // Turn the jump into nops.
   Address call_target_address = pc - kIntSize;
-  *(call_target_address - 3) = kNopByteOne;
-  *(call_target_address - 2) = kNopByteTwo;
-  // Replace the call address.
+  Address jns_instr_address = call_target_address - 3;
+  Address jns_offset_address = call_target_address - 2;
+
+  switch (target_state) {
+    case INTERRUPT:
+      //     sub <profiling_counter>, <delta>  ;; Not changed
+      //     jns ok
+      //     call <interrupt stub>
+      //   ok:
+      *jns_instr_address = kJnsInstruction;
+      *jns_offset_address = kJnsOffset;
+      break;
+    case ON_STACK_REPLACEMENT:
+    case OSR_AFTER_STACK_CHECK:
+      //     sub <profiling_counter>, <delta>  ;; Not changed
+      //     nop
+      //     nop
+      //     call <on-stack replacement>
+      //   ok:
+      *jns_instr_address = kNopByteOne;
+      *jns_offset_address = kNopByteTwo;
+      break;
+  }
+
   Assembler::set_target_address_at(call_target_address,
                                    replacement_code->entry());
-
   unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
       unoptimized_code, call_target_address, replacement_code);
 }
 
 
-void BackEdgeTable::RevertAt(Code* unoptimized_code,
-                             Address pc,
-                             Code* interrupt_code) {
-  // Restore the original jump.
-  Address call_target_address = pc - kIntSize;
-  *(call_target_address - 3) = kJnsInstruction;
-  *(call_target_address - 2) = kJnsOffset;
-  // Restore the original call address.
-  Assembler::set_target_address_at(call_target_address,
-                                   interrupt_code->entry());
-
-  interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
-      unoptimized_code, call_target_address, interrupt_code);
-}
-
-
-#ifdef DEBUG
 BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
     Isolate* isolate,
     Code* unoptimized_code,
     Address pc) {
   Address call_target_address = pc - kIntSize;
+  Address jns_instr_address = call_target_address - 3;
   ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
-  if (*(call_target_address - 3) == kNopByteOne) {
-    ASSERT_EQ(kNopByteTwo,      *(call_target_address - 2));
-    Code* osr_builtin =
-        isolate->builtins()->builtin(Builtins::kOnStackReplacement);
-    ASSERT_EQ(osr_builtin->entry(),
+
+  if (*jns_instr_address == kJnsInstruction) {
+    ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
+    ASSERT_EQ(isolate->builtins()->InterruptCheck()->entry(),
               Assembler::target_address_at(call_target_address));
-    return ON_STACK_REPLACEMENT;
-  } else {
-    // Get the interrupt stub code object to match against from cache.
-    Code* interrupt_builtin =
-        isolate->builtins()->builtin(Builtins::kInterruptCheck);
-    ASSERT_EQ(interrupt_builtin->entry(),
-              Assembler::target_address_at(call_target_address));
-    ASSERT_EQ(kJnsInstruction,  *(call_target_address - 3));
-    ASSERT_EQ(kJnsOffset,       *(call_target_address - 2));
     return INTERRUPT;
   }
+
+  ASSERT_EQ(kNopByteOne, *jns_instr_address);
+  ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
+
+  if (Assembler::target_address_at(call_target_address) ==
+      isolate->builtins()->OnStackReplacement()->entry()) {
+    return ON_STACK_REPLACEMENT;
+  }
+
+  ASSERT_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
+            Assembler::target_address_at(call_target_address));
+  return OSR_AFTER_STACK_CHECK;
 }
-#endif  // DEBUG
 
 
 } }  // namespace v8::internal
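
Back-edge patching on ia32 now distinguishes three states rather than two, and GetBackEdgeState recovers the state purely from the patched bytes plus the call target, which is presumably why it is no longer compiled only under #ifdef DEBUG:

    // Decoding, as asserted in GetBackEdgeState above:
    //   jns still present                               -> INTERRUPT
    //   nops, call target == OnStackReplacement entry   -> ON_STACK_REPLACEMENT
    //   nops, call target == OsrAfterStackCheck entry   -> OSR_AFTER_STACK_CHECK
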
diff --git a/src/ic.cc b/src/ic.cc
index 84e65ac..43d162c 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -403,7 +403,7 @@
 
 
 void CallICBase::Clear(Address address, Code* target) {
-  if (target->ic_state() == UNINITIALIZED) return;
+  if (IsCleared(target)) return;
   bool contextual = CallICBase::Contextual::decode(target->extra_ic_state());
   Code* code =
       target->GetIsolate()->stub_cache()->FindCallInitialize(
@@ -415,35 +415,35 @@
 
 
 void KeyedLoadIC::Clear(Isolate* isolate, Address address, Code* target) {
-  if (target->ic_state() == UNINITIALIZED) return;
+  if (IsCleared(target)) return;
   // Make sure to also clear the map used in inline fast cases.  If we
   // do not clear these maps, cached code can keep objects alive
   // through the embedded maps.
-  SetTargetAtAddress(address, *initialize_stub(isolate));
+  SetTargetAtAddress(address, *pre_monomorphic_stub(isolate));
 }
 
 
 void LoadIC::Clear(Isolate* isolate, Address address, Code* target) {
-  if (target->ic_state() == UNINITIALIZED) return;
-  SetTargetAtAddress(address, *initialize_stub(isolate));
+  if (IsCleared(target)) return;
+  SetTargetAtAddress(address, *pre_monomorphic_stub(isolate));
 }
 
 
 void StoreIC::Clear(Isolate* isolate, Address address, Code* target) {
-  if (target->ic_state() == UNINITIALIZED) return;
+  if (IsCleared(target)) return;
   SetTargetAtAddress(address,
       (Code::GetStrictMode(target->extra_ic_state()) == kStrictMode)
-        ? *initialize_stub_strict(isolate)
-        : *initialize_stub(isolate));
+        ? *pre_monomorphic_stub_strict(isolate)
+        : *pre_monomorphic_stub(isolate));
 }
 
 
 void KeyedStoreIC::Clear(Isolate* isolate, Address address, Code* target) {
-  if (target->ic_state() == UNINITIALIZED) return;
+  if (IsCleared(target)) return;
   SetTargetAtAddress(address,
       (Code::GetStrictMode(target->extra_ic_state()) == kStrictMode)
-        ? *initialize_stub_strict(isolate)
-        : *initialize_stub(isolate));
+        ? *pre_monomorphic_stub_strict(isolate)
+        : *pre_monomorphic_stub(isolate));
 }
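
All five Clear routines above follow the same pattern: the early-out now goes through the new IC::IsCleared helper (added to src/ic.h below), and a cleared IC is reset to its pre-monomorphic stub rather than its initialize stub. Using LoadIC as the example:

    //   before: if (target->ic_state() == UNINITIALIZED) return;
    //           SetTargetAtAddress(address, *initialize_stub(isolate));
    //   after:  if (IsCleared(target)) return;
    //           SetTargetAtAddress(address, *pre_monomorphic_stub(isolate));
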
 
 
@@ -1348,20 +1348,19 @@
       if (!holder.is_identical_to(receiver)) break;
       return isolate()->stub_cache()->ComputeLoadNormal(name, receiver);
     case CALLBACKS: {
-      Handle<Object> callback(lookup->GetCallbackObject(), isolate());
-      if (name->Equals(isolate()->heap()->length_string())) {
-        if (receiver->IsJSArray()) {
-          PropertyIndex lengthIndex = PropertyIndex::NewHeaderIndex(
-              JSArray::kLengthOffset / kPointerSize);
+      {
+        // Use simple field loads for some well-known callback properties.
+        int object_offset;
+        Handle<Map> map(receiver->map());
+        if (Accessors::IsJSObjectFieldAccessor(map, name, &object_offset)) {
+          PropertyIndex index =
+              PropertyIndex::NewHeaderIndex(object_offset / kPointerSize);
           return isolate()->stub_cache()->ComputeLoadField(
-              name, receiver, receiver, lengthIndex, Representation::Tagged());
-        } else if (receiver->IsJSTypedArray()) {
-          PropertyIndex lengthIndex = PropertyIndex::NewHeaderIndex(
-              JSTypedArray::kLengthOffset / kPointerSize);
-          return isolate()->stub_cache()->ComputeLoadField(
-              name, receiver, receiver, lengthIndex, Representation::Tagged());
+              name, receiver, receiver, index, Representation::Tagged());
         }
       }
+
+      Handle<Object> callback(lookup->GetCallbackObject(), isolate());
       if (callback->IsExecutableAccessorInfo()) {
         Handle<ExecutableAccessorInfo> info =
             Handle<ExecutableAccessorInfo>::cast(callback);
diff --git a/src/ic.h b/src/ic.h
index 8f09e1d..60c9079 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -134,6 +134,11 @@
                                              Object* object,
                                              InlineCacheHolderFlag holder);
 
+  static bool IsCleared(Code* code) {
+    InlineCacheState state = code->ic_state();
+    return state == UNINITIALIZED || state == PREMONOMORPHIC;
+  }
+
  protected:
   Address fp() const { return fp_; }
   Address pc() const { return *pc_address_; }
@@ -423,8 +428,11 @@
   static Handle<Code> initialize_stub(Isolate* isolate) {
     return isolate->builtins()->LoadIC_Initialize();
   }
+  static Handle<Code> pre_monomorphic_stub(Isolate* isolate) {
+    return isolate->builtins()->LoadIC_PreMonomorphic();
+  }
   virtual Handle<Code> pre_monomorphic_stub() {
-    return isolate()->builtins()->LoadIC_PreMonomorphic();
+    return pre_monomorphic_stub(isolate());
   }
 
   static void Clear(Isolate* isolate, Address address, Code* target);
@@ -502,8 +510,11 @@
   static Handle<Code> initialize_stub(Isolate* isolate) {
     return isolate->builtins()->KeyedLoadIC_Initialize();
   }
+  static Handle<Code> pre_monomorphic_stub(Isolate* isolate) {
+    return isolate->builtins()->KeyedLoadIC_PreMonomorphic();
+  }
   virtual Handle<Code> pre_monomorphic_stub() {
-    return isolate()->builtins()->KeyedLoadIC_PreMonomorphic();
+    return pre_monomorphic_stub(isolate());
   }
   Handle<Code> indexed_interceptor_stub() {
     return isolate()->builtins()->KeyedLoadIC_IndexedInterceptor();
@@ -564,11 +575,17 @@
   virtual Handle<Code> generic_stub_strict() const {
     return isolate()->builtins()->StoreIC_Generic_Strict();
   }
-  virtual Handle<Code> pre_monomorphic_stub() const {
-    return isolate()->builtins()->StoreIC_PreMonomorphic();
+  virtual Handle<Code> pre_monomorphic_stub() {
+    return pre_monomorphic_stub(isolate());
   }
-  virtual Handle<Code> pre_monomorphic_stub_strict() const {
-    return isolate()->builtins()->StoreIC_PreMonomorphic_Strict();
+  static Handle<Code> pre_monomorphic_stub(Isolate* isolate) {
+    return isolate->builtins()->StoreIC_PreMonomorphic();
+  }
+  virtual Handle<Code> pre_monomorphic_stub_strict() {
+    return pre_monomorphic_stub_strict(isolate());
+  }
+  static Handle<Code> pre_monomorphic_stub_strict(Isolate* isolate) {
+    return isolate->builtins()->StoreIC_PreMonomorphic_Strict();
   }
   virtual Handle<Code> global_proxy_stub() {
     return isolate()->builtins()->StoreIC_GlobalProxy();
@@ -675,11 +692,17 @@
                                                Handle<Object> value);
   virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code) { }
 
-  virtual Handle<Code> pre_monomorphic_stub() const {
-    return isolate()->builtins()->KeyedStoreIC_PreMonomorphic();
+  virtual Handle<Code> pre_monomorphic_stub() {
+    return pre_monomorphic_stub(isolate());
   }
-  virtual Handle<Code> pre_monomorphic_stub_strict() const {
-    return isolate()->builtins()->KeyedStoreIC_PreMonomorphic_Strict();
+  static Handle<Code> pre_monomorphic_stub(Isolate* isolate) {
+    return isolate->builtins()->KeyedStoreIC_PreMonomorphic();
+  }
+  virtual Handle<Code> pre_monomorphic_stub_strict() {
+    return pre_monomorphic_stub_strict(isolate());
+  }
+  static Handle<Code> pre_monomorphic_stub_strict(Isolate* isolate) {
+    return isolate->builtins()->KeyedStoreIC_PreMonomorphic_Strict();
   }
   virtual Handle<Code> megamorphic_stub() {
     return isolate()->builtins()->KeyedStoreIC_Generic();
diff --git a/src/isolate.cc b/src/isolate.cc
index 6eb2960..5a80d3d 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -1803,7 +1803,7 @@
       heap_profiler_(NULL),
       function_entry_hook_(NULL),
       deferred_handles_head_(NULL),
-      optimizing_compiler_thread_(this),
+      optimizing_compiler_thread_(NULL),
       marking_thread_(NULL),
       sweeper_thread_(NULL),
       stress_deopt_count_(0) {
@@ -1898,7 +1898,10 @@
     debugger()->UnloadDebugger();
 #endif
 
-    if (FLAG_concurrent_recompilation) optimizing_compiler_thread_.Stop();
+    if (FLAG_concurrent_recompilation) {
+      optimizing_compiler_thread_->Stop();
+      delete optimizing_compiler_thread_;
+    }
 
     if (FLAG_sweeper_threads > 0) {
       for (int i = 0; i < FLAG_sweeper_threads; i++) {
@@ -2240,6 +2243,11 @@
 
   deoptimizer_data_ = new DeoptimizerData(memory_allocator_);
 
+  if (FLAG_concurrent_recompilation) {
+    optimizing_compiler_thread_ = new OptimizingCompilerThread(this);
+    optimizing_compiler_thread_->Start();
+  }
+
   const bool create_heap_objects = (des == NULL);
   if (create_heap_objects && !heap_.CreateHeapObjects()) {
     V8::FatalProcessOutOfMemory("heap object creation");
@@ -2346,8 +2354,6 @@
     FastNewClosureStub::InstallDescriptors(this);
   }
 
-  if (FLAG_concurrent_recompilation) optimizing_compiler_thread_.Start();
-
   if (FLAG_marking_threads > 0) {
     marking_thread_ = new MarkingThread*[FLAG_marking_threads];
     for (int i = 0; i < FLAG_marking_threads; i++) {
diff --git a/src/isolate.h b/src/isolate.h
index b7ea209..cfea075 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -1099,7 +1099,7 @@
 #endif  // DEBUG
 
   OptimizingCompilerThread* optimizing_compiler_thread() {
-    return &optimizing_compiler_thread_;
+    return optimizing_compiler_thread_;
   }
 
   // PreInits and returns a default isolate. Needed when a new thread tries
@@ -1369,7 +1369,7 @@
 #endif
 
   DeferredHandles* deferred_handles_head_;
-  OptimizingCompilerThread optimizing_compiler_thread_;
+  OptimizingCompilerThread* optimizing_compiler_thread_;
   MarkingThread** marking_thread_;
   SweeperThread** sweeper_thread_;
 
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 1b18140..ed7764b 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -966,6 +966,23 @@
 }
 
 
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+  // We check the stack limit as an indicator that recompilation might be done.
+  Label ok;
+  __ LoadRoot(at, Heap::kStackLimitRootIndex);
+  __ Branch(&ok, hs, sp, Operand(at));
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kStackGuard, 0);
+  }
+  __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
+          RelocInfo::CODE_TARGET);
+
+  __ bind(&ok);
+  __ Ret();
+}
+
+
 void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   // 1. Make sure we have at least one argument.
   // a0: actual number of arguments
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index a12faee..a264ac4 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -540,52 +540,67 @@
   ASSERT(!temp2.is(temp3));
   ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
 
-  Label done;
+  Label zero, infinity, done;
 
   __ li(temp3, Operand(ExternalReference::math_exp_constants(0)));
 
   __ ldc1(double_scratch1, ExpConstant(0, temp3));
-  __ Move(result, kDoubleRegZero);
-  __ BranchF(&done, NULL, ge, double_scratch1, input);
+  __ BranchF(&zero, NULL, ge, double_scratch1, input);
+
   __ ldc1(double_scratch2, ExpConstant(1, temp3));
-  __ ldc1(result, ExpConstant(2, temp3));
-  __ BranchF(&done, NULL, ge, input, double_scratch2);
+  __ BranchF(&infinity, NULL, ge, input, double_scratch2);
+
   __ ldc1(double_scratch1, ExpConstant(3, temp3));
   __ ldc1(result, ExpConstant(4, temp3));
   __ mul_d(double_scratch1, double_scratch1, input);
   __ add_d(double_scratch1, double_scratch1, result);
-  __ Move(temp2, temp1, double_scratch1);
+  __ FmoveLow(temp2, double_scratch1);
   __ sub_d(double_scratch1, double_scratch1, result);
   __ ldc1(result, ExpConstant(6, temp3));
   __ ldc1(double_scratch2, ExpConstant(5, temp3));
   __ mul_d(double_scratch1, double_scratch1, double_scratch2);
   __ sub_d(double_scratch1, double_scratch1, input);
   __ sub_d(result, result, double_scratch1);
-  __ mul_d(input, double_scratch1, double_scratch1);
-  __ mul_d(result, result, input);
-  __ srl(temp1, temp2, 11);
+  __ mul_d(double_scratch2, double_scratch1, double_scratch1);
+  __ mul_d(result, result, double_scratch2);
   __ ldc1(double_scratch2, ExpConstant(7, temp3));
   __ mul_d(result, result, double_scratch2);
   __ sub_d(result, result, double_scratch1);
-  __ ldc1(double_scratch2, ExpConstant(8, temp3));
+  // Move 1 into double_scratch2, since math_exp_constants_array[8] == 1.
+  ASSERT(*reinterpret_cast<double*>
+         (ExternalReference::math_exp_constants(8).address()) == 1);
+  __ Move(double_scratch2, 1);
   __ add_d(result, result, double_scratch2);
-  __ li(at, 0x7ff);
-  __ And(temp2, temp2, at);
+  __ srl(temp1, temp2, 11);
+  __ Ext(temp2, temp2, 0, 11);
   __ Addu(temp1, temp1, Operand(0x3ff));
-  __ sll(temp1, temp1, 20);
 
   // Must not call ExpConstant() after overwriting temp3!
   __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
   __ sll(at, temp2, 3);
-  __ addu(at, at, temp3);
-  __ lw(at, MemOperand(at));
-  __ Addu(temp3, temp3, Operand(kPointerSize));
-  __ sll(temp2, temp2, 3);
-  __ addu(temp2, temp2, temp3);
-  __ lw(temp2, MemOperand(temp2));
-  __ Or(temp1, temp1, temp2);
-  __ Move(input, at, temp1);
-  __ mul_d(result, result, input);
+  __ Addu(temp3, temp3, Operand(at));
+  __ lw(temp2, MemOperand(temp3, 0));
+  __ lw(temp3, MemOperand(temp3, kPointerSize));
+  // The first word loaded is in the lower-numbered register.
+  if (temp2.code() < temp3.code()) {
+    __ sll(at, temp1, 20);
+    __ Or(temp1, temp3, at);
+    __ Move(double_scratch1, temp2, temp1);
+  } else {
+    __ sll(at, temp1, 20);
+    __ Or(temp1, temp2, at);
+    __ Move(double_scratch1, temp3, temp1);
+  }
+  __ mul_d(result, result, double_scratch1);
+  __ Branch(&done);
+
+  __ bind(&zero);
+  __ Move(result, kDoubleRegZero);
+  __ Branch(&done);
+
+  __ bind(&infinity);
+  __ ldc1(result, ExpConstant(2, temp3));
+
   __ bind(&done);
 }
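
The rewritten EmitMathExp pulls the underflow and overflow cases out of line and no longer writes to the input register, which is what allows DoMathExp below to use UseRegister instead of UseTempRegister (see also the new comment in codegen-mips.h). Assuming ExpConstant(0) and ExpConstant(1) are the lower and upper range limits, as the branch targets suggest, the control flow is:

    //   if (input <= ExpConstant(0)) goto zero;      // result = +0.0
    //   if (input >= ExpConstant(1)) goto infinity;  // result = ExpConstant(2)
    //   result = table-driven polynomial approximation, input left untouched;
    //   done:
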
 
diff --git a/src/mips/codegen-mips.h b/src/mips/codegen-mips.h
index 32d7d0d..822b94a 100644
--- a/src/mips/codegen-mips.h
+++ b/src/mips/codegen-mips.h
@@ -99,6 +99,7 @@
 
 class MathExpGenerator : public AllStatic {
  public:
+  // Register input isn't modified. All other registers are clobbered.
   static void EmitMathExp(MacroAssembler* masm,
                           DoubleRegister input,
                           DoubleRegister result,
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index f889470..b9e282f 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -4926,86 +4926,80 @@
 #undef __
 
 
-// This structure comes from FullCodeGenerator::EmitBackEdgeBookkeeping.
-// The back edge bookkeeping code matches the pattern:
-//
-// sltu at, sp, t0 / slt at, a3, zero_reg (in case of count based interrupts)
-// beq at, zero_reg, ok
-// lui t9, <interrupt stub address> upper
-// ori t9, <interrupt stub address> lower
-// jalr t9
-// nop
-// ok-label ----- pc_after points here
-//
-// We patch the code to the following form:
-//
-// addiu at, zero_reg, 1
-// beq at, zero_reg, ok  ;; Not changed
-// lui t9, <on-stack replacement address> upper
-// ori t9, <on-stack replacement address> lower
-// jalr t9  ;; Not changed
-// nop  ;; Not changed
-// ok-label ----- pc_after points here
-
 void BackEdgeTable::PatchAt(Code* unoptimized_code,
-                            Address pc_after,
+                            Address pc,
+                            BackEdgeState target_state,
                             Code* replacement_code) {
   static const int kInstrSize = Assembler::kInstrSize;
-  // Replace the sltu instruction with load-imm 1 to at, so beq is not taken.
-  CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
-  patcher.masm()->addiu(at, zero_reg, 1);
+  Address branch_address = pc - 6 * kInstrSize;
+  CodePatcher patcher(branch_address, 1);
+
+  switch (target_state) {
+    case INTERRUPT:
+      // slt at, a3, zero_reg (in case of count based interrupts)
+      // beq at, zero_reg, ok
+      // lui t9, <interrupt stub address> upper
+      // ori t9, <interrupt stub address> lower
+      // jalr t9
+      // nop
+      // ok-label ----- pc_after points here
+      patcher.masm()->slt(at, a3, zero_reg);
+      break;
+    case ON_STACK_REPLACEMENT:
+    case OSR_AFTER_STACK_CHECK:
+      // addiu at, zero_reg, 1
+      // beq at, zero_reg, ok  ;; Not changed
+      // lui t9, <on-stack replacement address> upper
+      // ori t9, <on-stack replacement address> lower
+      // jalr t9  ;; Not changed
+      // nop  ;; Not changed
+      // ok-label ----- pc_after points here
+      patcher.masm()->addiu(at, zero_reg, 1);
+      break;
+  }
+  Address pc_immediate_load_address = pc - 4 * kInstrSize;
   // Replace the stack check address in the load-immediate (lui/ori pair)
   // with the entry address of the replacement code.
-  Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
+  Assembler::set_target_address_at(pc_immediate_load_address,
                                    replacement_code->entry());
 
   unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
-      unoptimized_code, pc_after - 4 * kInstrSize, replacement_code);
+      unoptimized_code, pc_immediate_load_address, replacement_code);
 }
 
 
-void BackEdgeTable::RevertAt(Code* unoptimized_code,
-                             Address pc_after,
-                             Code* interrupt_code) {
-  static const int kInstrSize = Assembler::kInstrSize;
-  // Restore the sltu instruction so beq can be taken again.
-  CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
-  patcher.masm()->slt(at, a3, zero_reg);
-  // Restore the original call address.
-  Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
-                                   interrupt_code->entry());
-
-  interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
-      unoptimized_code, pc_after - 4 * kInstrSize, interrupt_code);
-}
-
-
-#ifdef DEBUG
 BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
     Isolate* isolate,
     Code* unoptimized_code,
-    Address pc_after) {
+    Address pc) {
   static const int kInstrSize = Assembler::kInstrSize;
-  ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
-  if (Assembler::IsAddImmediate(
-      Assembler::instr_at(pc_after - 6 * kInstrSize))) {
-    Code* osr_builtin =
-        isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+  Address branch_address = pc - 6 * kInstrSize;
+  Address pc_immediate_load_address = pc - 4 * kInstrSize;
+
+  ASSERT(Assembler::IsBeq(Assembler::instr_at(pc - 5 * kInstrSize)));
+  if (!Assembler::IsAddImmediate(Assembler::instr_at(branch_address))) {
     ASSERT(reinterpret_cast<uint32_t>(
-        Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
-        reinterpret_cast<uint32_t>(osr_builtin->entry()));
-    return ON_STACK_REPLACEMENT;
-  } else {
-    // Get the interrupt stub code object to match against from cache.
-    Code* interrupt_builtin =
-        isolate->builtins()->builtin(Builtins::kInterruptCheck);
-    ASSERT(reinterpret_cast<uint32_t>(
-        Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
-        reinterpret_cast<uint32_t>(interrupt_builtin->entry()));
+        Assembler::target_address_at(pc_immediate_load_address)) ==
+           reinterpret_cast<uint32_t>(
+               isolate->builtins()->InterruptCheck()->entry()));
     return INTERRUPT;
   }
+
+  ASSERT(Assembler::IsAddImmediate(Assembler::instr_at(branch_address)));
+
+  if (reinterpret_cast<uint32_t>(
+      Assembler::target_address_at(pc_immediate_load_address)) ==
+          reinterpret_cast<uint32_t>(
+              isolate->builtins()->OnStackReplacement()->entry())) {
+    return ON_STACK_REPLACEMENT;
+  }
+
+  ASSERT(reinterpret_cast<uint32_t>(
+      Assembler::target_address_at(pc_immediate_load_address)) ==
+         reinterpret_cast<uint32_t>(
+             isolate->builtins()->OsrAfterStackCheck()->entry()));
+  return OSR_AFTER_STACK_CHECK;
 }
-#endif  // DEBUG
 
 
 } }  // namespace v8::internal
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index 7333398..730c1d0 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -1218,7 +1218,7 @@
 LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
   ASSERT(instr->representation().IsDouble());
   ASSERT(instr->value()->representation().IsDouble());
-  LOperand* input = UseTempRegister(instr->value());
+  LOperand* input = UseRegister(instr->value());
   LOperand* temp1 = TempRegister();
   LOperand* temp2 = TempRegister();
   LOperand* double_temp = FixedTemp(f6);  // Chosen by fair dice roll.
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 8c20c7a..48bd78e 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -5921,6 +5921,7 @@
 
 
 MaybeObject* NameDictionaryShape::AsObject(Heap* heap, Name* key) {
+  ASSERT(key->IsUniqueName());
   return key;
 }
 
diff --git a/src/objects.cc b/src/objects.cc
index 169307f..1399e33 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -672,12 +672,19 @@
                                      PropertyDetails details) {
   ASSERT(!object->HasFastProperties());
   Handle<NameDictionary> property_dictionary(object->property_dictionary());
+
+  if (!name->IsUniqueName()) {
+    name = object->GetIsolate()->factory()->InternalizedStringFromString(
+        Handle<String>::cast(name));
+  }
+
   int entry = property_dictionary->FindEntry(*name);
   if (entry == NameDictionary::kNotFound) {
     Handle<Object> store_value = value;
     if (object->IsGlobalObject()) {
       store_value = object->GetIsolate()->factory()->NewPropertyCell(value);
     }
+
     property_dictionary =
         NameDictionaryAdd(property_dictionary, name, store_value, details);
     object->set_properties(*property_dictionary);
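
Together with the new ASSERT in objects-inl.h above, the internalization added here (and repeated in the next hunk) establishes the invariant that name dictionaries only ever see unique names: a plain string key is internalized before it can reach the dictionary. The guard is the same in both places, up to how the isolate is obtained:

    if (!name->IsUniqueName()) {
      name = isolate->factory()->InternalizedStringFromString(
          Handle<String>::cast(name));
    }
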
@@ -2044,6 +2051,12 @@
                                      TransitionFlag transition_flag) {
   ASSERT(!object->IsJSGlobalProxy());
   Isolate* isolate = object->GetIsolate();
+
+  if (!name->IsUniqueName()) {
+    name = isolate->factory()->InternalizedStringFromString(
+        Handle<String>::cast(name));
+  }
+
   if (extensibility_check == PERFORM_EXTENSIBILITY_CHECK &&
       !object->map()->is_extensible()) {
     if (strict_mode == kNonStrictMode) {
@@ -9734,7 +9747,8 @@
 Handle<Object> SharedFunctionInfo::GetSourceCode() {
   if (!HasSourceCode()) return GetIsolate()->factory()->undefined_value();
   Handle<String> source(String::cast(Script::cast(script())->source()));
-  return SubString(source, start_position(), end_position());
+  return GetIsolate()->factory()->NewSubString(
+      source, start_position(), end_position());
 }
 
 
diff --git a/src/objects.h b/src/objects.h
index 437e61a..3f229a8 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -6207,9 +6207,6 @@
 //
 // Installation of ids for the selected builtin functions is handled
 // by the bootstrapper.
-//
-// NOTE: Order is important: math functions should be at the end of
-// the list and MathFloor should be the first math function.
 #define FUNCTIONS_WITH_ID_LIST(V)                   \
   V(Array.prototype, push, ArrayPush)               \
   V(Array.prototype, pop, ArrayPop)                 \
@@ -6244,8 +6241,7 @@
 #undef DECLARE_FUNCTION_ID
   // Fake id for a special case of Math.pow. Note, it continues the
   // list of math functions.
-  kMathPowHalf,
-  kFirstMathFunctionId = kMathFloor
+  kMathPowHalf
 };
 
 
diff --git a/src/optimizing-compiler-thread.cc b/src/optimizing-compiler-thread.cc
index 029c115..e6467f1 100644
--- a/src/optimizing-compiler-thread.cc
+++ b/src/optimizing-compiler-thread.cc
@@ -29,6 +29,7 @@
 
 #include "v8.h"
 
+#include "full-codegen.h"
 #include "hydrogen.h"
 #include "isolate.h"
 #include "v8threads.h"
@@ -93,70 +94,74 @@
 
 
 void OptimizingCompilerThread::CompileNext() {
-  OptimizingCompiler* optimizing_compiler = NULL;
-  bool result = input_queue_.Dequeue(&optimizing_compiler);
+  RecompileJob* job = NULL;
+  bool result = input_queue_.Dequeue(&job);
   USE(result);
   ASSERT(result);
   Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));
 
   // The function may have already been optimized by OSR.  Simply continue.
-  OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph();
+  RecompileJob::Status status = job->OptimizeGraph();
   USE(status);   // Prevent an unused-variable error in release mode.
-  ASSERT(status != OptimizingCompiler::FAILED);
+  ASSERT(status != RecompileJob::FAILED);
 
   // The function may have already been optimized by OSR.  Simply continue.
   // Use a mutex to make sure that functions marked for install
   // are always also queued.
-  if (!optimizing_compiler->info()->osr_ast_id().IsNone()) {
-    ASSERT(FLAG_concurrent_osr);
-    LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
-    osr_candidates_.RemoveElement(optimizing_compiler);
-    ready_for_osr_.Add(optimizing_compiler);
-  } else {
-    LockGuard<Mutex> access_queue(&queue_mutex_);
-    output_queue_.Enqueue(optimizing_compiler);
-    isolate_->stack_guard()->RequestInstallCode();
+  LockGuard<Mutex> access_queue(&queue_mutex_);
+  output_queue_.Enqueue(job);
+  isolate_->stack_guard()->RequestInstallCode();
+}
+
+
+static void DisposeRecompileJob(RecompileJob* job,
+                                bool restore_function_code) {
+  // The recompile job is allocated in the CompilationInfo's zone.
+  CompilationInfo* info = job->info();
+  if (restore_function_code) {
+    Handle<JSFunction> function = info->closure();
+    function->ReplaceCode(function->shared()->code());
   }
+  delete info;
 }
 
 
 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
-  OptimizingCompiler* optimizing_compiler;
-  // The optimizing compiler is allocated in the CompilationInfo's zone.
-  while (input_queue_.Dequeue(&optimizing_compiler)) {
+  RecompileJob* job;
+  while (input_queue_.Dequeue(&job)) {
     // This should not block, since we have one signal on the input queue
     // semaphore corresponding to each element in the input queue.
     input_queue_semaphore_.Wait();
-    CompilationInfo* info = optimizing_compiler->info();
-    if (restore_function_code) {
-      Handle<JSFunction> function = info->closure();
-      function->ReplaceCode(function->shared()->code());
+    if (job->info()->osr_ast_id().IsNone()) {
+      // OSR jobs are dealt with separately.
+      DisposeRecompileJob(job, restore_function_code);
     }
-    delete info;
   }
   Release_Store(&queue_length_, static_cast<AtomicWord>(0));
-
-  LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
-  osr_candidates_.Clear();
 }
 
 
 void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
-  OptimizingCompiler* optimizing_compiler;
-  // The optimizing compiler is allocated in the CompilationInfo's zone.
+  RecompileJob* job;
   while (true) {
     { LockGuard<Mutex> access_queue(&queue_mutex_);
-      if (!output_queue_.Dequeue(&optimizing_compiler)) break;
+      if (!output_queue_.Dequeue(&job)) break;
     }
-    CompilationInfo* info = optimizing_compiler->info();
-    if (restore_function_code) {
-      Handle<JSFunction> function = info->closure();
-      function->ReplaceCode(function->shared()->code());
+    if (job->info()->osr_ast_id().IsNone()) {
+      // OSR jobs are dealt with separately.
+      DisposeRecompileJob(job, restore_function_code);
     }
-    delete info;
   }
+}
 
-  RemoveStaleOSRCandidates(0);
+
+void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
+  RecompileJob* job;
+  for (int i = 0; i < osr_buffer_size_; i++) {
+    job = osr_buffer_[i];
+    if (job != NULL) DisposeRecompileJob(job, restore_function_code);
+  }
+  osr_cursor_ = 0;
 }
 
 
@@ -166,6 +171,10 @@
   input_queue_semaphore_.Signal();
   stop_semaphore_.Wait();
   FlushOutputQueue(true);
+  if (FLAG_concurrent_osr) FlushOsrBuffer(true);
+  if (FLAG_trace_concurrent_recompilation) {
+    PrintF("  ** Flushed concurrent recompilation queues.\n");
+  }
 }
 
 
@@ -186,12 +195,15 @@
     FlushOutputQueue(false);
   }
 
+  if (FLAG_concurrent_osr) FlushOsrBuffer(false);
+
   if (FLAG_trace_concurrent_recompilation) {
     double percentage = time_spent_compiling_.PercentOf(time_spent_total_);
     PrintF("  ** Compiler thread did %.2f%% useful work\n", percentage);
   }
 
-  if (FLAG_trace_osr && FLAG_concurrent_osr) {
+  if ((FLAG_trace_osr || FLAG_trace_concurrent_recompilation) &&
+      FLAG_concurrent_osr) {
     PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_);
   }
 
@@ -203,62 +215,75 @@
   ASSERT(!IsOptimizerThread());
   HandleScope handle_scope(isolate_);
 
-  OptimizingCompiler* compiler;
+  RecompileJob* job;
   while (true) {
     { LockGuard<Mutex> access_queue(&queue_mutex_);
-      if (!output_queue_.Dequeue(&compiler)) break;
+      if (!output_queue_.Dequeue(&job)) break;
     }
-    Compiler::InstallOptimizedCode(compiler);
+    CompilationInfo* info = job->info();
+    if (info->osr_ast_id().IsNone()) {
+      Compiler::InstallOptimizedCode(job);
+    } else {
+      if (FLAG_trace_osr) {
+        PrintF("[COSR - ");
+        info->closure()->PrintName();
+        PrintF(" is ready for install and entry at AST id %d]\n",
+               info->osr_ast_id().ToInt());
+      }
+      job->WaitForInstall();
+      BackEdgeTable::RemoveStackCheck(info);
+    }
   }
-
-  // Remove the oldest OSR candidates that are ready so that we
-  // only have limited number of them waiting.
-  if (FLAG_concurrent_osr) RemoveStaleOSRCandidates();
 }
 
 
-void OptimizingCompilerThread::QueueForOptimization(
-    OptimizingCompiler* optimizing_compiler) {
+void OptimizingCompilerThread::QueueForOptimization(RecompileJob* job) {
   ASSERT(IsQueueAvailable());
   ASSERT(!IsOptimizerThread());
   Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1));
-  if (optimizing_compiler->info()->osr_ast_id().IsNone()) {
-    optimizing_compiler->info()->closure()->MarkInRecompileQueue();
+  CompilationInfo* info = job->info();
+  if (info->osr_ast_id().IsNone()) {
+    info->closure()->MarkInRecompileQueue();
   } else {
-    LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
-    osr_candidates_.Add(optimizing_compiler);
+    if (FLAG_trace_concurrent_recompilation) {
+      PrintF("  ** Queueing ");
+      info->closure()->PrintName();
+      PrintF(" for concurrent on-stack replacement.\n");
+    }
+    AddToOsrBuffer(job);
     osr_attempts_++;
+    BackEdgeTable::AddStackCheck(info);
   }
-  input_queue_.Enqueue(optimizing_compiler);
+  input_queue_.Enqueue(job);
   input_queue_semaphore_.Signal();
 }
 
 
-OptimizingCompiler* OptimizingCompilerThread::FindReadyOSRCandidate(
+RecompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
     Handle<JSFunction> function, uint32_t osr_pc_offset) {
   ASSERT(!IsOptimizerThread());
-  OptimizingCompiler* result = NULL;
-  { LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
-    for (int i = 0; i < ready_for_osr_.length(); i++) {
-      if (ready_for_osr_[i]->info()->HasSameOsrEntry(function, osr_pc_offset)) {
-        osr_hits_++;
-        result = ready_for_osr_.Remove(i);
-        break;
-      }
+  RecompileJob* result = NULL;
+  for (int i = 0; i < osr_buffer_size_; i++) {
+    result = osr_buffer_[i];
+    if (result == NULL) continue;
+    if (result->IsWaitingForInstall() &&
+        result->info()->HasSameOsrEntry(function, osr_pc_offset)) {
+      osr_hits_++;
+      osr_buffer_[i] = NULL;
+      return result;
     }
   }
-  RemoveStaleOSRCandidates();
-  return result;
+  return NULL;
 }
 
 
 bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
                                               uint32_t osr_pc_offset) {
   ASSERT(!IsOptimizerThread());
-  LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
-  for (int i = 0; i < osr_candidates_.length(); i++) {
-    if (osr_candidates_[i]->info()->HasSameOsrEntry(function, osr_pc_offset)) {
-      return true;
+  for (int i = 0; i < osr_buffer_size_; i++) {
+    if (osr_buffer_[i] != NULL &&
+        osr_buffer_[i]->info()->HasSameOsrEntry(function, osr_pc_offset)) {
+      return !osr_buffer_[i]->IsWaitingForInstall();
     }
   }
   return false;
@@ -267,30 +292,40 @@
 
 bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
   ASSERT(!IsOptimizerThread());
-  LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
-  for (int i = 0; i < osr_candidates_.length(); i++) {
-    if (*osr_candidates_[i]->info()->closure() == function) {
-      return true;
+  for (int i = 0; i < osr_buffer_size_; i++) {
+    if (osr_buffer_[i] != NULL &&
+        *osr_buffer_[i]->info()->closure() == function) {
+      return !osr_buffer_[i]->IsWaitingForInstall();
     }
   }
   return false;
 }
 
 
-void OptimizingCompilerThread::RemoveStaleOSRCandidates(int limit) {
+void OptimizingCompilerThread::AddToOsrBuffer(RecompileJob* job) {
   ASSERT(!IsOptimizerThread());
-  LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
-  while (ready_for_osr_.length() > limit) {
-    OptimizingCompiler* compiler = ready_for_osr_.Remove(0);
-    CompilationInfo* throw_away = compiler->info();
-    if (FLAG_trace_osr) {
-      PrintF("[COSR - Discarded ");
-      throw_away->closure()->PrintName();
-      PrintF(", AST id %d]\n",
-             throw_away->osr_ast_id().ToInt());
+  // Store into the next empty slot, or replace the next stale OSR job that is
+  // waiting in vain; the replaced job is disposed of.
+  RecompileJob* stale;
+  while (true) {
+    stale = osr_buffer_[osr_cursor_];
+    if (stale == NULL) break;
+    if (stale->IsWaitingForInstall()) {
+      CompilationInfo* info = stale->info();
+      if (FLAG_trace_osr) {
+        PrintF("[COSR - Discarded ");
+        info->closure()->PrintName();
+        PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
+      }
+      BackEdgeTable::RemoveStackCheck(info);
+      DisposeRecompileJob(stale, false);
+      break;
     }
-    delete throw_away;
+    AdvanceOsrCursor();
   }
+
+  osr_buffer_[osr_cursor_] = job;
+  AdvanceOsrCursor();
 }
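
This replaces the former osr_candidates_/ready_for_osr_ lists and their mutex with a fixed-size cyclic buffer that only the main thread touches (hence the !IsOptimizerThread() asserts and the absence of locking). The protocol, in brief:

    //   AddToOsrBuffer():        from osr_cursor_, take the next empty slot or
    //                            evict the next stale job (IsWaitingForInstall),
    //                            disposing of the evicted job, then store.
    //   FindReadyOSRCandidate(): scan for a job waiting for install that matches
    //                            (function, osr_pc_offset); clear its slot on a hit.
    //   FlushOsrBuffer():        dispose of every remaining job, optionally
    //                            restoring the unoptimized code.
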
 
 
diff --git a/src/optimizing-compiler-thread.h b/src/optimizing-compiler-thread.h
index 4231765..2165a4f 100644
--- a/src/optimizing-compiler-thread.h
+++ b/src/optimizing-compiler-thread.h
@@ -40,7 +40,7 @@
 namespace internal {
 
 class HOptimizedGraphBuilder;
-class OptimizingCompiler;
+class RecompileJob;
 class SharedFunctionInfo;
 
 class OptimizingCompilerThread : public Thread {
@@ -53,22 +53,29 @@
       isolate_(isolate),
       stop_semaphore_(0),
       input_queue_semaphore_(0),
-      osr_candidates_(2),
-      ready_for_osr_(2),
+      osr_cursor_(0),
       osr_hits_(0),
       osr_attempts_(0) {
     NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
     NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
+    if (FLAG_concurrent_osr) {
+      osr_buffer_size_ = FLAG_concurrent_recompilation_queue_length + 4;
+      osr_buffer_ = NewArray<RecompileJob*>(osr_buffer_size_);
+      for (int i = 0; i < osr_buffer_size_; i++) osr_buffer_[i] = NULL;
+    }
   }
-  ~OptimizingCompilerThread() {}
+
+  ~OptimizingCompilerThread() {
+    if (FLAG_concurrent_osr) DeleteArray(osr_buffer_);
+  }
 
   void Run();
   void Stop();
   void Flush();
-  void QueueForOptimization(OptimizingCompiler* optimizing_compiler);
+  void QueueForOptimization(RecompileJob* optimizing_compiler);
   void InstallOptimizedFunctions();
-  OptimizingCompiler* FindReadyOSRCandidate(Handle<JSFunction> function,
-                                            uint32_t osr_pc_offset);
+  RecompileJob* FindReadyOSRCandidate(Handle<JSFunction> function,
+                                      uint32_t osr_pc_offset);
   bool IsQueuedForOSR(Handle<JSFunction> function, uint32_t osr_pc_offset);
 
   bool IsQueuedForOSR(JSFunction* function);
@@ -94,14 +101,18 @@
  private:
   enum StopFlag { CONTINUE, STOP, FLUSH };
 
-  // Remove the oldest OSR candidates that are ready so that we
-  // only have |limit| left waiting.
-  void RemoveStaleOSRCandidates(int limit = kReadyForOSRLimit);
-
   void FlushInputQueue(bool restore_function_code);
   void FlushOutputQueue(bool restore_function_code);
+  void FlushOsrBuffer(bool restore_function_code);
   void CompileNext();
 
+  // Add a recompilation task for OSR to the cyclic buffer, where it
+  // awaits OSR entry.  Tasks evicted from the cyclic buffer are discarded.
+  void AddToOsrBuffer(RecompileJob* compiler);
+  void AdvanceOsrCursor() {
+    osr_cursor_ = (osr_cursor_ + 1) % osr_buffer_size_;
+  }
+
 #ifdef DEBUG
   int thread_id_;
   Mutex thread_id_mutex_;
@@ -112,13 +123,16 @@
   Semaphore input_queue_semaphore_;
 
   // Queue of incoming recompilation tasks (including OSR).
-  UnboundQueue<OptimizingCompiler*> input_queue_;
+  UnboundQueue<RecompileJob*> input_queue_;
   // Queue of recompilation tasks ready to be installed (excluding OSR).
-  UnboundQueue<OptimizingCompiler*> output_queue_;
-  // List of recompilation tasks for OSR in the input queue.
-  List<OptimizingCompiler*> osr_candidates_;
-  // List of recompilation tasks ready for OSR.
-  List<OptimizingCompiler*> ready_for_osr_;
+  UnboundQueue<RecompileJob*> output_queue_;
+  // Cyclic buffer of recompilation tasks for OSR.
+  // TODO(yangguo): This may keep zombie tasks indefinitely, holding on to
+  //                a lot of memory.  Fix this.
+  RecompileJob** osr_buffer_;
+  // Cursor for the cyclic buffer.
+  int osr_cursor_;
+  int osr_buffer_size_;
 
   volatile AtomicWord stop_thread_;
   volatile Atomic32 queue_length_;
@@ -127,11 +141,8 @@
 
   // TODO(yangguo): remove this once the memory leak has been figured out.
   Mutex queue_mutex_;
-  Mutex osr_list_mutex_;
   int osr_hits_;
   int osr_attempts_;
-
-  static const int kReadyForOSRLimit = 4;
 };
 
 } }  // namespace v8::internal
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 7b26dfb..41a4f14 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -240,12 +240,16 @@
 class Win32Time {
  public:
   // Constructors.
+  Win32Time();
   explicit Win32Time(double jstime);
   Win32Time(int year, int mon, int day, int hour, int min, int sec);
 
   // Convert timestamp to JavaScript representation.
   double ToJSTime();
 
+  // Set timestamp to current time.
+  void SetToCurrentTime();
+
   // Returns the local timezone offset in milliseconds east of UTC. This is
   // the number of milliseconds you must add to UTC to get local time, i.e.
   // LocalOffset(CET) = 3600000 and LocalOffset(PST) = -28800000. This
@@ -314,6 +318,12 @@
 char Win32Time::dst_tz_name_[kTzNameSize];
 
 
+// Initialize timestamp to the start of the epoch.
+Win32Time::Win32Time() {
+  t() = 0;
+}
+
+
 // Initialize timestamp from a JavaScript timestamp.
 Win32Time::Win32Time(double jstime) {
   t() = static_cast<int64_t>(jstime) * kTimeScaler + kTimeEpoc;
@@ -340,6 +350,62 @@
 }
 
 
+// Set timestamp to current time.
+void Win32Time::SetToCurrentTime() {
+  // The default GetSystemTimeAsFileTime has a ~15.5ms resolution.
+  // Because V8 is fast, we want fast timers with at least 1ms
+  // resolution.
+  //
+  // timeGetTime() provides 1ms granularity when combined with
+  // timeBeginPeriod().  If the host application for V8 wants fast
+  // timers, it can use timeBeginPeriod to increase the resolution.
+  //
+  // The drawback of timeGetTime() is that it is a 32-bit value and
+  // hence rolls over every ~49 days.
+  //
+  // We therefore use GetSystemTimeAsFileTime as our base and
+  // extrapolate the current time from it with timeGetTime.  To deal
+  // with rollovers, we resync the clock whenever more than
+  // kMaxClockElapsedTime has passed or whenever timeGetTime rolls
+  // over.
+
+  static bool initialized = false;
+  static TimeStamp init_time;
+  static DWORD init_ticks;
+  static const int64_t kHundredNanosecondsPerSecond = 10000000;
+  static const int64_t kMaxClockElapsedTime =
+      60*kHundredNanosecondsPerSecond;  // 1 minute
+
+  // If we are uninitialized, we need to resync the clock.
+  bool needs_resync = !initialized;
+
+  // Get the current time.
+  TimeStamp time_now;
+  GetSystemTimeAsFileTime(&time_now.ft_);
+  DWORD ticks_now = timeGetTime();
+
+  // Check if we need to resync due to clock rollover.
+  needs_resync |= ticks_now < init_ticks;
+
+  // Check if we need to resync due to elapsed time.
+  needs_resync |= (time_now.t_ - init_time.t_) > kMaxClockElapsedTime;
+
+  // Check if we need to resync due to backwards time change.
+  needs_resync |= time_now.t_ < init_time.t_;
+
+  // Resync the clock if necessary.
+  if (needs_resync) {
+    GetSystemTimeAsFileTime(&init_time.ft_);
+    init_ticks = ticks_now = timeGetTime();
+    initialized = true;
+  }
+
+  // Finally, compute the actual time.
+  DWORD elapsed = ticks_now - init_ticks;
+  this->time_.t_ = init_time.t_ + (static_cast<int64_t>(elapsed) * 10000);
+}
+
+
 // Guess the name of the timezone from the bias.
 // The guess is very biased towards the northern hemisphere.
 const char* Win32Time::GuessTimezoneNameFromBias(int bias) {
@@ -529,7 +595,9 @@
 // Returns current time as the number of milliseconds since
 // 00:00:00 UTC, January 1, 1970.
 double OS::TimeCurrentMillis() {
-  return Time::Now().ToJsTime();
+  Win32Time t;
+  t.SetToCurrentTime();
+  return t.ToJSTime();
 }
 
 
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 32a85cc..7c900b3 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -33,7 +33,6 @@
 #include "bootstrapper.h"
 #include "code-stubs.h"
 #include "compilation-cache.h"
-#include "deoptimizer.h"
 #include "execution.h"
 #include "full-codegen.h"
 #include "global-handles.h"
diff --git a/src/runtime.cc b/src/runtime.cc
index 30763c1..f097681 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -8609,7 +8609,7 @@
   Handle<Code> result = Handle<Code>::null();
   BailoutId ast_id = BailoutId::None();
 
-  if (FLAG_concurrent_recompilation && FLAG_concurrent_osr) {
+  if (FLAG_concurrent_osr) {
     if (isolate->optimizing_compiler_thread()->
             IsQueuedForOSR(function, pc_offset)) {
       // Still waiting for the optimizing compiler thread to finish.  Carry on.
@@ -8621,25 +8621,25 @@
       return NULL;
     }
 
-    OptimizingCompiler* compiler = isolate->optimizing_compiler_thread()->
+    RecompileJob* job = isolate->optimizing_compiler_thread()->
         FindReadyOSRCandidate(function, pc_offset);
 
-    if (compiler == NULL) {
+    if (job == NULL) {
       if (IsSuitableForOnStackReplacement(isolate, function, unoptimized) &&
           Compiler::RecompileConcurrent(function, pc_offset)) {
         if (function->IsMarkedForLazyRecompilation() ||
             function->IsMarkedForConcurrentRecompilation()) {
           // Prevent regular recompilation if we queue this for OSR.
           // TODO(yangguo): remove this as soon as OSR becomes one-shot.
-          function->ReplaceCode(function->shared()->code());
+          function->ReplaceCode(*unoptimized);
         }
         return NULL;
       }
       // Fall through to the end in case of failure.
     } else {
       // TODO(titzer): don't install the OSR code into the function.
-      ast_id = compiler->info()->osr_ast_id();
-      result = Compiler::InstallOptimizedCode(compiler);
+      ast_id = job->info()->osr_ast_id();
+      result = Compiler::InstallOptimizedCode(job);
     }
   } else if (IsSuitableForOnStackReplacement(isolate, function, unoptimized)) {
     ast_id = unoptimized->TranslatePcOffsetToAstId(pc_offset);
diff --git a/src/version.cc b/src/version.cc
index 02ffa65..d4856ae 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     22
-#define BUILD_NUMBER      1
+#define BUILD_NUMBER      2
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 947f687..aa2fdc1 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -1386,6 +1386,23 @@
 }
 
 
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+  // We check the stack limit as an indicator that recompilation might be done.
+  Label ok;
+  __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+  __ j(above_equal, &ok);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kStackGuard, 0);
+  }
+  __ jmp(masm->isolate()->builtins()->OnStackReplacement(),
+         RelocInfo::CODE_TARGET);
+
+  __ bind(&ok);
+  __ ret(0);
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 55feb4c..33d668d 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -4884,79 +4884,70 @@
 static const byte kNopByteOne = 0x66;
 static const byte kNopByteTwo = 0x90;
 
-// The back edge bookkeeping code matches the pattern:
-//
-//     add <profiling_counter>, <-delta>
-//     jns ok
-//     call <stack guard>
-//   ok:
-//
-// We will patch away the branch so the code is:
-//
-//     add <profiling_counter>, <-delta>  ;; Not changed
-//     nop
-//     nop
-//     call <on-stack replacment>
-//   ok:
 
 void BackEdgeTable::PatchAt(Code* unoptimized_code,
-                            Address pc_after,
+                            Address pc,
+                            BackEdgeState target_state,
                             Code* replacement_code) {
-  // Turn the jump into nops.
-  Address call_target_address = pc_after - kIntSize;
-  *(call_target_address - 3) = kNopByteOne;
-  *(call_target_address - 2) = kNopByteTwo;
-  // Replace the call address.
+  Address call_target_address = pc - kIntSize;
+  Address jns_instr_address = call_target_address - 3;
+  Address jns_offset_address = call_target_address - 2;
+
+  switch (target_state) {
+    case INTERRUPT:
+      //     sub <profiling_counter>, <delta>  ;; Not changed
+      //     jns ok
+      //     call <interrupt stub>
+      //   ok:
+      *jns_instr_address = kJnsInstruction;
+      *jns_offset_address = kJnsOffset;
+      break;
+    case ON_STACK_REPLACEMENT:
+    case OSR_AFTER_STACK_CHECK:
+      //     sub <profiling_counter>, <delta>  ;; Not changed
+      //     nop
+      //     nop
+      //     call <on-stack replacement>
+      //   ok:
+      *jns_instr_address = kNopByteOne;
+      *jns_offset_address = kNopByteTwo;
+      break;
+  }
+
   Assembler::set_target_address_at(call_target_address,
                                    replacement_code->entry());
-
   unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
       unoptimized_code, call_target_address, replacement_code);
 }
 
 
-void BackEdgeTable::RevertAt(Code* unoptimized_code,
-                             Address pc_after,
-                             Code* interrupt_code) {
-  // Restore the original jump.
-  Address call_target_address = pc_after - kIntSize;
-  *(call_target_address - 3) = kJnsInstruction;
-  *(call_target_address - 2) = kJnsOffset;
-  // Restore the original call address.
-  Assembler::set_target_address_at(call_target_address,
-                                   interrupt_code->entry());
-
-  interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
-      unoptimized_code, call_target_address, interrupt_code);
-}
-
-
-#ifdef DEBUG
 BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
     Isolate* isolate,
     Code* unoptimized_code,
-    Address pc_after) {
-  Address call_target_address = pc_after - kIntSize;
+    Address pc) {
+  Address call_target_address = pc - kIntSize;
+  Address jns_instr_address = call_target_address - 3;
   ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
-  if (*(call_target_address - 3) == kNopByteOne) {
-    ASSERT_EQ(kNopByteTwo,      *(call_target_address - 2));
-    Code* osr_builtin =
-        isolate->builtins()->builtin(Builtins::kOnStackReplacement);
-    ASSERT_EQ(osr_builtin->entry(),
+
+  if (*jns_instr_address == kJnsInstruction) {
+    ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
+    ASSERT_EQ(isolate->builtins()->InterruptCheck()->entry(),
               Assembler::target_address_at(call_target_address));
-    return ON_STACK_REPLACEMENT;
-  } else {
-    // Get the interrupt stub code object to match against from cache.
-    Code* interrupt_builtin =
-        isolate->builtins()->builtin(Builtins::kInterruptCheck);
-    ASSERT_EQ(interrupt_builtin->entry(),
-              Assembler::target_address_at(call_target_address));
-    ASSERT_EQ(kJnsInstruction,  *(call_target_address - 3));
-    ASSERT_EQ(kJnsOffset,       *(call_target_address - 2));
     return INTERRUPT;
   }
+
+  ASSERT_EQ(kNopByteOne, *jns_instr_address);
+  ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
+
+  if (Assembler::target_address_at(call_target_address) ==
+      isolate->builtins()->OnStackReplacement()->entry()) {
+    return ON_STACK_REPLACEMENT;
+  }
+
+  ASSERT_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
+            Assembler::target_address_at(call_target_address));
+  return OSR_AFTER_STACK_CHECK;
 }
-#endif  // DEBUG
 
 
 } }  // namespace v8::internal
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index f2971be..9f4ad46 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -2822,7 +2822,7 @@
   CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
 
   Code* ic_after = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
-  CHECK(ic_after->ic_state() == UNINITIALIZED);
+  CHECK(IC::IsCleared(ic_after));
 }
 
 
@@ -2863,7 +2863,7 @@
   CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
 
   Code* ic_after = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
-  CHECK(ic_after->ic_state() == UNINITIALIZED);
+  CHECK(IC::IsCleared(ic_after));
 }
 
 
diff --git a/test/mjsunit/div-mul-minus-one.js b/test/mjsunit/div-mul-minus-one.js
new file mode 100644
index 0000000..f05bf0f
--- /dev/null
+++ b/test/mjsunit/div-mul-minus-one.js
@@ -0,0 +1,53 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function div(g) {
+  return (g / -1) ^ 1;
+}
+
+var kMinInt = 1 << 31;
+var expected_MinInt = div(kMinInt);
+var expected_minus_zero = div(0);
+%OptimizeFunctionOnNextCall(div);
+assertEquals(expected_MinInt, div(kMinInt));
+assertOptimized(div);
+assertEquals(expected_minus_zero, div(0));
+assertOptimized(div);
+
+function mul(g) {
+  return (g * -1) ^ 1;
+}
+
+expected_MinInt = mul(kMinInt);
+expected_minus_zero = mul(0);
+%OptimizeFunctionOnNextCall(mul);
+assertEquals(expected_MinInt, mul(kMinInt));
+assertOptimized(mul);
+assertEquals(expected_minus_zero, mul(0));
+assertOptimized(mul);
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index ec780e5..9a229e8 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -43,6 +43,10 @@
   # This test non-deterministically runs out of memory on Windows ia32.
   'regress/regress-crbug-160010': [SKIP],
 
+  # This test fails on Windows XP and Windows Vista.
+  # Issue 288924
+  'timer': [['system == windows', FAIL]],
+
   ##############################################################################
   # Too slow in debug mode with --stress-opt mode.
   'compiler/regress-stacktrace-methods': [PASS, ['mode == debug', SKIP]],
diff --git a/test/mjsunit/regress/regress-embedded-cons-string.js b/test/mjsunit/regress/regress-embedded-cons-string.js
index 58a0b1c..32b900b 100644
--- a/test/mjsunit/regress/regress-embedded-cons-string.js
+++ b/test/mjsunit/regress/regress-embedded-cons-string.js
@@ -27,7 +27,7 @@
 
 // Flags: --fold-constants --nodead-code-elimination
 // Flags: --expose-gc --allow-natives-syntax
-// Flags: --concurrent-recompilation --concurrent-recompilation-delay=300
+// Flags: --concurrent-recompilation --concurrent-recompilation-delay=600
 
 if (!%IsConcurrentRecompilationSupported()) {
   print("Concurrent recompilation is disabled. Skipping this test.");
diff --git a/test/mjsunit/timer.js b/test/mjsunit/timer.js
new file mode 100644
index 0000000..bac474c
--- /dev/null
+++ b/test/mjsunit/timer.js
@@ -0,0 +1,46 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests that the timer has millisecond granularity.
+
+(function run() {
+  var start_test = Date.now();
+  // Let the retry loop run for at most 100ms to reduce flakiness.
+  for (var start = Date.now(); start - start_test < 100; start = Date.now()) {
+    var end = Date.now();
+    while (end - start == 0) {
+      end = Date.now();
+    }
+    if (end - start == 1) {
+      // Found milliseconds granularity.
+      return;
+    } else {
+      print("Timer difference too big: " + (end - start) + "ms");
+    }
+  }
+  assertTrue(false);
+})();
diff --git a/tools/testrunner/local/statusfile.py b/tools/testrunner/local/statusfile.py
index 7e01d3b..cc1e524 100644
--- a/tools/testrunner/local/statusfile.py
+++ b/tools/testrunner/local/statusfile.py
@@ -52,7 +52,7 @@
 # Support arches, modes to be written as keywords instead of strings.
 VARIABLES = {ALWAYS: True}
 for var in ["debug", "release", "android_arm", "android_ia32", "arm", "ia32",
-            "mipsel", "x64", "nacl_ia32", "nacl_x64", "macos"]:
+            "mipsel", "x64", "nacl_ia32", "nacl_x64", "macos", "windows"]:
   VARIABLES[var] = var
 
 
diff --git a/tools/testrunner/local/utils.py b/tools/testrunner/local/utils.py
index b7caa12..61ee7da 100644
--- a/tools/testrunner/local/utils.py
+++ b/tools/testrunner/local/utils.py
@@ -65,7 +65,7 @@
   elif system == 'Windows' or system == 'Microsoft':
     # On Windows Vista platform.system() can return 'Microsoft' with some
     # versions of Python, see http://bugs.python.org/issue1082
-    return 'win32'
+    return 'windows'
   elif system == 'FreeBSD':
     return 'freebsd'
   elif system == 'OpenBSD':
@@ -105,4 +105,4 @@
 
 
 def IsWindows():
-  return GuessOS() == 'win32'
+  return GuessOS() == 'windows'